author     Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013
parent     f239aed5e674965691846e8ce3f187dd47523689
New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
-rw-r--r--  MAINTAINERS | 225
-rw-r--r--  app/pdump/main.c | 8
-rw-r--r--  app/proc_info/main.c | 18
-rw-r--r--  app/test-crypto-perf/Makefile | 6
-rw-r--r--  app/test-crypto-perf/cperf_ops.c | 158
-rw-r--r--  app/test-crypto-perf/cperf_ops.h | 2
-rw-r--r--  app/test-crypto-perf/cperf_options.h | 15
-rw-r--r--  app/test-crypto-perf/cperf_options_parsing.c | 170
-rw-r--r--  app/test-crypto-perf/cperf_test_common.c | 244
-rw-r--r--  app/test-crypto-perf/cperf_test_common.h | 52
-rw-r--r--  app/test-crypto-perf/cperf_test_latency.c | 239
-rw-r--r--  app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 520
-rw-r--r--  app/test-crypto-perf/cperf_test_pmd_cyclecount.h | 61
-rw-r--r--  app/test-crypto-perf/cperf_test_throughput.c | 237
-rw-r--r--  app/test-crypto-perf/cperf_test_vector_parsing.c | 59
-rw-r--r--  app/test-crypto-perf/cperf_test_vectors.c | 6
-rw-r--r--  app/test-crypto-perf/cperf_test_vectors.h | 4
-rw-r--r--  app/test-crypto-perf/cperf_test_verify.c | 283
-rw-r--r--  app/test-crypto-perf/main.c | 110
-rw-r--r--  app/test-eventdev/evt_common.h | 53
-rw-r--r--  app/test-eventdev/evt_options.c | 10
-rw-r--r--  app/test-eventdev/evt_options.h | 8
-rw-r--r--  app/test-eventdev/test_order_atq.c | 6
-rw-r--r--  app/test-eventdev/test_order_common.c | 3
-rw-r--r--  app/test-eventdev/test_order_queue.c | 10
-rw-r--r--  app/test-eventdev/test_perf_atq.c | 6
-rw-r--r--  app/test-eventdev/test_perf_common.c | 47
-rw-r--r--  app/test-eventdev/test_perf_common.h | 1
-rw-r--r--  app/test-eventdev/test_perf_queue.c | 10
-rw-r--r--  app/test-pmd/Makefile | 10
-rw-r--r--  app/test-pmd/cmdline.c | 2071
-rw-r--r--  app/test-pmd/cmdline_flow.c | 70
-rw-r--r--  app/test-pmd/cmdline_mtr.c | 1018
-rw-r--r--  app/test-pmd/cmdline_mtr.h | 49
-rw-r--r--  app/test-pmd/cmdline_tm.c | 2071
-rw-r--r--  app/test-pmd/cmdline_tm.h | 56
-rw-r--r--  app/test-pmd/config.c | 244
-rw-r--r--  app/test-pmd/csumonly.c | 75
-rw-r--r--  app/test-pmd/flowgen.c | 1
-rw-r--r--  app/test-pmd/ieee1588fwd.c | 26
-rw-r--r--  app/test-pmd/iofwd.c | 1
-rw-r--r--  app/test-pmd/macfwd.c | 1
-rw-r--r--  app/test-pmd/macswap.c | 1
-rw-r--r--  app/test-pmd/parameters.c | 26
-rw-r--r--  app/test-pmd/rxonly.c | 5
-rw-r--r--  app/test-pmd/testpmd.c | 211
-rw-r--r--  app/test-pmd/testpmd.h | 96
-rw-r--r--  app/test-pmd/tm.c | 865
-rw-r--r--  app/test-pmd/txonly.c | 1
-rwxr-xr-x  buildtools/auto-config-h.sh | 6
-rw-r--r--  buildtools/pmdinfogen/pmdinfogen.c | 9
-rw-r--r--  config/common_armv8a_linuxapp | 2
-rw-r--r--  config/common_base | 96
-rw-r--r--  config/defconfig_arm-armv7a-linuxapp-gcc | 3
-rw-r--r--  config/defconfig_arm64-dpaa-linuxapp-gcc | 76
-rw-r--r--  config/defconfig_arm64-dpaa2-linuxapp-gcc | 2
-rwxr-xr-x  devtools/get-maintainer.sh | 85
-rwxr-xr-x  devtools/git-log-fixes.sh | 11
-rwxr-xr-x  devtools/test-build.sh | 10
-rwxr-xr-x  devtools/validate-abi.sh | 397
-rw-r--r--  doc/api/doxy-api-index.md | 23
-rw-r--r--  doc/api/doxy-api.conf | 7
-rw-r--r--  doc/guides/conf.py | 7
-rw-r--r--  doc/guides/contributing/documentation.rst | 1
-rw-r--r--  doc/guides/contributing/patches.rst | 5
-rw-r--r--  doc/guides/contributing/versioning.rst | 9
-rw-r--r--  doc/guides/cryptodevs/aesni_gcm.rst | 4
-rw-r--r--  doc/guides/cryptodevs/aesni_mb.rst | 21
-rw-r--r--  doc/guides/cryptodevs/dpaa2_sec.rst | 8
-rw-r--r--  doc/guides/cryptodevs/dpaa_sec.rst | 182
-rw-r--r--  doc/guides/cryptodevs/features/aesni_mb.ini | 3
-rw-r--r--  doc/guides/cryptodevs/features/default.ini | 4
-rw-r--r--  doc/guides/cryptodevs/features/dpaa2_sec.ini | 1
-rw-r--r--  doc/guides/cryptodevs/features/dpaa_sec.ini | 40
-rw-r--r--  doc/guides/cryptodevs/features/mrvl.ini | 42
-rw-r--r--  doc/guides/cryptodevs/features/openssl.ini | 3
-rw-r--r--  doc/guides/cryptodevs/index.rst | 2
-rw-r--r--  doc/guides/cryptodevs/mrvl.rst | 206
-rw-r--r--  doc/guides/cryptodevs/openssl.rst | 1
-rw-r--r--  doc/guides/cryptodevs/qat.rst | 1
-rw-r--r--  doc/guides/eventdevs/octeontx.rst | 5
-rw-r--r--  doc/guides/eventdevs/sw.rst | 13
-rw-r--r--  doc/guides/freebsd_gsg/build_sample_apps.rst | 3
-rw-r--r--  doc/guides/howto/index.rst | 1
-rw-r--r--  doc/guides/howto/rte_flow.rst | 333
-rw-r--r--  doc/guides/index.rst | 1
-rw-r--r--  doc/guides/linux_gsg/build_dpdk.rst | 4
-rw-r--r--  doc/guides/linux_gsg/build_sample_apps.rst | 8
-rw-r--r--  doc/guides/linux_gsg/sys_reqs.rst | 53
-rw-r--r--  doc/guides/nics/ark.rst | 3
-rw-r--r--  doc/guides/nics/dpaa.rst | 378
-rw-r--r--  doc/guides/nics/dpaa2.rst | 8
-rw-r--r--  doc/guides/nics/fail_safe.rst | 4
-rw-r--r--  doc/guides/nics/features.rst | 78
-rw-r--r--  doc/guides/nics/features/bnxt.ini | 17
-rw-r--r--  doc/guides/nics/features/default.ini | 2
-rw-r--r--  doc/guides/nics/features/dpaa.ini | 24
-rw-r--r--  doc/guides/nics/features/dpaa2.ini | 2
-rw-r--r--  doc/guides/nics/features/enic.ini | 1
-rw-r--r--  doc/guides/nics/features/fm10k_vec.ini | 35
-rw-r--r--  doc/guides/nics/features/liquidio.ini | 2
-rw-r--r--  doc/guides/nics/features/mlx4.ini | 1
-rw-r--r--  doc/guides/nics/features/mlx5.ini | 2
-rw-r--r--  doc/guides/nics/features/mrvl.ini | 23
-rw-r--r--  doc/guides/nics/features/nfp_pf.ini (renamed from doc/guides/nics/features/nfp.ini) | 1
-rw-r--r--  doc/guides/nics/features/nfp_vf.ini (renamed from doc/guides/nics/features/fm10k_vf_vec.ini) | 17
-rw-r--r--  doc/guides/nics/features/octeontx.ini | 19
-rw-r--r--  doc/guides/nics/features/sfc_efx.ini | 2
-rw-r--r--  doc/guides/nics/features/thunderx.ini | 1
-rw-r--r--  doc/guides/nics/features/xenvirt.ini | 6
-rw-r--r--  doc/guides/nics/fm10k.rst | 11
-rw-r--r--  doc/guides/nics/i40e.rst | 30
-rw-r--r--  doc/guides/nics/index.rst | 3
-rw-r--r--  doc/guides/nics/ixgbe.rst | 11
-rw-r--r--  doc/guides/nics/liquidio.rst | 10
-rw-r--r--  doc/guides/nics/mlx4.rst | 37
-rw-r--r--  doc/guides/nics/mlx5.rst | 74
-rw-r--r--  doc/guides/nics/mrvl.rst | 253
-rw-r--r--  doc/guides/nics/nfp.rst | 78
-rw-r--r--  doc/guides/nics/octeontx.rst | 252
-rw-r--r--  doc/guides/nics/sfc_efx.rst | 8
-rw-r--r--  doc/guides/nics/tap.rst | 9
-rw-r--r--  doc/guides/nics/vhost.rst | 5
-rw-r--r--  doc/guides/nics/virtio.rst | 7
-rw-r--r--  doc/guides/prog_guide/cryptodev_lib.rst | 6
-rw-r--r--  doc/guides/prog_guide/env_abstraction_layer.rst | 13
-rw-r--r--  doc/guides/prog_guide/event_ethernet_rx_adapter.rst | 168
-rw-r--r--  doc/guides/prog_guide/eventdev.rst | 4
-rw-r--r--  doc/guides/prog_guide/flow_classify_lib.rst | 427
-rw-r--r--  doc/guides/prog_guide/generic_segmentation_offload_lib.rst | 257
-rw-r--r--  doc/guides/prog_guide/img/gso-output-segment-format.svg | 313
-rw-r--r--  doc/guides/prog_guide/img/gso-three-seg-mbuf.svg | 477
-rw-r--r--  doc/guides/prog_guide/img/mbuf1.svg | 2
-rw-r--r--  doc/guides/prog_guide/img/member_i1.svg | 1613
-rw-r--r--  doc/guides/prog_guide/img/member_i2.svg | 36
-rw-r--r--  doc/guides/prog_guide/img/member_i3.svg | 148
-rw-r--r--  doc/guides/prog_guide/img/member_i4.svg | 450
-rw-r--r--  doc/guides/prog_guide/img/member_i5.svg | 163
-rw-r--r--  doc/guides/prog_guide/img/member_i6.svg | 332
-rw-r--r--  doc/guides/prog_guide/img/member_i7.svg | 399
-rw-r--r--  doc/guides/prog_guide/index.rst | 19
-rw-r--r--  doc/guides/prog_guide/member_lib.rst | 420
-rw-r--r--  doc/guides/prog_guide/poll_mode_drv.rst | 60
-rw-r--r--  doc/guides/prog_guide/port_hotplug_framework.rst | 8
-rw-r--r--  doc/guides/prog_guide/power_man.rst | 12
-rw-r--r--  doc/guides/prog_guide/profile_app.rst | 37
-rw-r--r--  doc/guides/prog_guide/rte_flow.rst | 159
-rw-r--r--  doc/guides/prog_guide/rte_security.rst | 564
-rw-r--r--  doc/guides/prog_guide/source_org.rst | 1
-rw-r--r--  doc/guides/prog_guide/traffic_metering_and_policing.rst | 102
-rw-r--r--  doc/guides/prog_guide/vhost_lib.rst | 29
-rw-r--r--  doc/guides/prog_guide/writing_efficient_code.rst | 15
-rw-r--r--  doc/guides/rel_notes/deprecation.rst | 85
-rw-r--r--  doc/guides/rel_notes/index.rst | 1
-rw-r--r--  doc/guides/rel_notes/release_17_11.rst | 600
-rw-r--r--  doc/guides/sample_app_ug/cmd_line.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/compiling.rst | 122
-rw-r--r--  doc/guides/sample_app_ug/dist_app.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/ethtool.rst | 23
-rw-r--r--  doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/exception_path.rst | 23
-rw-r--r--  doc/guides/sample_app_ug/flow_classify.rst | 575
-rw-r--r--  doc/guides/sample_app_ug/flow_filtering.rst | 545
-rw-r--r--  doc/guides/sample_app_ug/hello_world.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/index.rst | 4
-rw-r--r--  doc/guides/sample_app_ug/intro.rst | 153
-rw-r--r--  doc/guides/sample_app_ug/ip_frag.rst | 27
-rw-r--r--  doc/guides/sample_app_ug/ip_reassembly.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/ipsec_secgw.rst | 77
-rw-r--r--  doc/guides/sample_app_ug/ipv4_multicast.rst | 35
-rw-r--r--  doc/guides/sample_app_ug/keep_alive.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/kernel_nic_interface.rst | 32
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_cat.rst | 34
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_crypto.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_job_stats.rst | 25
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_real_virtual.rst | 23
-rw-r--r--  doc/guides/sample_app_ug/l3_forward.rst | 33
-rw-r--r--  doc/guides/sample_app_ug/l3_forward_access_ctrl.rst | 23
-rw-r--r--  doc/guides/sample_app_ug/l3_forward_power_man.rst | 27
-rw-r--r--  doc/guides/sample_app_ug/l3_forward_virtual.rst | 29
-rw-r--r--  doc/guides/sample_app_ug/link_status_intr.rst | 31
-rw-r--r--  doc/guides/sample_app_ug/load_balancer.rst | 19
-rw-r--r--  doc/guides/sample_app_ug/multi_process.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/netmap_compatibility.rst | 25
-rw-r--r--  doc/guides/sample_app_ug/packet_ordering.rst | 23
-rw-r--r--  doc/guides/sample_app_ug/performance_thread.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/ptpclient.rst | 35
-rw-r--r--  doc/guides/sample_app_ug/qos_metering.rst | 24
-rw-r--r--  doc/guides/sample_app_ug/qos_scheduler.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/quota_watermark.rst | 22
-rw-r--r--  doc/guides/sample_app_ug/rxtx_callbacks.rst | 31
-rw-r--r--  doc/guides/sample_app_ug/server_node_efd.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/service_cores.rst | 172
-rw-r--r--  doc/guides/sample_app_ug/skeleton.rst | 31
-rw-r--r--  doc/guides/sample_app_ug/tep_termination.rst | 31
-rw-r--r--  doc/guides/sample_app_ug/test_pipeline.rst | 20
-rw-r--r--  doc/guides/sample_app_ug/timer.rst | 21
-rw-r--r--  doc/guides/sample_app_ug/vhost.rst | 16
-rw-r--r--  doc/guides/sample_app_ug/vhost_scsi.rst | 19
-rw-r--r--  doc/guides/sample_app_ug/vm_power_management.rst | 9
-rw-r--r--  doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst | 19
-rw-r--r--  doc/guides/testpmd_app_ug/run_app.rst | 9
-rw-r--r--  doc/guides/testpmd_app_ug/testpmd_funcs.rst | 443
-rw-r--r--  doc/guides/tools/cryptoperf.rst | 22
-rw-r--r--  doc/guides/tools/testeventdev.rst | 10
-rw-r--r--  doc/guides/xen/img/dpdk_xen_pkt_switch.png | bin 163842 -> 0 bytes
-rw-r--r--  doc/guides/xen/img/grant_refs.png | bin 6405 -> 0 bytes
-rw-r--r--  doc/guides/xen/img/grant_table.png | bin 96762 -> 0 bytes
-rw-r--r--  doc/guides/xen/index.rst | 38
-rw-r--r--  doc/guides/xen/pkt_switch.rst | 470
-rw-r--r--  drivers/Makefile | 4
-rw-r--r--  drivers/bus/Makefile | 6
-rw-r--r--  drivers/bus/dpaa/Makefile | 78
-rw-r--r--  drivers/bus/dpaa/base/fman/fman.c | 612
-rw-r--r--  drivers/bus/dpaa/base/fman/fman_hw.c | 590
-rw-r--r--  drivers/bus/dpaa/base/fman/netcfg_layer.c | 214
-rw-r--r--  drivers/bus/dpaa/base/fman/of.c | 576
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman.c | 394
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman.h | 550
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_driver.c | 323
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_priv.h | 125
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_alloc.c | 104
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_sys.c | 136
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_sys.h | 61
-rw-r--r--  drivers/bus/dpaa/base/qbman/process.c | 331
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.c | 2497
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.h | 888
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_driver.c | 288
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_priv.h | 310
-rw-r--r--  drivers/bus/dpaa/dpaa_bus.c | 525
-rw-r--r--  drivers/bus/dpaa/include/compat.h | 385
-rw-r--r--  drivers/bus/dpaa/include/dpaa_bits.h | 65
-rw-r--r--  drivers/bus/dpaa/include/dpaa_list.h | 101
-rw-r--r--  drivers/bus/dpaa/include/dpaa_rbtree.h | 143
-rw-r--r--  drivers/bus/dpaa/include/fman.h | 458
-rw-r--r--  drivers/bus/dpaa/include/fsl_bman.h | 375
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman.h | 181
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman_crc64.h | 263
-rw-r--r--  drivers/bus/dpaa/include/fsl_qman.h | 2021
-rw-r--r--  drivers/bus/dpaa/include/fsl_usd.h | 107
-rw-r--r--  drivers/bus/dpaa/include/netcfg.h | 96
-rw-r--r--  drivers/bus/dpaa/include/of.h | 190
-rw-r--r--  drivers/bus/dpaa/include/process.h | 107
-rw-r--r--  drivers/bus/dpaa/rte_bus_dpaa_version.map | 66
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_bus.h | 173
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_logs.h | 107
-rw-r--r--  drivers/bus/fslmc/Makefile | 3
-rw-r--r--  drivers/bus/fslmc/fslmc_bus.c | 233
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.c | 641
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.h | 52
-rw-r--r--  drivers/bus/fslmc/mc/dpbp.c | 182
-rw-r--r--  drivers/bus/fslmc/mc/dpci.c | 202
-rw-r--r--  drivers/bus/fslmc/mc/dpcon.c | 163
-rw-r--r--  drivers/bus/fslmc/mc/dpio.c | 230
-rw-r--r--  drivers/bus/fslmc/mc/dpmng.c | 33
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp.h | 191
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp_cmd.h | 125
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci.h | 257
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci_cmd.h | 222
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpcon.h | 186
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpcon_cmd.h | 193
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpio.h | 299
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpio_cmd.h | 178
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpmng.h | 41
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpmng_cmd.h | 41
-rw-r--r--  drivers/bus/fslmc/mc/fsl_mc_cmd.h | 217
-rw-r--r--  drivers/bus/fslmc/mc/fsl_mc_sys.h | 36
-rw-r--r--  drivers/bus/fslmc/mc/mc_sys.c | 5
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 15
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpci.c | 10
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 52
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 8
-rw-r--r--  drivers/bus/fslmc/qbman/include/compat.h | 322
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_base.h | 4
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h | 183
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.c | 973
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.h | 140
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_private.h | 174
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys.h | 151
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys_decl.h | 25
-rw-r--r--  drivers/bus/fslmc/rte_bus_fslmc_version.map | 16
-rw-r--r--  drivers/bus/fslmc/rte_fslmc.h | 63
-rw-r--r--  drivers/bus/pci/Makefile | 62
-rw-r--r--  drivers/bus/pci/bsd/Makefile | 32
-rw-r--r--  drivers/bus/pci/bsd/pci.c (renamed from lib/librte_eal/bsdapp/eal/eal_pci.c) | 18
-rw-r--r--  drivers/bus/pci/linux/Makefile | 36
-rw-r--r--  drivers/bus/pci/linux/pci.c (renamed from lib/librte_eal/linuxapp/eal/eal_pci.c) | 148
-rw-r--r--  drivers/bus/pci/linux/pci_init.h (renamed from lib/librte_eal/linuxapp/eal/eal_pci_init.h) | 16
-rw-r--r--  drivers/bus/pci/linux/pci_uio.c (renamed from lib/librte_eal/linuxapp/eal/eal_pci_uio.c) | 17
-rw-r--r--  drivers/bus/pci/linux/pci_vfio.c (renamed from lib/librte_eal/linuxapp/eal/eal_pci_vfio.c) | 487
-rw-r--r--  drivers/bus/pci/pci_common.c (renamed from lib/librte_eal/common/eal_common_pci.c) | 78
-rw-r--r--  drivers/bus/pci/pci_common_uio.c (renamed from lib/librte_eal/common/eal_common_pci_uio.c) | 8
-rw-r--r--  drivers/bus/pci/private.h | 248
-rw-r--r--  drivers/bus/pci/rte_bus_pci.h | 340
-rw-r--r--  drivers/bus/pci/rte_bus_pci_version.map | 18
-rw-r--r--  drivers/bus/vdev/Makefile (renamed from drivers/net/xenvirt/Makefile) | 18
-rw-r--r--  drivers/bus/vdev/rte_bus_vdev.h (renamed from lib/librte_eal/common/include/rte_vdev.h) | 22
-rw-r--r--  drivers/bus/vdev/rte_bus_vdev_version.map | 10
-rw-r--r--  drivers/bus/vdev/vdev.c (renamed from lib/librte_eal/common/eal_common_vdev.c) | 22
-rw-r--r--  drivers/bus/vdev/vdev_logs.h | 45
-rw-r--r--  drivers/crypto/Makefile | 15
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile | 3
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 97
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 7
-rw-r--r--  drivers/crypto/aesni_mb/Makefile | 5
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 196
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 51
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 12
-rw-r--r--  drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map (renamed from drivers/crypto/aesni_mb/rte_pmd_aesni_version.map) | 0
-rw-r--r--  drivers/crypto/armv8/Makefile | 5
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd.c | 66
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_private.h | 8
-rw-r--r--  drivers/crypto/armv8/rte_pmd_armv8_version.map (renamed from drivers/crypto/armv8/rte_armv8_pmd_version.map) | 0
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 15
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 497
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 62
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/dpseci.c | 676
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h | 782
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h | 387
-rw-r--r--  drivers/crypto/dpaa_sec/Makefile | 73
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 1548
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.h | 402
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec_log.h (renamed from drivers/net/xenvirt/virtio_logs.h) | 46
-rw-r--r--  drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map | 4
-rw-r--r--  drivers/crypto/kasumi/Makefile | 3
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c | 79
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 7
-rw-r--r--  drivers/crypto/mrvl/Makefile | 66
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_compat.h | 50
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd.c | 857
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 778
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_private.h | 123
-rw-r--r--  drivers/crypto/mrvl/rte_pmd_mrvl_version.map | 3
-rw-r--r--  drivers/crypto/null/Makefile | 3
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c | 80
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_private.h | 2
-rw-r--r--  drivers/crypto/openssl/Makefile | 3
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 584
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 52
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h | 10
-rw-r--r--  drivers/crypto/qat/Makefile | 3
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h | 20
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h | 6
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 28
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 279
-rw-r--r--  drivers/crypto/qat/qat_crypto.h | 17
-rw-r--r--  drivers/crypto/qat/qat_crypto_capabilities.h | 54
-rw-r--r--  drivers/crypto/qat/qat_qp.c | 20
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c | 63
-rw-r--r--  drivers/crypto/scheduler/Makefile | 3
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 52
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c | 4
-rw-r--r--  drivers/crypto/snow3g/Makefile | 3
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c | 71
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 7
-rw-r--r--  drivers/crypto/zuc/Makefile | 3
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c | 65
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h | 7
-rw-r--r--  drivers/event/Makefile | 6
-rw-r--r--  drivers/event/dpaa2/Makefile | 4
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.c | 178
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.h | 8
-rw-r--r--  drivers/event/dpaa2/dpaa2_hw_dpcon.c | 10
-rw-r--r--  drivers/event/octeontx/Makefile | 11
-rw-r--r--  drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map | 6
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c | 130
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h | 11
-rw-r--r--  drivers/event/octeontx/ssovf_worker.h | 34
-rw-r--r--  drivers/event/skeleton/Makefile | 3
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.c | 5
-rw-r--r--  drivers/event/sw/Makefile | 4
-rw-r--r--  drivers/event/sw/rte_pmd_sw_event_version.map (renamed from drivers/event/sw/rte_pmd_evdev_sw_version.map) | 0
-rw-r--r--  drivers/event/sw/sw_evdev.c | 61
-rw-r--r--  drivers/event/sw/sw_evdev.h | 1
-rw-r--r--  drivers/event/sw/sw_evdev_worker.c | 14
-rw-r--r--  drivers/event/sw/sw_evdev_xstats.c | 2
-rw-r--r--  drivers/mempool/Makefile | 7
-rw-r--r--  drivers/mempool/dpaa/Makefile | 59
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.c | 284
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.h | 76
-rw-r--r--  drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 8
-rw-r--r--  drivers/mempool/dpaa2/Makefile | 1
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 38
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.h | 2
-rw-r--r--  drivers/mempool/octeontx/Makefile | 68
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.c | 830
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.h | 131
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.c (renamed from drivers/event/octeontx/ssovf_mbox.c) | 24
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.h (renamed from drivers/event/octeontx/rte_pmd_octeontx_ssovf.h) | 9
-rw-r--r--  drivers/mempool/octeontx/octeontx_pool_logs.h | 68
-rw-r--r--  drivers/mempool/octeontx/octeontx_ssovf.c (renamed from drivers/event/octeontx/ssovf_probe.c) | 31
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx.c | 253
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx_version.map | 9
-rw-r--r--  drivers/mempool/ring/Makefile | 1
-rw-r--r--  drivers/mempool/stack/Makefile | 1
-rw-r--r--  drivers/net/Makefile | 49
-rw-r--r--  drivers/net/af_packet/Makefile | 3
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c | 51
-rw-r--r--  drivers/net/ark/Makefile | 3
-rw-r--r--  drivers/net/ark/ark_ddm.c | 2
-rw-r--r--  drivers/net/ark/ark_ddm.h | 4
-rw-r--r--  drivers/net/ark/ark_ethdev.c | 22
-rw-r--r--  drivers/net/ark/ark_ethdev.h | 37
-rw-r--r--  drivers/net/ark/ark_ethdev_rx.c | 25
-rw-r--r--  drivers/net/ark/ark_ethdev_tx.c | 9
-rw-r--r--  drivers/net/ark/ark_ext.h | 2
-rw-r--r--  drivers/net/ark/ark_global.h | 2
-rw-r--r--  drivers/net/ark/ark_mpu.c | 2
-rw-r--r--  drivers/net/ark/ark_mpu.h | 4
-rw-r--r--  drivers/net/ark/ark_pktchkr.c | 4
-rw-r--r--  drivers/net/ark/ark_pktdir.c | 5
-rw-r--r--  drivers/net/ark/ark_pktgen.c | 4
-rw-r--r--  drivers/net/ark/ark_udm.c | 2
-rw-r--r--  drivers/net/ark/ark_udm.h | 4
-rw-r--r--  drivers/net/avp/Makefile | 3
-rw-r--r--  drivers/net/avp/avp_ethdev.c | 31
-rw-r--r--  drivers/net/avp/rte_avp_common.h | 20
-rw-r--r--  drivers/net/bnx2x/Makefile | 3
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 50
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 27
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.h | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c | 16
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/bnx2x/ecore_sp.h | 4
-rw-r--r--  drivers/net/bnxt/Makefile | 5
-rw-r--r--  drivers/net/bnxt/bnxt.h | 24
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c | 2
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.h | 10
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 1242
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c | 1037
-rw-r--r--  drivers/net/bnxt/bnxt_filter.h | 88
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 1006
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h | 25
-rw-r--r--  drivers/net/bnxt/bnxt_irq.c | 11
-rw-r--r--  drivers/net/bnxt/bnxt_irq.h | 3
-rw-r--r--  drivers/net/bnxt/bnxt_nvm_defs.h | 75
-rw-r--r--  drivers/net/bnxt/bnxt_ring.c | 14
-rw-r--r--  drivers/net/bnxt/bnxt_ring.h | 4
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.c | 237
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.h | 6
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c | 110
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.h | 20
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c | 69
-rw-r--r--  drivers/net/bnxt/bnxt_stats.h | 7
-rw-r--r--  drivers/net/bnxt/bnxt_txq.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c | 35
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h | 23
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c | 9
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h | 7
-rw-r--r--  drivers/net/bnxt/hsi_struct_def_dpdk.h | 2194
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.c | 54
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.h | 32
-rw-r--r--  drivers/net/bonding/Makefile | 8
-rw-r--r--  drivers/net/bonding/rte_eth_bond.h | 41
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 238
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.h | 48
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad_private.h | 12
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.c | 6
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.h | 6
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c | 75
-rw-r--r--  drivers/net/bonding/rte_eth_bond_args.c | 38
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 132
-rw-r--r--  drivers/net/bonding/rte_eth_bond_private.h | 45
-rw-r--r--  drivers/net/bonding/rte_pmd_bond_version.map (renamed from drivers/net/bonding/rte_eth_bond_version.map) | 4
-rw-r--r--  drivers/net/cxgbe/Makefile | 5
-rw-r--r--  drivers/net/cxgbe/base/adapter.h | 1
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c | 4
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 5
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c | 1
-rw-r--r--  drivers/net/cxgbe/sge.c | 10
-rw-r--r--  drivers/net/dpaa/Makefile | 63
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.c | 1109
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.h | 182
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.c | 756
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.h | 297
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa_version.map | 4
-rw-r--r--  drivers/net/dpaa2/Makefile | 3
-rw-r--r--  drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 56
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.c | 571
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.h | 20
-rw-r--r--  drivers/net/dpaa2/dpaa2_rxtx.c | 75
-rw-r--r--  drivers/net/dpaa2/mc/dpkg.c | 107
-rw-r--r--  drivers/net/dpaa2/mc/dpni.c | 1326
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpkg.h | 69
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpni.h | 1122
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1020
-rw-r--r--  drivers/net/dpaa2/mc/fsl_net.h | 2
-rw-r--r--  drivers/net/dpaa2/rte_pmd_dpaa2_version.map | 8
-rw-r--r--  drivers/net/e1000/Makefile | 5
-rw-r--r--  drivers/net/e1000/em_ethdev.c | 28
-rw-r--r--  drivers/net/e1000/em_rxtx.c | 22
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 56
-rw-r--r--  drivers/net/e1000/igb_flow.c | 9
-rw-r--r--  drivers/net/e1000/igb_pf.c | 1
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 73
-rw-r--r--  drivers/net/ena/Makefile | 3
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h | 4
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 15
-rw-r--r--  drivers/net/ena/ena_ethdev.h | 1
-rw-r--r--  drivers/net/enic/Makefile | 3
-rw-r--r--  drivers/net/enic/base/vnic_cq.c | 14
-rw-r--r--  drivers/net/enic/base/vnic_dev.c | 120
-rw-r--r--  drivers/net/enic/base/vnic_dev.h | 4
-rw-r--r--  drivers/net/enic/base/vnic_rq.c | 9
-rw-r--r--  drivers/net/enic/base/vnic_rss.c | 32
-rw-r--r--  drivers/net/enic/base/vnic_wq.c | 7
-rw-r--r--  drivers/net/enic/enic.h | 7
-rw-r--r--  drivers/net/enic/enic_compat.h | 20
-rw-r--r--  drivers/net/enic/enic_ethdev.c | 57
-rw-r--r--  drivers/net/enic/enic_main.c | 30
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 33
-rw-r--r--  drivers/net/failsafe/Makefile | 3
-rw-r--r--  drivers/net/failsafe/failsafe.c | 2
-rw-r--r--  drivers/net/failsafe/failsafe_args.c | 32
-rw-r--r--  drivers/net/failsafe/failsafe_eal.c | 28
-rw-r--r--  drivers/net/failsafe/failsafe_ether.c | 55
-rw-r--r--  drivers/net/failsafe/failsafe_ops.c | 149
-rw-r--r--  drivers/net/failsafe/failsafe_private.h | 55
-rw-r--r--  drivers/net/failsafe/failsafe_rxtx.c | 3
-rw-r--r--  drivers/net/fm10k/Makefile | 5
-rw-r--r--  drivers/net/fm10k/fm10k.h | 11
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 137
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx.c | 12
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c | 8
-rw-r--r--  drivers/net/i40e/Makefile | 7
-rw-r--r--  drivers/net/i40e/base/i40e_osdep.h | 3
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 1128
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 231
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 454
-rw-r--r--  drivers/net/i40e/i40e_fdir.c | 683
-rw-r--r--  drivers/net/i40e/i40e_flow.c | 519
-rw-r--r--  drivers/net/i40e/i40e_pf.c | 130
-rw-r--r--  drivers/net/i40e/i40e_pf.h | 60
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 200
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h | 5
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_altivec.c | 6
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c | 27
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c | 8
-rw-r--r--  drivers/net/i40e/i40e_tm.c | 108
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c | 940
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h | 346
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e_version.map | 13
-rw-r--r--  drivers/net/ixgbe/Makefile | 11
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_osdep.h | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 208
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h | 55
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c | 179
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.c | 737
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.h | 151
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c | 61
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 141
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h | 19
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 10
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 53
-rw-r--r--  drivers/net/ixgbe/ixgbe_tm.c | 97
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c | 60
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h | 64
-rw-r--r--  drivers/net/kni/Makefile | 3
-rw-r--r--  drivers/net/kni/rte_eth_kni.c | 10
-rw-r--r--  drivers/net/liquidio/Makefile | 3
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.c | 19
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.h | 2
-rw-r--r--  drivers/net/liquidio/base/lio_hw_defs.h | 16
-rw-r--r--  drivers/net/liquidio/base/lio_mbox.h | 1
-rw-r--r--  drivers/net/liquidio/lio_ethdev.c | 156
-rw-r--r--  drivers/net/liquidio/lio_rxtx.c | 20
-rw-r--r--  drivers/net/liquidio/lio_rxtx.h | 6
-rw-r--r--  drivers/net/liquidio/lio_struct.h | 3
-rw-r--r--  drivers/net/mlx4/Makefile | 46
-rw-r--r--  drivers/net/mlx4/mlx4.c | 6139
-rw-r--r--  drivers/net/mlx4/mlx4.h | 401
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 1047
-rw-r--r--  drivers/net/mlx4/mlx4_flow.c | 2062
-rw-r--r--  drivers/net/mlx4/mlx4_flow.h | 73
-rw-r--r--  drivers/net/mlx4/mlx4_intr.c | 397
-rw-r--r--  drivers/net/mlx4/mlx4_mr.c | 293
-rw-r--r--  drivers/net/mlx4/mlx4_prm.h | 177
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c | 873
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.c | 1071
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.h | 214
-rw-r--r--  drivers/net/mlx4/mlx4_txq.c | 414
-rw-r--r--  drivers/net/mlx4/mlx4_utils.c | 217
-rw-r--r--  drivers/net/mlx4/mlx4_utils.h | 133
-rw-r--r--  drivers/net/mlx5/Makefile | 42
-rw-r--r--  drivers/net/mlx5/mlx5.c | 412
-rw-r--r--  drivers/net/mlx5/mlx5.h | 138
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 6
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 441
-rw-r--r--  drivers/net/mlx5/mlx5_fdir.c | 1101
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c | 2811
-rw-r--r--  drivers/net/mlx5/mlx5_mac.c | 410
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 283
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h | 48
-rw-r--r--  drivers/net/mlx5/mlx5_rss.c | 139
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c | 383
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 2133
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 267
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 366
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.c | 388
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h | 130
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 1039
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h (renamed from drivers/net/mlx5/mlx5_rxtx_vec_sse.c) | 512
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c | 294
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c | 25
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 350
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 885
-rw-r--r--  drivers/net/mlx5/mlx5_utils.h | 2
-rw-r--r--  drivers/net/mlx5/mlx5_vlan.c | 84
-rw-r--r--  drivers/net/mrvl/Makefile | 68
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.c | 2294
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.h | 116
-rw-r--r--  drivers/net/mrvl/mrvl_qos.c | 636
-rw-r--r--  drivers/net/mrvl/mrvl_qos.h | 113
-rw-r--r--  drivers/net/mrvl/rte_pmd_mrvl_version.map | 3
-rw-r--r--  drivers/net/nfp/Makefile | 5
-rw-r--r--  drivers/net/nfp/nfp_net.c | 602
-rw-r--r--  drivers/net/nfp/nfp_net_ctrl.h | 10
-rw-r--r--  drivers/net/nfp/nfp_net_eth.h | 82
-rw-r--r--  drivers/net/nfp/nfp_net_pmd.h | 15
-rw-r--r--  drivers/net/nfp/nfp_nfpu.c | 103
-rw-r--r--  drivers/net/nfp/nfp_nfpu.h | 55
-rw-r--r--  drivers/net/nfp/nfp_nspu.c | 623
-rw-r--r--  drivers/net/nfp/nfp_nspu.h | 83
-rw-r--r--  drivers/net/null/Makefile | 3
-rw-r--r--  drivers/net/null/rte_eth_null.c | 12
-rw-r--r--  drivers/net/octeontx/Makefile | 79
-rw-r--r--  drivers/net/octeontx/base/octeontx_bgx.c | 273
-rw-r--r--  drivers/net/octeontx/base/octeontx_bgx.h | 150
-rw-r--r--  drivers/net/octeontx/base/octeontx_io.h | 156
-rw-r--r--  drivers/net/octeontx/base/octeontx_pki_var.h | 237
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.c | 169
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.h | 553
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkovf.c | 617
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkovf.h | 97
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.c | 1333
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.h | 109
-rw-r--r--  drivers/net/octeontx/octeontx_logs.h | 76
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.c | 127
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.h | 137
-rw-r--r--  drivers/net/octeontx/rte_pmd_octeontx_version.map | 4
-rw-r--r--  drivers/net/pcap/Makefile | 3
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c | 11
-rw-r--r--  drivers/net/qede/Makefile | 10
-rw-r--r--  drivers/net/qede/base/bcm_osal.c | 22
-rw-r--r--  drivers/net/qede/base/bcm_osal.h | 36
-rw-r--r--  drivers/net/qede/base/common_hsi.h | 760
-rw-r--r--  drivers/net/qede/base/ecore.h | 232
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c | 150
-rw-r--r--  drivers/net/qede/base/ecore_cxt.h | 6
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c | 591
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.h | 13
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h | 46
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 1115
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h | 113
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h | 245
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h | 6
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 65
-rw-r--r--  drivers/net/qede/base/ecore_hw.c | 25
-rw-r--r--  drivers/net/qede/base/ecore_hw.h | 44
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c | 512
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h | 98
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c | 73
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.h | 3
-rw-r--r--  drivers/net/qede/base/ecore_int.c | 1011
-rw-r--r--  drivers/net/qede/base/ecore_int.h | 73
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h | 47
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h | 55
-rw-r--r--  drivers/net/qede/base/ecore_iro.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h | 44
-rw-r--r--  drivers/net/qede/base/ecore_l2.c | 293
-rw-r--r--  drivers/net/qede/base/ecore_l2.h | 82
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h | 30
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 1651
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h | 195
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 190
-rw-r--r--  drivers/net/qede/base/ecore_mng_tlv.c | 9
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h | 5
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h | 858
-rw-r--r--  drivers/net/qede/base/ecore_sp_api.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c | 152
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h | 33
-rw-r--r--  drivers/net/qede/base/ecore_spq.c | 111
-rw-r--r--  drivers/net/qede/base/ecore_spq.h | 20
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c | 992
-rw-r--r--  drivers/net/qede/base/ecore_sriov.h | 60
-rw-r--r--  drivers/net/qede/base/ecore_vf.c | 447
-rw-r--r--  drivers/net/qede/base/ecore_vf.h | 79
-rw-r--r--  drivers/net/qede/base/ecore_vf_api.h | 13
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h | 80
-rw-r--r--  drivers/net/qede/base/mcp_public.h | 516
-rw-r--r--  drivers/net/qede/base/nvm_cfg.h | 100
-rw-r--r--  drivers/net/qede/base/reg_addr.h | 17
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 304
-rw-r--r--  drivers/net/qede/qede_ethdev.h | 23
-rw-r--r--  drivers/net/qede/qede_fdir.c | 10
-rw-r--r--  drivers/net/qede/qede_if.h | 17
-rw-r--r--  drivers/net/qede/qede_main.c | 75
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 255
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 23
-rw-r--r--  drivers/net/ring/Makefile | 5
-rw-r--r--  drivers/net/ring/rte_eth_ring.c | 10
-rw-r--r--  drivers/net/ring/rte_pmd_ring_version.map (renamed from drivers/net/ring/rte_eth_ring_version.map) | 0
-rw-r--r--  drivers/net/sfc/Makefile | 8
-rw-r--r--  drivers/net/sfc/base/ef10_filter.c | 288
-rw-r--r--  drivers/net/sfc/base/ef10_impl.h | 25
-rw-r--r--  drivers/net/sfc/base/ef10_nic.c | 10
-rw-r--r--  drivers/net/sfc/base/ef10_rx.c | 96
-rw-r--r--  drivers/net/sfc/base/efx.h | 105
-rw-r--r--  drivers/net/sfc/base/efx_filter.c | 103
-rw-r--r--  drivers/net/sfc/base/efx_impl.h | 20
-rw-r--r--  drivers/net/sfc/base/efx_rx.c | 142
-rw-r--r--  drivers/net/sfc/base/hunt_nic.c | 7
-rw-r--r--  drivers/net/sfc/base/medford_nic.c | 7
-rw-r--r--  drivers/net/sfc/base/siena_nic.c | 3
-rw-r--r--  drivers/net/sfc/efsys.h | 2
-rw-r--r--  drivers/net/sfc/sfc.c | 18
-rw-r--r--  drivers/net/sfc/sfc.h | 7
-rw-r--r--  drivers/net/sfc/sfc_dp_rx.h | 5
-rw-r--r--  drivers/net/sfc/sfc_dp_tx.h | 9
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx.c | 13
-rw-r--r--  drivers/net/sfc/sfc_ef10_tx.c | 81
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c | 69
-rw-r--r--  drivers/net/sfc/sfc_flow.c | 188
-rw-r--r--  drivers/net/sfc/sfc_flow.h | 15
-rw-r--r--  drivers/net/sfc/sfc_rx.c | 74
-rw-r--r--  drivers/net/sfc/sfc_tso.c | 4
-rw-r--r--  drivers/net/sfc/sfc_tweak.h | 3
-rw-r--r--  drivers/net/sfc/sfc_tx.c | 58
-rw-r--r--  drivers/net/softnic/Makefile | 60
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.c | 851
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.h | 83
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_internals.h | 291
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tm.c | 3452
-rw-r--r--  drivers/net/softnic/rte_pmd_eth_softnic_version.map | 7
-rw-r--r--  drivers/net/szedata2/Makefile | 3
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c | 8
-rw-r--r--  drivers/net/tap/Makefile | 3
-rw-r--r--  drivers/net/tap/rte_eth_tap.c | 153
-rw-r--r--  drivers/net/tap/rte_eth_tap.h | 2
-rw-r--r--  drivers/net/tap/tap_flow.c | 20
-rw-r--r--  drivers/net/thunderx/Makefile | 5
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.c | 2
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.h | 2
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw_defs.h | 6
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c | 22
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.h | 6
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.h | 4
-rw-r--r--  drivers/net/thunderx/nicvf_struct.h | 8
-rw-r--r--  drivers/net/vhost/Makefile | 5
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c | 28
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.h | 4
-rw-r--r--  drivers/net/virtio/Makefile | 6
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 169
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h | 6
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 2
-rw-r--r--  drivers/net/virtio/virtio_pci.h | 6
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c | 83
-rw-r--r--  drivers/net/virtio/virtio_rxtx.h | 12
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c | 3
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_neon.c | 1
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_sse.c | 1
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 4
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c | 4
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c | 2
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 12
-rw-r--r--  drivers/net/virtio/virtqueue.c | 25
-rw-r--r--  drivers/net/virtio/virtqueue.h | 16
-rw-r--r--  drivers/net/vmxnet3/Makefile | 3
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 36
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h | 4
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 46
-rw-r--r--  drivers/net/xenvirt/rte_eth_xenvirt.c | 766
-rw-r--r--  drivers/net/xenvirt/rte_eth_xenvirt_version.map | 7
-rw-r--r--  drivers/net/xenvirt/rte_mempool_gntalloc.c | 295
-rw-r--r--  drivers/net/xenvirt/rte_xen_lib.c | 454
-rw-r--r--  drivers/net/xenvirt/rte_xen_lib.h | 116
-rw-r--r--  drivers/net/xenvirt/virtqueue.h | 273
-rw-r--r--  examples/Makefile | 6
-rw-r--r--  examples/bond/main.c | 18
-rw-r--r--  examples/cmdline/main.c | 1
-rw-r--r--  examples/distributor/main.c | 28
-rw-r--r--  examples/ethtool/Makefile | 1
-rw-r--r--  examples/ethtool/lib/Makefile | 1
-rw-r--r--  examples/ethtool/lib/rte_ethtool.c | 44
-rw-r--r--  examples/ethtool/lib/rte_ethtool.h | 42
-rw-r--r--  examples/eventdev_pipeline_sw_pmd/main.c | 40
-rw-r--r--  examples/exception_path/main.c | 39
-rw-r--r--  examples/flow_classify/Makefile | 57
-rw-r--r--  examples/flow_classify/flow_classify.c | 852
-rw-r--r--  examples/flow_classify/ipv4_rules_file.txt | 14
-rw-r--r--  examples/flow_filtering/Makefile | 49
-rw-r--r--  examples/flow_filtering/flow_blocks.c | 150
-rw-r--r--  examples/flow_filtering/main.c | 244
-rw-r--r--  examples/helloworld/main.c | 1
-rw-r--r--  examples/ip_fragmentation/main.c | 35
-rw-r--r--  examples/ip_pipeline/Makefile | 2
-rw-r--r--  examples/ip_pipeline/app.h | 4
-rw-r--r--  examples/ip_pipeline/config_parse.c | 19
-rw-r--r--  examples/ip_pipeline/init.c | 10
-rw-r--r--  examples/ip_pipeline/pipeline/hash_func.h | 178
-rw-r--r--  examples/ip_pipeline/pipeline/hash_func_arm64.h | 261
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification.c | 10
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c | 53
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_passthrough_be.c | 10
-rw-r--r--  examples/ip_pipeline/pipeline/pipeline_routing_be.c | 15
-rw-r--r--  examples/ip_reassembly/main.c | 50
-rw-r--r--  examples/ipsec-secgw/Makefile | 6
-rw-r--r--  examples/ipsec-secgw/esp.c | 144
-rw-r--r--  examples/ipsec-secgw/esp.h | 10
-rw-r--r--  examples/ipsec-secgw/ipip.h | 3
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c | 105
-rw-r--r--  examples/ipsec-secgw/ipsec.c | 306
-rw-r--r--  examples/ipsec-secgw/ipsec.h | 33
-rw-r--r--  examples/ipsec-secgw/sa.c | 154
-rw-r--r--  examples/ipv4_multicast/main.c | 32
-rw-r--r--  examples/kni/main.c | 59
-rw-r--r--  examples/l2fwd-cat/Makefile | 5
-rw-r--r--  examples/l2fwd-cat/cat.c | 85
-rw-r--r--  examples/l2fwd-cat/l2fwd-cat.c | 12
-rw-r--r--  examples/l2fwd-crypto/main.c | 115
-rw-r--r--  examples/l2fwd-jobstats/main.c | 62
-rw-r--r--  examples/l2fwd-keepalive/main.c | 52
-rw-r--r--  examples/l2fwd/main.c | 49
-rw-r--r--  examples/l3fwd-acl/main.c | 41
-rw-r--r--  examples/l3fwd-power/main.c | 76
-rw-r--r--  examples/l3fwd-vf/main.c | 52
-rw-r--r--  examples/l3fwd/l3fwd.h | 10
-rw-r--r--  examples/l3fwd/l3fwd_altivec.h | 284
-rw-r--r--  examples/l3fwd/l3fwd_common.h | 2
-rw-r--r--  examples/l3fwd/l3fwd_em.c | 19
-rw-r--r--  examples/l3fwd/l3fwd_em.h | 6
-rw-r--r--  examples/l3fwd/l3fwd_em_hlm.h | 14
-rw-r--r--  examples/l3fwd/l3fwd_em_sequential.h | 4
-rw-r--r--  examples/l3fwd/l3fwd_lpm.c | 20
-rw-r--r--  examples/l3fwd/l3fwd_lpm.h | 4
-rw-r--r--  examples/l3fwd/l3fwd_lpm_altivec.h | 164
-rw-r--r--  examples/l3fwd/l3fwd_lpm_neon.h | 4
-rw-r--r--  examples/l3fwd/l3fwd_lpm_sse.h | 4
-rw-r--r--  examples/l3fwd/l3fwd_neon.h | 3
-rw-r--r--  examples/l3fwd/main.c | 32
-rw-r--r--  examples/link_status_interrupt/main.c | 26
-rw-r--r--  examples/load_balancer/config.c | 16
-rw-r--r--  examples/load_balancer/init.c | 36
-rw-r--r--  examples/load_balancer/main.c | 2
-rw-r--r--  examples/load_balancer/main.h | 11
-rw-r--r--  examples/load_balancer/runtime.c | 14
-rw-r--r--  examples/multi_process/client_server_mp/mp_client/client.c | 24
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/Makefile | 2
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/args.c | 4
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/args.h | 2
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/init.c | 12
-rw-r--r--  examples/multi_process/client_server_mp/mp_server/main.c | 4
-rw-r--r--  examples/multi_process/client_server_mp/shared/common.h | 4
-rw-r--r--  examples/multi_process/l2fwd_fork/flib.c | 2
-rw-r--r--  examples/multi_process/l2fwd_fork/main.c | 30
-rw-r--r--  examples/multi_process/simple_mp/main.c | 5
-rw-r--r--  examples/multi_process/simple_mp/mp_commands.c | 3
-rw-r--r--  examples/multi_process/simple_mp/mp_commands.h | 1
-rw-r--r--  examples/multi_process/symmetric_mp/main.c | 23
-rw-r--r--  examples/netmap_compat/lib/compat_netmap.c | 53
-rw-r--r--  examples/netmap_compat/lib/compat_netmap.h | 2
-rw-r--r--  examples/packet_ordering/main.c | 19
-rw-r--r--  examples/performance-thread/common/lthread.h | 2
-rw-r--r--  examples/performance-thread/common/lthread_diag.c | 3
-rw-r--r--  examples/performance-thread/common/lthread_sched.c | 17
-rw-r--r--  examples/performance-thread/common/lthread_tls.c | 13
-rw-r--r--  examples/performance-thread/l3fwd-thread/main.c | 151
-rw-r--r--  examples/performance-thread/pthread_shim/main.c | 6
-rw-r--r--  examples/performance-thread/pthread_shim/pthread_shim.c | 5
-rw-r--r--  examples/ptpclient/ptpclient.c | 12
-rw-r--r--  examples/qos_meter/main.c | 4
-rw-r--r--  examples/qos_sched/args.c | 12
-rw-r--r--  examples/qos_sched/cmdline.c | 30
-rw-r--r--  examples/qos_sched/init.c | 33
-rw-r--r--  examples/qos_sched/main.c | 14
-rw-r--r--  examples/qos_sched/main.h | 24
-rw-r--r--  examples/qos_sched/stats.c | 16
-rw-r--r--  examples/quota_watermark/qw/init.c | 8
-rw-r--r--  examples/quota_watermark/qw/init.h | 4
-rw-r--r--  examples/quota_watermark/qw/main.c | 14
-rw-r--r--  examples/quota_watermark/qw/main.h | 2
-rw-r--r--  examples/rxtx_callbacks/main.c | 10
-rw-r--r--  examples/server_node_efd/node/node.c | 22
-rw-r--r--  examples/server_node_efd/server/Makefile | 2
-rw-r--r--  examples/server_node_efd/server/init.c | 19
-rw-r--r--  examples/server_node_efd/server/main.c | 4
-rw-r--r--  examples/server_node_efd/shared/common.h | 4
-rw-r--r--  examples/service_cores/Makefile (renamed from examples/vhost_xen/Makefile) | 16
-rw-r--r--  examples/service_cores/main.c | 246
-rw-r--r--  examples/skeleton/basicfwd.c | 12
-rw-r--r--  examples/tep_termination/main.c | 6
-rw-r--r--  examples/tep_termination/vxlan_setup.c | 6
-rw-r--r--  examples/tep_termination/vxlan_setup.h | 10
-rw-r--r--  examples/timer/main.c | 1
-rw-r--r--  examples/vhost/main.c | 10
-rw-r--r--  examples/vhost_scsi/scsi.c | 4
-rw-r--r--  examples/vhost_xen/main.c | 1522
-rw-r--r--  examples/vhost_xen/vhost_monitor.c | 595
-rw-r--r--  examples/vhost_xen/virtio-net.h | 113
-rw-r--r--  examples/vhost_xen/xen_vhost.h | 148
-rw-r--r--  examples/vhost_xen/xenstore_parse.c | 775
-rw-r--r--  examples/vm_power_manager/Makefile | 16
-rw-r--r--  examples/vm_power_manager/channel_manager.c | 67
-rw-r--r--  examples/vm_power_manager/channel_manager.h | 25
-rw-r--r--  examples/vm_power_manager/channel_monitor.c | 336
-rw-r--r--  examples/vm_power_manager/channel_monitor.h | 18
-rw-r--r--  examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 108
-rw-r--r--  examples/vm_power_manager/guest_cli/vm_power_cli_guest.h | 6
-rw-r--r--  examples/vm_power_manager/main.c | 261
-rw-r--r--  examples/vm_power_manager/power_manager.c | 68
-rw-r--r--  examples/vm_power_manager/power_manager.h | 65
-rw-r--r--  examples/vm_power_manager/vm_power_cli.c | 21
-rw-r--r--  examples/vmdq/main.c | 8
-rw-r--r--  examples/vmdq_dcb/main.c | 8
-rw-r--r--  lib/Makefile | 18
-rw-r--r--  lib/librte_acl/Makefile | 1
-rw-r--r--  lib/librte_acl/rte_acl.c | 3
-rw-r--r--  lib/librte_acl/rte_acl_osdep.h | 1
-rw-r--r--  lib/librte_bitratestats/Makefile | 3
-rw-r--r--  lib/librte_bitratestats/rte_bitrate.c | 2
-rw-r--r--  lib/librte_bitratestats/rte_bitrate.h | 2
-rw-r--r--  lib/librte_cfgfile/Makefile | 1
-rw-r--r--  lib/librte_cfgfile/rte_cfgfile.c | 441
-rw-r--r--  lib/librte_cfgfile/rte_cfgfile.h | 96
-rw-r--r--  lib/librte_cfgfile/rte_cfgfile_version.map | 11
-rw-r--r--  lib/librte_cmdline/Makefile | 1
-rw-r--r--  lib/librte_cmdline/cmdline.c | 3
-rw-r--r--  lib/librte_cmdline/cmdline_parse.c | 2
-rw-r--r--  lib/librte_cryptodev/Makefile | 6
-rw-r--r--  lib/librte_cryptodev/rte_crypto.h | 6
-rw-r--r--  lib/librte_cryptodev/rte_crypto_sym.h | 42
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev.c | 50
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev.h | 72
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev_pci.h | 92
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev_pmd.c | 212
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev_pmd.h | 116
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev_vdev.h | 100
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev_version.map | 16
-rw-r--r--  lib/librte_distributor/Makefile | 1
-rw-r--r--  lib/librte_distributor/rte_distributor.c | 2
-rw-r--r--  lib/librte_distributor/rte_distributor_v20.c | 2
-rw-r--r--  lib/librte_eal/bsdapp/eal/Makefile | 11
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal.c | 112
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal_interrupts.c | 35
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal_memory.c | 21
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal_thread.c | 1
-rw-r--r--  lib/librte_eal/bsdapp/eal/eal_timer.c | 1
-rw-r--r--  lib/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h | 107
-rw-r--r--  lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h | 137
-rw-r--r--  lib/librte_eal/bsdapp/eal/rte_eal_version.map | 239
-rw-r--r--  lib/librte_eal/common/Makefile | 10
-rw-r--r--  lib/librte_eal/common/arch/arm/rte_cpuflags.c | 2
-rw-r--r--  lib/librte_eal/common/arch/arm/rte_cycles.c | 45
-rw-r--r--  lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c | 2
-rw-r--r--  lib/librte_eal/common/arch/ppc_64/rte_cycles.c | 52
-rw-r--r--  lib/librte_eal/common/arch/x86/rte_cpuflags.c | 40
-rw-r--r--  lib/librte_eal/common/arch/x86/rte_cycles.c | 152
-rw-r--r--  lib/librte_eal/common/arch/x86/rte_memcpy.c | 58
-rw-r--r--  lib/librte_eal/common/arch/x86/rte_spinlock.c | 3
-rw-r--r--  lib/librte_eal/common/eal_common_bus.c | 49
-rw-r--r--  lib/librte_eal/common/eal_common_dev.c | 22
-rw-r--r--  lib/librte_eal/common/eal_common_errno.c | 22
-rw-r--r--  lib/librte_eal/common/eal_common_launch.c | 1
-rw-r--r--  lib/librte_eal/common/eal_common_log.c | 43
-rw-r--r--  lib/librte_eal/common/eal_common_memory.c | 5
-rw-r--r--  lib/librte_eal/common/eal_common_memzone.c | 6
-rw-r--r--  lib/librte_eal/common/eal_common_options.c | 11
-rw-r--r--  lib/librte_eal/common/eal_common_tailqs.c | 1
-rw-r--r--  lib/librte_eal/common/eal_common_thread.c | 14
-rw-r--r--  lib/librte_eal/common/eal_common_timer.c | 8
-rw-r--r--  lib/librte_eal/common/eal_internal_cfg.h | 3
-rw-r--r--  lib/librte_eal/common/eal_options.h | 4
-rw-r--r--  lib/librte_eal/common/eal_private.h | 155
-rw-r--r--  lib/librte_eal/common/include/arch/arm/rte_vect.h | 2
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_atomic_32.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_bitmap.h (renamed from lib/librte_sched/rte_bitmap.h) | 0
-rw-r--r--  lib/librte_eal/common/include/rte_bus.h | 42
-rw-r--r--  lib/librte_eal/common/include/rte_common.h | 23
-rw-r--r--  lib/librte_eal/common/include/rte_debug.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_dev.h | 31
-rw-r--r--  lib/librte_eal/common/include/rte_eal.h | 52
-rw-r--r--  lib/librte_eal/common/include/rte_eal_interrupts.h (renamed from lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h) | 25
-rw-r--r--  lib/librte_eal/common/include/rte_interrupts.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_lcore.h | 14
-rw-r--r--  lib/librte_eal/common/include/rte_log.h | 30
-rw-r--r--  lib/librte_eal/common/include/rte_malloc.h | 17
-rw-r--r--  lib/librte_eal/common/include/rte_memory.h | 103
-rw-r--r--  lib/librte_eal/common/include/rte_memzone.h | 6
-rw-r--r--  lib/librte_eal/common/include/rte_pci.h | 598
-rw-r--r--  lib/librte_eal/common/include/rte_service.h | 197
-rw-r--r--  lib/librte_eal/common/include/rte_service_component.h | 36
-rw-r--r--  lib/librte_eal/common/include/rte_version.h | 6
-rw-r--r--  lib/librte_eal/common/include/rte_vfio.h | 153
-rw-r--r--  lib/librte_eal/common/malloc_elem.c | 8
-rw-r--r--  lib/librte_eal/common/malloc_elem.h | 4
-rw-r--r--  lib/librte_eal/common/rte_malloc.c | 21
-rw-r--r--  lib/librte_eal/common/rte_service.c | 362
-rw-r--r--  lib/librte_eal/linuxapp/Makefile | 2
-rw-r--r--  lib/librte_eal/linuxapp/eal/Makefile | 20
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal.c | 117
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_alarm.c | 1
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_hugepage_info.c | 1
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_interrupts.c | 21
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_log.c | 1
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memory.c | 99
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_thread.c | 1
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_timer.c | 1
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio.c | 75
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio.h | 49
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c | 7
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_xen_memory.c | 381
-rw-r--r--  lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h | 108
-rw-r--r--  lib/librte_eal/linuxapp/igb_uio/compat.h | 21
-rw-r--r--  lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 315
-rw-r--r--  lib/librte_eal/linuxapp/kni/compat.h | 31
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h | 24
-rw-r--r--  lib/librte_eal/linuxapp/xen_dom0/compat.h | 15
-rw-r--r--  lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h | 107
-rw-r--r--  lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c | 780
-rw-r--r--  lib/librte_eal/rte_eal_version.map (renamed from lib/librte_eal/linuxapp/eal/rte_eal_version.map) | 72
-rw-r--r--  lib/librte_efd/Makefile | 1
-rw-r--r--  lib/librte_efd/rte_efd.c | 3
-rw-r--r--  lib/librte_ether/Makefile | 10
-rw-r--r--  lib/librte_ether/ethdev_profile.c | 164
-rw-r--r--  lib/librte_ether/ethdev_profile.h (renamed from examples/vhost_xen/main.h) | 50
-rw-r--r--  lib/librte_ether/rte_ethdev.c | 960
-rw-r--r--  lib/librte_ether/rte_ethdev.h | 434
-rw-r--r--  lib/librte_ether/rte_ethdev_pci.h | 1
-rw-r--r--  lib/librte_ether/rte_ethdev_vdev.h | 2
-rw-r--r--  lib/librte_ether/rte_ethdev_version.map (renamed from lib/librte_ether/rte_ether_version.map) | 28
-rw-r--r--  lib/librte_ether/rte_flow.c | 63
-rw-r--r--  lib/librte_ether/rte_flow.h | 175
-rw-r--r--  lib/librte_ether/rte_flow_driver.h | 40
-rw-r--r--  lib/librte_ether/rte_mtr.c | 229
-rw-r--r--  lib/librte_ether/rte_mtr.h | 730
-rw-r--r--  lib/librte_ether/rte_mtr_driver.h | 221
-rw-r--r--  lib/librte_ether/rte_tm.c | 62
-rw-r--r--  lib/librte_ether/rte_tm.h | 60
-rw-r--r--  lib/librte_ether/rte_tm_driver.h | 2
-rw-r--r--  lib/librte_eventdev/Makefile | 5
-rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.c | 1240
-rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.h | 444
-rw-r--r--  lib/librte_eventdev/rte_eventdev.c | 286
-rw-r--r--  lib/librte_eventdev/rte_eventdev.h | 312
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd.h | 182
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_pci.h | 1
-rw-r--r--  lib/librte_eventdev/rte_eventdev_pmd_vdev.h | 2
-rw-r--r--  lib/librte_eventdev/rte_eventdev_version.map | 27
-rw-r--r--  lib/librte_flow_classify/Makefile | 53
-rw-r--r--  lib/librte_flow_classify/rte_flow_classify.c | 691
-rw-r--r--  lib/librte_flow_classify/rte_flow_classify.h | 289
-rw-r--r--  lib/librte_flow_classify/rte_flow_classify_parse.c | 546
-rw-r--r--  lib/librte_flow_classify/rte_flow_classify_parse.h | 74
-rw-r--r--  lib/librte_flow_classify/rte_flow_classify_version.map | 12
-rw-r--r--  lib/librte_gro/Makefile | 1
-rw-r--r--  lib/librte_gro/rte_gro_version.map | 4
-rw-r--r--  lib/librte_gso/Makefile | 54
-rw-r--r--  lib/librte_gso/gso_common.c | 153
-rw-r--r--  lib/librte_gso/gso_common.h | 171
-rw-r--r--  lib/librte_gso/gso_tcp4.c | 102
-rw-r--r--  lib/librte_gso/gso_tcp4.h | 74
-rw-r--r--  lib/librte_gso/gso_tunnel_tcp4.c | 126
-rw-r--r--  lib/librte_gso/gso_tunnel_tcp4.h | 75
-rw-r--r--  lib/librte_gso/rte_gso.c | 110
-rw-r--r--  lib/librte_gso/rte_gso.h | 148
-rw-r--r--  lib/librte_gso/rte_gso_version.map | 7
-rw-r--r--  lib/librte_hash/Makefile | 1
-rw-r--r--  lib/librte_hash/rte_crc_arm64.h | 3
-rw-r--r--  lib/librte_hash/rte_cuckoo_hash.c | 14
-rw-r--r--  lib/librte_hash/rte_fbk_hash.c | 1
-rw-r--r--  lib/librte_hash/rte_hash_crc.h | 3
-rw-r--r--  lib/librte_hash/rte_jhash.h | 5
-rw-r--r--  lib/librte_hash/rte_thash.h | 22
-rw-r--r--  lib/librte_ip_frag/Makefile | 4
-rw-r--r--  lib/librte_ip_frag/rte_ip_frag_version.map (renamed from lib/librte_ip_frag/rte_ipfrag_version.map) | 0
-rw-r--r--  lib/librte_jobstats/Makefile | 1
-rw-r--r--  lib/librte_jobstats/rte_jobstats.h | 2
-rw-r--r--  lib/librte_kni/Makefile | 1
-rw-r--r--  lib/librte_kni/rte_kni.c | 2
-rw-r--r--  lib/librte_kni/rte_kni.h | 8
-rw-r--r--  lib/librte_kvargs/Makefile | 1
-rw-r--r--  lib/librte_latencystats/Makefile | 1
-rw-r--r--  lib/librte_latencystats/rte_latencystats.c | 12
-rw-r--r--  lib/librte_lpm/Makefile | 1
-rw-r--r--  lib/librte_lpm/rte_lpm.c | 7
-rw-r--r--  lib/librte_lpm/rte_lpm6.c | 6
-rw-r--r--  lib/librte_mbuf/Makefile | 1
-rw-r--r--  lib/librte_mbuf/rte_mbuf.c | 29
-rw-r--r--  lib/librte_mbuf/rte_mbuf.h | 125
-rw-r--r--  lib/librte_mbuf/rte_mbuf_ptype.c | 3
-rw-r--r--  lib/librte_mbuf/rte_mbuf_ptype.h | 43
-rw-r--r--  lib/librte_member/Makefile (renamed from lib/librte_eal/linuxapp/xen_dom0/Makefile) | 33
-rw-r--r--  lib/librte_member/rte_member.c | 336
-rw-r--r--  lib/librte_member/rte_member.h | 513
-rw-r--r--  lib/librte_member/rte_member_ht.c | 586
-rw-r--r--  lib/librte_member/rte_member_ht.h | 94
-rw-r--r--  lib/librte_member/rte_member_vbf.c | 350
-rw-r--r--  lib/librte_member/rte_member_vbf.h | 82
-rw-r--r--  lib/librte_member/rte_member_version.map | 16
-rw-r--r--  lib/librte_member/rte_member_x86.h | 107
-rw-r--r--  lib/librte_mempool/Makefile | 3
-rw-r--r--  lib/librte_mempool/rte_mempool.c | 177
-rw-r--r--  lib/librte_mempool/rte_mempool.h | 194
-rw-r--r--  lib/librte_mempool/rte_mempool_ops.c | 29
-rw-r--r--  lib/librte_mempool/rte_mempool_version.map | 10
-rw-r--r--  lib/librte_meter/Makefile | 1
-rw-r--r--  lib/librte_metrics/Makefile | 1
-rw-r--r--  lib/librte_metrics/rte_metrics.c | 2
-rw-r--r--  lib/librte_net/Makefile | 3
-rw-r--r--  lib/librte_net/rte_esp.h (renamed from drivers/net/xenvirt/rte_eth_xenvirt.h) | 35
-rw-r--r--  lib/librte_net/rte_ether.h | 2
-rw-r--r--  lib/librte_net/rte_net.c | 1
-rw-r--r--  lib/librte_net/rte_net_crc.c | 3
-rw-r--r--  lib/librte_pci/Makefile | 49
-rw-r--r--  lib/librte_pci/rte_pci.c | 212
-rw-r--r--  lib/librte_pci/rte_pci.h | 263
-rw-r--r--  lib/librte_pci/rte_pci_version.map | 15
-rw-r--r--  lib/librte_pdump/Makefile | 3
-rw-r--r--  lib/librte_pdump/rte_pdump.c | 16
-rw-r--r--  lib/librte_pdump/rte_pdump.h | 4
-rw-r--r--  lib/librte_pipeline/Makefile | 2
-rw-r--r--  lib/librte_pipeline/rte_pipeline.c | 1
-rw-r--r--  lib/librte_port/Makefile | 5
-rw-r--r--  lib/librte_port/rte_port_ethdev.c | 6
-rw-r--r--  lib/librte_port/rte_port_ethdev.h | 6
-rw-r--r--  lib/librte_power/Makefile | 1
-rw-r--r--  lib/librte_power/channel_commands.h | 44
-rw-r--r--  lib/librte_power/guest_channel.c | 7
-rw-r--r--  lib/librte_power/guest_channel.h | 15
-rw-r--r--  lib/librte_power/rte_power.c | 9
-rw-r--r--  lib/librte_power/rte_power.h | 41
-rw-r--r--  lib/librte_power/rte_power_acpi_cpufreq.c | 111
-rw-r--r--  lib/librte_power/rte_power_acpi_cpufreq.h | 40
-rw-r--r--  lib/librte_power/rte_power_kvm_vm.c | 19
-rw-r--r--  lib/librte_power/rte_power_kvm_vm.h | 35
-rw-r--r--  lib/librte_power/rte_power_version.map | 10
-rw-r--r--  lib/librte_reorder/Makefile | 1
-rw-r--r--  lib/librte_reorder/rte_reorder.c | 1
-rw-r--r--  lib/librte_ring/Makefile | 1
-rw-r--r--  lib/librte_ring/rte_ring.h | 2
-rw-r--r--  lib/librte_sched/Makefile | 4
-rw-r--r--  lib/librte_sched/rte_sched.c | 2
-rw-r--r--  lib/librte_security/Makefile | 54
-rw-r--r--  lib/librte_security/rte_security.c | 149
-rw-r--r--  lib/librte_security/rte_security.h | 529
-rw-r--r--  lib/librte_security/rte_security_driver.h | 156
-rw-r--r--  lib/librte_security/rte_security_version.map | 14
-rw-r--r--  lib/librte_table/Makefile | 7
-rw-r--r--  lib/librte_table/rte_table_hash.h | 305
-rw-r--r--  lib/librte_table/rte_table_hash_cuckoo.c | 205
-rw-r--r--  lib/librte_table/rte_table_hash_ext.c | 417
-rw-r--r--  lib/librte_table/rte_table_hash_key16.c | 750
-rw-r--r--  lib/librte_table/rte_table_hash_key32.c | 436
-rw-r--r--  lib/librte_table/rte_table_hash_key8.c | 716
-rw-r--r--  lib/librte_table/rte_table_hash_lru.c | 516
-rw-r--r--  lib/librte_table/rte_table_version.map | 26
-rw-r--r--  lib/librte_timer/Makefile | 1
-rw-r--r--  lib/librte_timer/rte_timer.c | 6
-rw-r--r--  lib/librte_vhost/Makefile | 5
-rw-r--r--  lib/librte_vhost/fd_man.c | 5
-rw-r--r--  lib/librte_vhost/iotlb.c | 350
-rw-r--r--  lib/librte_vhost/iotlb.h | 76
-rw-r--r--  lib/librte_vhost/rte_vhost.h | 6
-rw-r--r--  lib/librte_vhost/socket.c | 37
-rw-r--r--  lib/librte_vhost/vhost.c | 131
-rw-r--r--  lib/librte_vhost/vhost.h | 76
-rw-r--r--  lib/librte_vhost/vhost_user.c | 370
-rw-r--r--  lib/librte_vhost/vhost_user.h | 25
-rw-r--r--  lib/librte_vhost/virtio_net.c | 339
-rw-r--r--  mk/machine/dpaa/rte.vars.mk | 61
-rw-r--r--  mk/machine/silvermont/rte.vars.mk (renamed from mk/machine/atm/rte.vars.mk) | 2
-rw-r--r--  mk/rte.app.mk | 40
-rw-r--r--  mk/rte.combinedlib.mk | 2
-rw-r--r--  mk/rte.extsubdir.mk | 1
-rw-r--r--  mk/rte.hostapp.mk | 4
-rw-r--r--  mk/rte.lib.mk | 15
-rw-r--r--  mk/rte.sdkdoc.mk | 2
-rw-r--r--  mk/rte.shared.mk | 4
-rw-r--r--  mk/rte.subdir.mk | 3
-rw-r--r--  pkg/dpdk.spec | 6
-rw-r--r--  test/test-pipeline/config.c | 1
-rw-r--r--  test/test-pipeline/init.c | 9
-rw-r--r--  test/test-pipeline/main.c | 1
-rw-r--r--  test/test-pipeline/main.h | 5
-rw-r--r--  test/test-pipeline/pipeline_hash.c | 107
-rw-r--r--  test/test-pipeline/runtime.c | 1
-rw-r--r--  test/test/Makefile | 8
-rw-r--r--  test/test/packet_burst_generator.c | 191
-rw-r--r--  test/test/packet_burst_generator.h | 22
-rw-r--r--  test/test/process.h | 10
-rw-r--r--  test/test/test.c | 5
-rw-r--r--  test/test/test_atomic.c | 1
-rw-r--r--  test/test/test_bitmap.c | 192
-rw-r--r--  test/test/test_cfgfile.c | 40
-rw-r--r--  test/test/test_cfgfiles/etc/realloc_sections.ini | 128
-rw-r--r--  test/test/test_cpuflags.c | 3
-rw-r--r--  test/test/test_cryptodev.c | 1016
-rw-r--r--  test/test/test_cryptodev.h | 9
-rw-r--r--  test/test/test_cryptodev_aead_test_vectors.h (renamed from test/test/test_cryptodev_gcm_test_vectors.h) | 516
-rw-r--r--  test/test/test_cryptodev_aes_test_vectors.h | 126
-rw-r--r--  test/test/test_cryptodev_blockcipher.c | 53
-rw-r--r--  test/test/test_cryptodev_blockcipher.h | 2
-rw-r--r--  test/test/test_cryptodev_des_test_vectors.h | 70
-rw-r--r--  test/test/test_cryptodev_hash_test_vectors.h | 12
-rw-r--r--  test/test/test_cryptodev_perf.c | 4932
-rw-r--r--  test/test/test_eal_flags.c | 81
-rw-r--r--  test/test/test_event_eth_rx_adapter.c | 454
-rw-r--r--  test/test/test_eventdev.c | 312
-rw-r--r--  test/test/test_eventdev_octeontx.c | 144
-rw-r--r--  test/test/test_eventdev_sw.c | 90
-rw-r--r--  test/test/test_flow_classify.c | 672
-rw-r--r--  test/test/test_flow_classify.h | 234
-rw-r--r--  test/test/test_func_reentrancy.c | 1
-rw-r--r--  test/test/test_hash.c | 1
-rw-r--r--  test/test/test_kni.c | 13
-rw-r--r--  test/test/test_link_bonding.c | 42
-rw-r--r--  test/test/test_link_bonding_mode4.c | 18
-rw-r--r--  test/test/test_link_bonding_rssconf.c | 8
-rw-r--r--  test/test/test_logs.c | 11
-rw-r--r--  test/test/test_malloc.c | 13
-rw-r--r--  test/test/test_mbuf.c | 3
-rw-r--r--  test/test/test_member.c | 744
-rw-r--r--  test/test/test_member_perf.c | 654
-rw-r--r--  test/test/test_memcpy_perf.c | 51
-rw-r--r--  test/test/test_mempool.c | 30
-rw-r--r--  test/test/test_mempool_perf.c | 5
-rw-r--r--  test/test/test_memzone.c | 80
-rw-r--r--  test/test/test_per_lcore.c | 1
-rw-r--r--  test/test/test_pmd_perf.c | 45
-rw-r--r--  test/test/test_pmd_ring_perf.c | 2
-rw-r--r--  test/test/test_ring.c | 60
-rw-r--r--  test/test/test_rwlock.c | 1
-rw-r--r--  test/test/test_service_cores.c | 268
-rw-r--r--  test/test/test_spinlock.c | 1
-rw-r--r--  test/test/test_table.c | 1
-rw-r--r--  test/test/test_table.h | 3
-rw-r--r--  test/test/test_table_combined.c | 137
-rw-r--r--  test/test/test_table_tables.c | 150
-rw-r--r--  test/test/test_timer.c | 1
-rw-r--r--  test/test/virtual_pmd.c | 29
-rw-r--r--  test/test/virtual_pmd.h | 28
-rwxr-xr-x  usertools/dpdk-devbind.py | 3
1252 files changed, 127967 insertions, 51032 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index a0cd75e1..8d45ad0b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -21,33 +21,37 @@ Descriptions of section entries:
General Project Administration
------------------------------
+
+Main Branch
M: Thomas Monjalon <thomas@monjalon.net>
+M: Ferruh Yigit <ferruh.yigit@intel.com>
T: git://dpdk.org/dpdk
-F: MAINTAINERS
-F: devtools/check-dup-includes.sh
-F: devtools/check-maintainers.sh
-F: devtools/check-git-log.sh
-F: devtools/check-includes.sh
-F: devtools/checkpatches.sh
-F: devtools/git-log-fixes.sh
-F: devtools/load-devel-config
-F: devtools/test-build.sh
Stable Branches
----------------
+M: Yuanhan Liu <yliu@fridaylinux.org>
T: git://dpdk.org/dpdk-stable
Security Issues
----------------
M: maintainers@dpdk.org
-
Documentation (with overlaps)
--------------
M: John McNamara <john.mcnamara@intel.com>
F: README
F: doc/
+Developers and Maintainers Tools
+M: Thomas Monjalon <thomas@monjalon.net>
+F: MAINTAINERS
+F: devtools/check-dup-includes.sh
+F: devtools/check-maintainers.sh
+F: devtools/check-git-log.sh
+F: devtools/check-includes.sh
+F: devtools/checkpatches.sh
+F: devtools/get-maintainer.sh
+F: devtools/git-log-fixes.sh
+F: devtools/load-devel-config
+F: devtools/test-build.sh
+
Build System
------------
@@ -85,6 +89,7 @@ EAL API and common code
F: lib/librte_eal/common/*
F: lib/librte_eal/common/include/*
F: lib/librte_eal/common/include/generic/
+F: lib/librte_eal/rte_eal_version.map
F: doc/guides/prog_guide/env_abstraction_layer.rst
F: test/test/test_alarm.c
F: test/test/test_atomic.c
@@ -143,15 +148,20 @@ F: lib/librte_eal/common/rte_service.c
F: doc/guides/prog_guide/service_cores.rst
F: test/test/test_service_cores.c
+Bitmap
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+F: lib/librte_eal/common/include/rte_bitmap.h
+F: test/test/test_bitmap.c
+
ARM v7
M: Jan Viktorin <viktorin@rehivetech.com>
-M: Jianbo Liu <jianbo.liu@linaro.org>
+M: Jianbo Liu <jianbo.liu@arm.com>
F: lib/librte_eal/common/arch/arm/
F: lib/librte_eal/common/include/arch/arm/
ARM v8
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
-M: Jianbo Liu <jianbo.liu@linaro.org>
+M: Jianbo Liu <jianbo.liu@arm.com>
F: lib/librte_eal/common/include/arch/arm/*_64.h
F: lib/librte_net/net_crc_neon.h
F: lib/librte_acl/acl_run_neon.*
@@ -168,6 +178,7 @@ M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
F: lib/librte_eal/common/arch/ppc_64/
F: lib/librte_eal/common/include/arch/ppc_64/
F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
+F: examples/l3fwd/*altivec.h
Intel x86
M: Bruce Richardson <bruce.richardson@intel.com>
@@ -183,21 +194,12 @@ F: doc/guides/linux_gsg/
Linux UIO
M: Ferruh Yigit <ferruh.yigit@intel.com>
F: lib/librte_eal/linuxapp/igb_uio/
-F: lib/librte_eal/linuxapp/eal/*uio*
+F: drivers/bus/pci/linux/*uio*
Linux VFIO
M: Anatoly Burakov <anatoly.burakov@intel.com>
F: lib/librte_eal/linuxapp/eal/*vfio*
-
-Linux Xen
-M: Jianfeng Tan <jianfeng.tan@intel.com>
-F: lib/librte_eal/linuxapp/xen_dom0/
-F: lib/librte_eal/linuxapp/eal/*xen*
-F: lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
-F: drivers/net/xenvirt/
-F: doc/guides/xen/
-F: examples/vhost_xen/
-F: doc/guides/nics/features/xenvirt.ini
+F: drivers/bus/pci/linux/*vfio*
FreeBSD EAL (with overlaps)
M: Bruce Richardson <bruce.richardson@intel.com>
@@ -257,19 +259,46 @@ M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
T: git://dpdk.org/next/dpdk-next-tm
F: lib/librte_ether/rte_tm*
+Traffic Metering and Policing API - EXPERIMENTAL
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+F: lib/librte_ether/rte_mtr*
+
Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
F: test/test/test_cryptodev*
F: examples/l2fwd-crypto/
-Eventdev API - EXPERIMENTAL
+Security API - EXPERIMENTAL
+M: Akhil Goyal <akhil.goyal@nxp.com>
+M: Declan Doherty <declan.doherty@intel.com>
+F: lib/librte_security/
+F: doc/guides/prog_guide/rte_security.rst
+
+Eventdev API
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
T: git://dpdk.org/next/dpdk-next-eventdev
F: lib/librte_eventdev/
F: drivers/event/skeleton/
F: test/test/test_eventdev.c
+Eventdev Ethdev Rx Adapter API - EXPERIMENTAL
+M: Nikhil Rao <nikhil.rao@intel.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+F: lib/librte_eventdev/*eth_rx_adapter*
+F: test/test/test_event_eth_rx_adapter.c
+F: doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+
+
+Bus Drivers
+-----------
+
+PCI bus driver
+F: drivers/bus/pci/
+
+VDEV bus driver
+F: drivers/bus/vdev/
+
Networking Drivers
------------------
@@ -337,6 +366,14 @@ F: drivers/net/liquidio/
F: doc/guides/nics/liquidio.rst
F: doc/guides/nics/features/liquidio.ini
+Cavium OcteonTX
+M: Santosh Shukla <santosh.shukla@caviumnetworks.com>
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+F: drivers/mempool/octeontx/
+F: drivers/net/octeontx/
+F: doc/guides/nics/octeontx.rst
+F: doc/guides/nics/features/octeontx.ini
+
Chelsio cxgbe
M: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
F: drivers/net/cxgbe/
@@ -389,10 +426,21 @@ F: doc/guides/nics/features/mlx4.ini
Mellanox mlx5
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
+M: Yongseok Koh <yskoh@mellanox.com>
F: drivers/net/mlx5/
F: doc/guides/nics/mlx5.rst
F: doc/guides/nics/features/mlx5.ini
+Marvell mrvl
+M: Jacek Siuda <jck@semihalf.com>
+M: Tomasz Duszynski <tdu@semihalf.com>
+M: Dmitri Epshtein <dima@marvell.com>
+M: Natalie Samsonov <nsamsono@marvell.com>
+M: Jianbo Liu <jianbo.liu@arm.com>
+F: drivers/net/mrvl/
+F: doc/guides/nics/mrvl.rst
+F: doc/guides/nics/features/mrvl.ini
+
Netcope szedata2
M: Matej Vido <vido@cesnet.cz>
F: drivers/net/szedata2/
@@ -403,7 +451,16 @@ Netronome nfp
M: Alejandro Lucero <alejandro.lucero@netronome.com>
F: drivers/net/nfp/
F: doc/guides/nics/nfp.rst
-F: doc/guides/nics/features/nfp.ini
+F: doc/guides/nics/features/nfp*.ini
+
+NXP dpaa
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+M: Shreyansh Jain <shreyansh.jain@nxp.com>
+F: drivers/bus/dpaa/
+F: drivers/mempool/dpaa/
+F: drivers/net/dpaa/
+F: doc/guides/nics/dpaa.rst
+F: doc/guides/nics/features/dpaa.ini
NXP dpaa2
M: Hemant Agrawal <hemant.agrawal@nxp.com>
@@ -511,6 +568,11 @@ M: Gaetan Rivet <gaetan.rivet@6wind.com>
F: drivers/net/failsafe/
F: doc/guides/nics/fail_safe.rst
+Softnic PMD
+M: Jasvinder Singh <jasvinder.singh@intel.com>
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+F: drivers/net/softnic/
+
Crypto Drivers
--------------
@@ -518,13 +580,18 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
T: git://dpdk.org/next/dpdk-next-crypto
F: doc/guides/cryptodevs/features/default.ini
-ARMv8 Crypto PMD
+ARMv8 Crypto
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
F: drivers/crypto/armv8/
F: doc/guides/cryptodevs/armv8.rst
F: doc/guides/cryptodevs/features/armv8.ini
-Intel AES-NI GCM PMD
+Crypto Scheduler
+M: Fan Zhang <roy.fan.zhang@intel.com>
+F: drivers/crypto/scheduler/
+F: doc/guides/cryptodevs/scheduler.rst
+
+Intel AES-NI GCM
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/aesni_gcm/
F: doc/guides/cryptodevs/aesni_gcm.rst
@@ -544,6 +611,35 @@ F: drivers/crypto/qat/
F: doc/guides/cryptodevs/qat.rst
F: doc/guides/cryptodevs/features/qat.ini
+KASUMI
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+F: drivers/crypto/kasumi/
+F: doc/guides/cryptodevs/kasumi.rst
+F: doc/guides/cryptodevs/features/kasumi.ini
+
+Marvell Mrvl
+M: Jacek Siuda <jck@semihalf.com>
+M: Tomasz Duszynski <tdu@semihalf.com>
+M: Dmitri Epshtein <dima@marvell.com>
+M: Natalie Samsonov <nsamsono@marvell.com>
+M: Jianbo Liu <jianbo.liu@arm.com>
+F: drivers/crypto/mrvl/
+F: doc/guides/cryptodevs/mrvl.rst
+F: doc/guides/cryptodevs/features/mrvl.ini
+
+Null Crypto
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/null/
+F: doc/guides/cryptodevs/null.rst
+F: doc/guides/cryptodevs/features/null.ini
+
+NXP DPAA_SEC
+M: Akhil Goyal <akhil.goyal@nxp.com>
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+F: drivers/crypto/dpaa_sec/
+F: doc/guides/cryptodevs/dpaa_sec.rst
+F: doc/guides/cryptodevs/features/dpaa_sec.ini
+
NXP DPAA2_SEC
M: Akhil Goyal <akhil.goyal@nxp.com>
M: Hemant Agrawal <hemant.agrawal@nxp.com>
@@ -551,41 +647,24 @@ F: drivers/crypto/dpaa2_sec/
F: doc/guides/cryptodevs/dpaa2_sec.rst
F: doc/guides/cryptodevs/features/dpaa2_sec.ini
-SNOW 3G PMD
+OpenSSL
+M: Declan Doherty <declan.doherty@intel.com>
+F: drivers/crypto/openssl/
+F: doc/guides/cryptodevs/openssl.rst
+F: doc/guides/cryptodevs/features/openssl.ini
+
+SNOW 3G
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/snow3g/
F: doc/guides/cryptodevs/snow3g.rst
F: doc/guides/cryptodevs/features/snow3g.ini
-KASUMI PMD
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/kasumi/
-F: doc/guides/cryptodevs/kasumi.rst
-F: doc/guides/cryptodevs/features/kasumi.ini
-
-ZUC PMD
+ZUC
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/zuc/
F: doc/guides/cryptodevs/zuc.rst
F: doc/guides/cryptodevs/features/zuc.ini
-OpenSSL PMD
-M: Declan Doherty <declan.doherty@intel.com>
-F: drivers/crypto/openssl/
-F: doc/guides/cryptodevs/openssl.rst
-F: doc/guides/cryptodevs/features/openssl.ini
-
-Null Crypto PMD
-M: Declan Doherty <declan.doherty@intel.com>
-F: drivers/crypto/null/
-F: doc/guides/cryptodevs/null.rst
-F: doc/guides/cryptodevs/features/null.ini
-
-Crypto Scheduler PMD
-M: Fan Zhang <roy.fan.zhang@intel.com>
-F: drivers/crypto/scheduler/
-F: doc/guides/cryptodevs/scheduler.rst
-
Eventdev Drivers
----------------
@@ -641,6 +720,20 @@ M: Jiayu Hu <jiayu.hu@intel.com>
F: lib/librte_gro/
F: doc/guides/prog_guide/generic_receive_offload_lib.rst
+Generic Segmentation Offload
+M: Jiayu Hu <jiayu.hu@intel.com>
+M: Mark Kavanagh <mark.b.kavanagh@intel.com>
+F: lib/librte_gso/
+F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+
+Flow Classify - EXPERIMENTAL
+M: Bernard Iremonger <bernard.iremonger@intel.com>
+F: lib/librte_flow_classify/
+F: test/test/test_flow_classify*
+F: doc/guides/prog_guide/flow_classify_lib.rst
+F: examples/flow_classify/
+F: doc/guides/sample_app_ug/flow_classify.rst
+
Distributor
M: Bruce Richardson <bruce.richardson@intel.com>
M: David Hunt <david.hunt@intel.com>
@@ -674,7 +767,6 @@ F: doc/guides/prog_guide/pdump_lib.rst
F: app/pdump/
F: doc/guides/tools/pdump.rst
-
Packet Framework
----------------
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
@@ -726,6 +818,13 @@ F: test/test/test_lpm*
F: test/test/test_func_reentrancy.c
F: test/test/test_xmmt_ops.h
+Membership - EXPERIMENTAL
+M: Yipeng Wang <yipeng1.wang@intel.com>
+M: Sameh Gobriel <sameh.gobriel@intel.com>
+F: lib/librte_member/
+F: doc/guides/prog_guide/member_lib.rst
+F: test/test/test_member*
+
Traffic metering
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_meter/
@@ -734,7 +833,6 @@ F: test/test/test_meter.c
F: examples/qos_meter/
F: doc/guides/sample_app_ug/qos_metering.rst
-
Other libraries
---------------
@@ -757,6 +855,10 @@ M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_kvargs/
F: test/test/test_kvargs.c
+PCI
+M: Gaetan Rivet <gaetan.rivet@6wind.com>
+F: lib/librte_pci/
+
Power management
M: David Hunt <david.hunt@intel.com>
F: lib/librte_power/
@@ -847,12 +949,17 @@ F: doc/guides/sample_app_ug/ethtool.rst
F: examples/exception_path/
F: doc/guides/sample_app_ug/exception_path.rst
+M: Ori Kam <orika@mellanox.com>
+F: examples/flow_filtering/
+F: doc/guides/sample_app_ug/flow_filtering.rst
+
M: Bruce Richardson <bruce.richardson@intel.com>
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: examples/helloworld/
F: doc/guides/sample_app_ug/hello_world.rst
M: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
+M: Radu Nicolau <radu.nicolau@intel.com>
F: examples/ipsec-secgw/
F: doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -900,6 +1007,10 @@ M: John McNamara <john.mcnamara@intel.com>
F: examples/rxtx_callbacks/
F: doc/guides/sample_app_ug/rxtx_callbacks.rst
+M: Harry van Haaren <harry.van.haaren@intel.com>
+F: examples/service_cores/
+F: doc/guides/sample_app_ug/service_cores.rst
+
M: Bruce Richardson <bruce.richardson@intel.com>
M: John McNamara <john.mcnamara@intel.com>
F: examples/skeleton/
diff --git a/app/pdump/main.c b/app/pdump/main.c
index 3b13753d..66272f59 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -131,7 +131,7 @@ struct pdump_stats {
struct pdump_tuples {
/* cli params */
- uint8_t port;
+ uint16_t port;
char *device_id;
uint16_t queue;
char rx_dev[TX_STREAM_SIZE];
@@ -579,7 +579,7 @@ signal_handler(int sig_num)
}
static inline int
-configure_vdev(uint8_t port_id)
+configure_vdev(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 0, txRings = 1;
@@ -609,7 +609,7 @@ configure_vdev(uint8_t port_id)
rte_eth_macaddr_get(port_id, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port_id,
+ port_id,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -623,7 +623,7 @@ static void
create_mp_ring_vdev(void)
{
int i;
- uint8_t portid;
+ uint16_t portid;
struct pdump_tuples *pt = NULL;
struct rte_mempool *mbuf_pool = NULL;
char vdev_args[SIZE];
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 8b753a2e..64fbbd0f 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -310,7 +310,7 @@ meminfo_display(void)
}
static void
-nic_stats_display(uint8_t port_id)
+nic_stats_display(uint16_t port_id)
{
struct rte_eth_stats stats;
uint8_t i;
@@ -349,7 +349,7 @@ nic_stats_display(uint8_t port_id)
}
static void
-nic_stats_clear(uint8_t port_id)
+nic_stats_clear(uint16_t port_id)
{
printf("\n Clearing NIC stats for port %d\n", port_id);
rte_eth_stats_reset(port_id);
@@ -411,7 +411,7 @@ static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len,
}
static void
-nic_xstats_by_name_display(uint8_t port_id, char *name)
+nic_xstats_by_name_display(uint16_t port_id, char *name)
{
uint64_t id;
@@ -426,7 +426,7 @@ nic_xstats_by_name_display(uint8_t port_id, char *name)
}
static void
-nic_xstats_by_ids_display(uint8_t port_id, uint64_t *ids, int len)
+nic_xstats_by_ids_display(uint16_t port_id, uint64_t *ids, int len)
{
struct rte_eth_xstat_name *xstats_names;
uint64_t *values;
@@ -473,7 +473,7 @@ err:
}
static void
-nic_xstats_display(uint8_t port_id)
+nic_xstats_display(uint16_t port_id)
{
struct rte_eth_xstat_name *xstats_names;
uint64_t *values;
@@ -524,7 +524,9 @@ nic_xstats_display(uint8_t port_id)
sprintf(buf, "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
PRIu64"\n", host_id, port_id, counter_type,
xstats_names[i].name, values[i]);
- write(stdout_fd, buf, strlen(buf));
+ ret = write(stdout_fd, buf, strlen(buf));
+ if (ret < 0)
+ goto err;
} else {
printf("%s: %"PRIu64"\n", xstats_names[i].name,
values[i]);
@@ -539,7 +541,7 @@ err:
}
static void
-nic_xstats_clear(uint8_t port_id)
+nic_xstats_clear(uint16_t port_id)
{
printf("\n Clearing NIC xstats for port %d\n", port_id);
rte_eth_xstats_reset(port_id);
@@ -616,7 +618,7 @@ main(int argc, char **argv)
char n_flag[] = "-n4";
char mp_flag[] = "--proc-type=secondary";
char *argp[argc + 3];
- uint8_t nb_ports;
+ uint16_t nb_ports;
/* preparse app arguments */
ret = proc_info_preparse_args(argc, argv);
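Both files above move their port identifiers from uint8_t to uint16_t, matching the ethdev port id widening in this release, and the collectd output path now checks write()'s return value. A minimal sketch of a port loop using the widened type; walk_ports and nb_ports are illustrative names, not from the patch:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: 16-bit port ids allow more than 255 ethdev ports. */
static void
walk_ports(uint16_t nb_ports)
{
	uint16_t portid;

	for (portid = 0; portid < nb_ports; portid++)
		printf("port %u\n", portid);
}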
diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile
index e4a989fe..c75d7ed1 100644
--- a/app/test-crypto-perf/Makefile
+++ b/app/test-crypto-perf/Makefile
@@ -42,7 +42,13 @@ SRCS-y += cperf_options_parsing.c
SRCS-y += cperf_test_vectors.c
SRCS-y += cperf_test_throughput.c
SRCS-y += cperf_test_latency.c
+SRCS-y += cperf_test_pmd_cyclecount.c
SRCS-y += cperf_test_verify.c
SRCS-y += cperf_test_vector_parsing.c
+SRCS-y += cperf_test_common.c
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
+LDLIBS += -lrte_pmd_crypto_scheduler
+endif
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 88fb9725..23d30ca3 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -37,7 +37,7 @@
static int
cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@@ -48,10 +48,18 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
sym_op->cipher.data.length = options->test_buffer_size;
@@ -63,7 +71,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_null_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector __rte_unused,
@@ -74,10 +82,18 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* auth parameters */
sym_op->auth.data.length = options->test_buffer_size;
@@ -89,7 +105,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -100,10 +116,18 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -132,7 +156,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
static int
cperf_set_ops_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -143,10 +167,18 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
if (test_vector->auth_iv.length) {
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
@@ -167,21 +199,29 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
@@ -211,7 +251,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
@@ -222,10 +262,18 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* cipher parameters */
if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
@@ -248,21 +296,29 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -300,29 +356,41 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
static int
cperf_set_ops_aead(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
uint16_t iv_offset)
{
uint16_t i;
+ /* AAD is placed after the IV */
+ uint16_t aad_offset = iv_offset +
+ RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16);
for (i = 0; i < nb_ops; i++) {
struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_crypto_op_attach_sym_session(ops[i], sess);
- sym_op->m_src = bufs_in[i];
- sym_op->m_dst = bufs_out[i];
+ sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+
+ /* Set dest mbuf to NULL if in-place (dst_buf_offset == 0) */
+ if (dst_buf_offset == 0)
+ sym_op->m_dst = NULL;
+ else
+ sym_op->m_dst = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ dst_buf_offset);
/* AEAD parameters */
sym_op->aead.data.length = options->test_buffer_size;
- sym_op->aead.data.offset =
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16);
+ sym_op->aead.data.offset = 0;
- sym_op->aead.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
+ sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
+ uint8_t *, aad_offset);
+ sym_op->aead.aad.phys_addr = rte_crypto_op_ctophys_offset(ops[i],
+ aad_offset);
if (options->aead_op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
sym_op->aead.digest.data = test_vector->digest.data;
@@ -335,21 +403,29 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
struct rte_mbuf *buf, *tbuf;
if (options->out_of_place) {
- buf = bufs_out[i];
+ buf = sym_op->m_dst;
} else {
- tbuf = bufs_in[i];
+ tbuf = sym_op->m_src;
while ((tbuf->next != NULL) &&
(offset >= tbuf->data_len)) {
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->aead.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
}
@@ -358,8 +434,26 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ops[i],
uint8_t *, iv_offset);
- memcpy(iv_ptr, test_vector->aead_iv.data,
+ /*
+ * If doing AES-CCM, nonce is copied one byte
+ * after the start of IV field, and AAD is copied
+ * 18 bytes after the start of the AAD field.
+ */
+ if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ memcpy(iv_ptr + 1, test_vector->aead_iv.data,
test_vector->aead_iv.length);
+
+ memcpy(ops[i]->sym->aead.aad.data + 18,
+ test_vector->aad.data,
+ test_vector->aad.length);
+ } else {
+ memcpy(iv_ptr, test_vector->aead_iv.data,
+ test_vector->aead_iv.length);
+
+ memcpy(ops[i]->sym->aead.aad.data,
+ test_vector->aad.data,
+ test_vector->aad.length);
+ }
}
}
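All of the populate callbacks above drop the caller-provided mbuf arrays: each crypto op, its private data and its mbuf(s) now live in a single mempool object (built in cperf_test_common.c below), so buffers are recovered by adding fixed byte offsets to the op pointer. A minimal sketch of that recovery, assuming only the layout described; op_to_mbuf is a hypothetical helper name, and a zero offset encodes "no separate destination", i.e. an in-place operation:

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_mbuf.h>

static inline struct rte_mbuf *
op_to_mbuf(struct rte_crypto_op *op, uint32_t buf_offset)
{
	/* offset 0 is the in-place marker, not a real buffer */
	if (buf_offset == 0)
		return NULL;
	return (struct rte_mbuf *)((uint8_t *)op + buf_offset);
}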
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 1f8fa937..94951cc3 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -47,7 +47,7 @@ typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
uint16_t iv_offset);
typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
- struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset,
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector,
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 10cd2d8a..da4fb47c 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -11,7 +11,8 @@
#define CPERF_TOTAL_OPS ("total-ops")
#define CPERF_BURST_SIZE ("burst-sz")
#define CPERF_BUFFER_SIZE ("buffer-sz")
-#define CPERF_SEGMENTS_NB ("segments-nb")
+#define CPERF_SEGMENT_SIZE ("segment-sz")
+#define CPERF_DESC_NB ("desc-nb")
#define CPERF_DEVTYPE ("devtype")
#define CPERF_OPTYPE ("optype")
@@ -40,12 +41,16 @@
#define CPERF_CSV ("csv-friendly")
+/* benchmark-specific options */
+#define CPERF_PMDCC_DELAY_MS ("pmd-cyclecount-delay-ms")
+
#define MAX_LIST 32
enum cperf_perf_test_type {
CPERF_TEST_TYPE_THROUGHPUT,
CPERF_TEST_TYPE_LATENCY,
- CPERF_TEST_TYPE_VERIFY
+ CPERF_TEST_TYPE_VERIFY,
+ CPERF_TEST_TYPE_PMDCC
};
@@ -66,8 +71,10 @@ struct cperf_options {
uint32_t pool_sz;
uint32_t total_ops;
- uint32_t segments_nb;
+ uint32_t segment_sz;
uint32_t test_buffer_size;
+ uint32_t nb_descriptors;
+ uint16_t nb_qps;
uint32_t sessionless:1;
uint32_t out_of_place:1;
@@ -113,6 +120,8 @@ struct cperf_options {
uint32_t min_burst_size;
uint32_t inc_burst_size;
+ /* pmd-cyclecount specific options */
+ uint32_t pmdcc_delay;
};
void
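Taken together, the renamed and new options read as below; the binary name, device, algorithm and sizes are illustrative values chosen for this sketch, not defaults from the patch:

./dpdk-test-crypto-perf -l 0,1 --vdev crypto_aesni_mb -- \
	--ptest pmd-cyclecount --devtype crypto_aesni_mb \
	--optype cipher-only --cipher-algo aes-cbc \
	--buffer-sz 1024 --segment-sz 512 --desc-nb 2048 \
	--pmd-cyclecount-delay-ms 5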
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index 085aa8fe..ad43e84c 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -46,6 +46,47 @@ struct name_id_map {
uint32_t id;
};
+static void
+usage(char *progname)
+{
+ printf("%s [EAL options] --\n"
+ " --silent: disable options dump\n"
+ " --ptest throughput / latency / verify / pmd-cycleount :"
+ " set test type\n"
+ " --pool_sz N: set the number of crypto ops/mbufs allocated\n"
+ " --total-ops N: set the number of total operations performed\n"
+ " --burst-sz N: set the number of packets per burst\n"
+ " --buffer-sz N: set the size of a single packet\n"
+ " --segment-sz N: set the size of the segment to use\n"
+ " --desc-nb N: set number of descriptors for each crypto device\n"
+ " --devtype TYPE: set crypto device type to use\n"
+ " --optype cipher-only / auth-only / cipher-then-auth /\n"
+ " auth-then-cipher / aead : set operation type\n"
+ " --sessionless: enable session-less crypto operations\n"
+ " --out-of-place: enable out-of-place crypto operations\n"
+ " --test-file NAME: set the test vector file path\n"
+ " --test-name NAME: set specific test name section in test file\n"
+ " --cipher-algo ALGO: set cipher algorithm\n"
+ " --cipher-op encrypt / decrypt: set the cipher operation\n"
+ " --cipher-key-sz N: set the cipher key size\n"
+ " --cipher-iv-sz N: set the cipher IV size\n"
+ " --auth-algo ALGO: set auth algorithm\n"
+ " --auth-op generate / verify: set the auth operation\n"
+ " --auth-key-sz N: set the auth key size\n"
+ " --auth-iv-sz N: set the auth IV size\n"
+ " --aead-algo ALGO: set AEAD algorithm\n"
+ " --aead-op encrypt / decrypt: set the AEAD operation\n"
+ " --aead-key-sz N: set the AEAD key size\n"
+ " --aead-iv-sz N: set the AEAD IV size\n"
+ " --aead-aad-sz N: set the AEAD AAD size\n"
+ " --digest-sz N: set the digest size\n"
+ " --pmd-cyclecount-delay-ms N: set delay between enqueue\n"
+ " and dequeue in pmd-cyclecount benchmarking mode\n"
+ " --csv-friendly: enable test result output CSV friendly\n"
+ " -h: prints this help\n",
+ progname);
+}
+
static int
get_str_key_id_mapping(struct name_id_map *map, unsigned int map_len,
const char *str_key)
@@ -76,6 +117,10 @@ parse_cperf_test_type(struct cperf_options *opts, const char *arg)
{
cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
CPERF_TEST_TYPE_LATENCY
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_PMDCC],
+ CPERF_TEST_TYPE_PMDCC
}
};
@@ -137,6 +182,7 @@ parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
if (copy_arg == NULL)
return -1;
+ errno = 0;
token = strtok(copy_arg, ":");
/* Parse minimum value */
@@ -203,6 +249,7 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
if (copy_arg == NULL)
return -1;
+ errno = 0;
token = strtok(copy_arg, ",");
/* Parse first value */
@@ -322,17 +369,35 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_segments_nb(struct cperf_options *opts, const char *arg)
+parse_segment_sz(struct cperf_options *opts, const char *arg)
{
- int ret = parse_uint32_t(&opts->segments_nb, arg);
+ int ret = parse_uint32_t(&opts->segment_sz, arg);
if (ret) {
- RTE_LOG(ERR, USER1, "failed to parse segments number\n");
+ RTE_LOG(ERR, USER1, "failed to parse segment size\n");
return -1;
}
- if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
- RTE_LOG(ERR, USER1, "invalid segments number specified\n");
+ if (opts->segment_sz == 0) {
+ RTE_LOG(ERR, USER1, "Segment size has to be bigger than 0\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_desc_nb(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->nb_descriptors, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse descriptors number\n");
+ return -1;
+ }
+
+ if (opts->nb_descriptors == 0) {
+ RTE_LOG(ERR, USER1, "invalid descriptors number specified\n");
return -1;
}
@@ -623,6 +688,20 @@ parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
return 0;
}
+static int
+parse_pmd_cyclecount_delay_ms(struct cperf_options *opts,
+ const char *arg)
+{
+ int ret = parse_uint32_t(&opts->pmdcc_delay, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse pmd-cyclecount delay\n");
+ return -1;
+ }
+
+ return 0;
+}
+
typedef int (*option_parser_t)(struct cperf_options *opts,
const char *arg);
@@ -640,7 +719,8 @@ static struct option lgopts[] = {
{ CPERF_TOTAL_OPS, required_argument, 0, 0 },
{ CPERF_BURST_SIZE, required_argument, 0, 0 },
{ CPERF_BUFFER_SIZE, required_argument, 0, 0 },
- { CPERF_SEGMENTS_NB, required_argument, 0, 0 },
+ { CPERF_SEGMENT_SIZE, required_argument, 0, 0 },
+ { CPERF_DESC_NB, required_argument, 0, 0 },
{ CPERF_DEVTYPE, required_argument, 0, 0 },
{ CPERF_OPTYPE, required_argument, 0, 0 },
@@ -674,6 +754,8 @@ static struct option lgopts[] = {
{ CPERF_CSV, no_argument, 0, 0},
+ { CPERF_PMDCC_DELAY_MS, required_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -684,6 +766,7 @@ cperf_options_default(struct cperf_options *opts)
opts->pool_sz = 8192;
opts->total_ops = 10000000;
+ opts->nb_descriptors = 2048;
opts->buffer_size_list[0] = 64;
opts->buffer_size_count = 1;
@@ -697,10 +780,15 @@ cperf_options_default(struct cperf_options *opts)
opts->min_burst_size = 32;
opts->inc_burst_size = 0;
- opts->segments_nb = 1;
+ /*
+ * Parsed from the command line, or later defaulted to
+ * the maximum buffer size plus the digest size
+ */
+ opts->segment_sz = 0;
strncpy(opts->device_type, "crypto_aesni_mb",
sizeof(opts->device_type));
+ opts->nb_qps = 1;
opts->op_type = CPERF_CIPHER_THEN_AUTH;
@@ -727,6 +815,8 @@ cperf_options_default(struct cperf_options *opts)
opts->aead_aad_sz = 0;
opts->digest_sz = 12;
+
+ opts->pmdcc_delay = 0;
}
static int
@@ -739,7 +829,8 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_TOTAL_OPS, parse_total_ops },
{ CPERF_BURST_SIZE, parse_burst_sz },
{ CPERF_BUFFER_SIZE, parse_buffer_sz },
- { CPERF_SEGMENTS_NB, parse_segments_nb },
+ { CPERF_SEGMENT_SIZE, parse_segment_sz },
+ { CPERF_DESC_NB, parse_desc_nb },
{ CPERF_DEVTYPE, parse_device_type },
{ CPERF_OPTYPE, parse_op_type },
{ CPERF_SESSIONLESS, parse_sessionless },
@@ -761,6 +852,7 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
{ CPERF_DIGEST_SZ, parse_digest_sz },
{ CPERF_CSV, parse_csv_friendly},
+ { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms},
};
unsigned int i;
@@ -778,11 +870,14 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
{
int opt, retval, opt_idx;
- while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ while ((opt = getopt_long(argc, argv, "h", lgopts, &opt_idx)) != EOF) {
switch (opt) {
+ case 'h':
+ usage(argv[0]);
+ rte_exit(EXIT_SUCCESS, "Displayed help\n");
+ break;
/* long options */
case 0:
-
retval = cperf_opts_parse_long(opt_idx, options);
if (retval != 0)
return retval;
@@ -790,6 +885,7 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
break;
default:
+ usage(argv[0]);
return -EINVAL;
}
}
@@ -830,14 +926,26 @@ check_cipher_buffer_length(struct cperf_options *options)
if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
- for (buffer_size = options->min_buffer_size;
- buffer_size < options->max_buffer_size;
- buffer_size += options->inc_buffer_size) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
if ((buffer_size % DES_BLOCK_SIZE) != 0) {
RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
"not suitable for the algorithm selected\n");
return -EINVAL;
}
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
}
}
@@ -847,9 +955,21 @@ check_cipher_buffer_length(struct cperf_options *options)
int
cperf_options_check(struct cperf_options *options)
{
- if (options->segments_nb > options->min_buffer_size) {
+ if (options->op_type == CPERF_CIPHER_ONLY)
+ options->digest_sz = 0;
+
+ /*
+ * If segment size is not set, assume only one segment,
+ * big enough to contain the largest buffer and the digest
+ */
+ if (options->segment_sz == 0)
+ options->segment_sz = options->max_buffer_size +
+ options->digest_sz;
+
+ if (options->segment_sz < options->digest_sz) {
RTE_LOG(ERR, USER1,
- "Segments number greater than buffer size.\n");
+ "Segment size should be at least "
+ "the size of the digest\n");
return -EINVAL;
}
@@ -882,13 +1002,6 @@ cperf_options_check(struct cperf_options *options)
}
if (options->test == CPERF_TEST_TYPE_VERIFY &&
- options->total_ops > options->pool_sz) {
- RTE_LOG(ERR, USER1, "Total number of ops must be less than or"
- " equal to the pool size.\n");
- return -EINVAL;
- }
-
- if (options->test == CPERF_TEST_TYPE_VERIFY &&
(options->inc_buffer_size != 0 ||
options->buffer_size_count > 1)) {
RTE_LOG(ERR, USER1, "Only one buffer size is allowed when "
@@ -904,6 +1017,14 @@ cperf_options_check(struct cperf_options *options)
return -EINVAL;
}
+ if (options->test == CPERF_TEST_TYPE_PMDCC &&
+ options->pool_sz < options->nb_descriptors) {
+ RTE_LOG(ERR, USER1, "For pmd cyclecount benchmarks, pool size "
+ "must be equal or greater than the number of "
+ "cryptodev descriptors.\n");
+ return -EINVAL;
+ }
+
if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
options->auth_op !=
@@ -965,13 +1086,16 @@ cperf_options_dump(struct cperf_options *opts)
printf("%u ", opts->burst_size_list[size_idx]);
printf("\n");
}
- printf("\n# segments per buffer: %u\n", opts->segments_nb);
+ printf("\n# segment size: %u\n", opts->segment_sz);
printf("#\n");
printf("# cryptodev type: %s\n", opts->device_type);
printf("#\n");
+ printf("# number of queue pairs per device: %u\n", opts->nb_qps);
printf("# crypto operation: %s\n", cperf_op_type_strs[opts->op_type]);
printf("# sessionless: %s\n", opts->sessionless ? "yes" : "no");
printf("# out of place: %s\n", opts->out_of_place ? "yes" : "no");
+ if (opts->test == CPERF_TEST_TYPE_PMDCC)
+ printf("# inter-burst delay: %u ms\n", opts->pmdcc_delay);
printf("#\n");
diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
new file mode 100644
index 00000000..328744ef
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "cperf_test_common.h"
+
+struct obj_params {
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+ uint16_t segment_sz;
+ uint16_t segments_nb;
+};
+
+static void
+fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
+{
+ uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_iova = rte_mempool_virt2iova(obj) +
+ mbuf_offset + mbuf_hdr_size;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = 1;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+}
+
+static void
+fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
+ uint16_t segments_nb)
+{
+ uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
+ uint16_t remaining_segments = segments_nb;
+ struct rte_mbuf *next_mbuf;
+ rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
+ mbuf_offset + mbuf_hdr_size;
+
+ do {
+ /* start of buffer is after mbuf structure and priv data */
+ m->priv_size = 0;
+ m->buf_addr = (char *)m + mbuf_hdr_size;
+ m->buf_iova = next_seg_phys_addr;
+ next_seg_phys_addr += mbuf_hdr_size + segment_sz;
+ m->buf_len = segment_sz;
+ m->data_len = segment_sz;
+
+ /* No headroom needed for the buffer */
+ m->data_off = 0;
+
+ /* init some constant fields */
+ m->pool = mp;
+ m->nb_segs = segments_nb;
+ m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
+ mbuf_hdr_size + segment_sz);
+ m->next = next_mbuf;
+ m = next_mbuf;
+ remaining_segments--;
+
+ } while (remaining_segments > 0);
+
+ m->next = NULL;
+}
+
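+/*
+ * Editorial layout sketch (not part of the upstream file): each pool
+ * object filled by the helpers above is
+ *
+ *   [crypto op | sym op | priv data, padded to a cache line]
+ *   [mbuf hdr 0][seg 0 data][mbuf hdr 1][seg 1 data] ...
+ *
+ * which is why both buf_iova and the next-mbuf pointer advance by
+ * mbuf_hdr_size + segment_sz per segment.
+ */
+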
+static void
+mempool_obj_init(struct rte_mempool *mp,
+ void *opaque_arg,
+ void *obj,
+ __attribute__((unused)) unsigned int i)
+{
+ struct obj_params *params = opaque_arg;
+ struct rte_crypto_op *op = obj;
+ struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->src_buf_offset);
+ /* Set crypto operation */
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = rte_mem_virt2phy(obj);
+ op->mempool = mp;
+
+ /* Set source buffer */
+ op->sym->m_src = m;
+ if (params->segments_nb == 1)
+ fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz);
+ else
+ fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
+ params->segment_sz, params->segments_nb);
+
+
+ /* Set destination buffer */
+ if (params->dst_buf_offset) {
+ m = (struct rte_mbuf *) ((uint8_t *) obj +
+ params->dst_buf_offset);
+ fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
+ params->segment_sz);
+ op->sym->m_dst = m;
+ } else
+ op->sym->m_dst = NULL;
+}
+
+int
+cperf_alloc_common_memory(const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ uint8_t dev_id, uint16_t qp_id,
+ size_t extra_op_priv_size,
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool)
+{
+ char pool_name[32] = "";
+ int ret;
+
+ /* Calculate the object size */
+ uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ uint16_t crypto_op_private_size;
+ /*
+ * If doing AES-CCM, the IV field needs to be 16 bytes long,
+ * and the AAD field needs room for 18 bytes plus the AAD
+ * length; both are rounded up to a multiple of 16 bytes.
+ */
+ if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ crypto_op_private_size = extra_op_priv_size +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
+ RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
+ } else {
+ crypto_op_private_size = extra_op_priv_size +
+ test_vector->cipher_iv.length +
+ test_vector->auth_iv.length +
+ test_vector->aead_iv.length +
+ options->aead_aad_sz;
+ }
+
+ uint16_t crypto_op_total_size = crypto_op_size +
+ crypto_op_private_size;
+ uint16_t crypto_op_total_size_padded =
+ RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
+ uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
+ uint32_t max_size = options->max_buffer_size + options->digest_sz;
+ uint16_t segments_nb = (max_size % options->segment_sz) ?
+ (max_size / options->segment_sz) + 1 :
+ max_size / options->segment_sz;
+ uint32_t obj_size = crypto_op_total_size_padded +
+ (mbuf_size * segments_nb);
+
+ snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
+ dev_id, qp_id);
+
+ *src_buf_offset = crypto_op_total_size_padded;
+
+ struct obj_params params = {
+ .segment_sz = options->segment_sz,
+ .segments_nb = segments_nb,
+ .src_buf_offset = crypto_op_total_size_padded,
+ .dst_buf_offset = 0
+ };
+
+ if (options->out_of_place) {
+ *dst_buf_offset = *src_buf_offset +
+ (mbuf_size * segments_nb);
+ params.dst_buf_offset = *dst_buf_offset;
+ /* Destination buffer will be one segment only */
+ obj_size += max_size;
+ }
+
+ *pool = rte_mempool_create_empty(pool_name,
+ options->pool_sz, obj_size, 512, 0,
+ rte_socket_id(), 0);
+ if (*pool == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Cannot allocate mempool for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ ret = rte_mempool_set_ops_byname(*pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ if (ret != 0) {
+ RTE_LOG(ERR, USER1,
+ "Error setting mempool handler for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ ret = rte_mempool_populate_default(*pool);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Error populating mempool for device %u\n",
+ dev_id);
+ return -1;
+ }
+
+ rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);
+
+ return 0;
+}
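The sizing arithmetic above, with illustrative numbers: max_buffer_size = 1024 and digest_sz = 12 give max_size = 1036; with segment_sz = 512 that rounds up to three source segments, and --out-of-place adds one single-segment destination of max_size bytes. The 18 bytes reserved for AES-CCM cover the 16-byte B0 block plus the 2-byte encoded AAD length that the cryptodev API expects ahead of the AAD. The ceiling division, restated as a hypothetical helper:

#include <stdint.h>

/* same rounding as cperf_alloc_common_memory(); sketch only */
static uint16_t
calc_segments_nb(uint32_t max_buffer_size, uint32_t digest_sz,
		uint32_t segment_sz)
{
	uint32_t max_size = max_buffer_size + digest_sz;

	return (max_size % segment_sz) ?
			(max_size / segment_sz) + 1 :
			max_size / segment_sz;
}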
diff --git a/app/test-crypto-perf/cperf_test_common.h b/app/test-crypto-perf/cperf_test_common.h
new file mode 100644
index 00000000..4cee7852
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_common.h
@@ -0,0 +1,52 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_COMMON_H_
+#define _CPERF_TEST_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mempool.h>
+
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+int
+cperf_alloc_common_memory(const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ uint8_t dev_id, uint16_t qp_id,
+ size_t extra_op_priv_size,
+ uint32_t *src_buf_offset,
+ uint32_t *dst_buf_offset,
+ struct rte_mempool **pool);
+
+#endif /* _CPERF_TEST_COMMON_H_ */
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 58b21abd..ca2a4ba6 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -37,7 +37,7 @@
#include "cperf_test_latency.h"
#include "cperf_ops.h"
-
+#include "cperf_test_common.h"
struct cperf_op_result {
uint64_t tsc_start;
@@ -50,17 +50,15 @@ struct cperf_latency_ctx {
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
struct cperf_op_result *res;
@@ -74,124 +72,22 @@ struct priv_op_data {
#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
static void
-cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
+cperf_latency_test_free(struct cperf_latency_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx->res);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_latency_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -200,8 +96,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_latency_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
+ size_t extra_op_priv_size = sizeof(struct priv_op_data);
ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
if (ctx == NULL)
@@ -224,80 +119,10 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name),
- "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = sizeof(struct priv_op_data) +
- test_vector->cipher_iv.length +
- test_vector->auth_iv.length +
- test_vector->aead_iv.length;
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
-
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id,
+ extra_op_priv_size,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
@@ -308,7 +133,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
return ctx;
err:
- cperf_latency_test_free(ctx, mbuf_idx);
+ cperf_latency_test_free(ctx);
return NULL;
}
@@ -347,7 +172,7 @@ cperf_latency_test_runner(void *arg)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -373,7 +198,7 @@ cperf_latency_test_runner(void *arg)
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
- uint64_t m_idx = 0, b_idx = 0;
+ uint64_t b_idx = 0;
uint64_t tsc_val, tsc_end, tsc_start;
uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
@@ -388,11 +213,9 @@ cperf_latency_test_runner(void *arg)
ctx->options->total_ops -
enqd_tot;
- /* Allocate crypto ops from pool */
- if (burst_size != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, burst_size)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ burst_size) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -402,8 +225,8 @@ cperf_latency_test_runner(void *arg)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
burst_size, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
@@ -432,7 +255,7 @@ cperf_latency_test_runner(void *arg)
/* Free memory for not enqueued operations */
if (ops_enqd != burst_size)
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)&ops[ops_enqd],
burst_size - ops_enqd);
@@ -448,16 +271,11 @@ cperf_latency_test_runner(void *arg)
}
if (likely(ops_deqd)) {
- /*
- * free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
+ /* Free crypto ops so they can be reused. */
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
@@ -469,9 +287,6 @@ cperf_latency_test_runner(void *arg)
enqd_max = max(ops_enqd, enqd_max);
enqd_min = min(ops_enqd, enqd_min);
- m_idx += ops_enqd;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
b_idx++;
}
@@ -490,7 +305,7 @@ cperf_latency_test_runner(void *arg)
for (i = 0; i < ops_deqd; i++)
store_timestamp(ops_processed[i], tsc_end);
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
deqd_tot += ops_deqd;
@@ -586,7 +401,5 @@ cperf_latency_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_latency_test_free(ctx, ctx->options->pool_sz);
+ cperf_latency_test_free(ctx);
}
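[editor's note] The constructors in this patch delegate all buffer setup to cperf_alloc_common_memory() (defined in the new cperf_test_common.c), which packs each crypto op, its IV private data and its mbuf(s) into a single mempool object. A minimal sketch of how a runner recovers the mbuf from the op under that layout; the helper name is hypothetical, and src_buf_offset/dst_buf_offset are the offsets the allocator reports back through its output parameters:

	/* Hypothetical helper, assuming the combined-object layout: the
	 * mbuf sits buf_offset bytes past the op in the same object. */
	static inline struct rte_mbuf *
	cperf_obj_to_mbuf(struct rte_crypto_op *op, uint32_t buf_offset)
	{
		return (struct rte_mbuf *)((uint8_t *)op + buf_offset);
	}

This is why populate_ops now takes the two offsets instead of mbuf arrays: each op's m_src and m_dst can be derived from the op pointer alone.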
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
new file mode 100644
index 00000000..9b41724a
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -0,0 +1,520 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "cperf_ops.h"
+#include "cperf_test_pmd_cyclecount.h"
+#include "cperf_test_common.h"
+
+#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
+#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
+#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
+#define CSV_LINE_FMT "%10u,%10u,%u,%u,%u,%u,%u,%.3f,%.3f,%.3f\n"
+
+struct cperf_pmd_cyclecount_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pool;
+ struct rte_crypto_op **ops;
+ struct rte_crypto_op **ops_processed;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+};
+
+struct pmd_cyclecount_state {
+ struct cperf_pmd_cyclecount_ctx *ctx;
+ const struct cperf_options *opts;
+ uint32_t lcore;
+ uint64_t delay;
+ int linearize;
+ uint32_t ops_enqd;
+ uint32_t ops_deqd;
+ uint32_t ops_enq_retries;
+ uint32_t ops_deq_retries;
+ double cycles_per_build;
+ double cycles_per_enq;
+ double cycles_per_deq;
+};
+
+static const uint16_t iv_offset =
+ sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);
+
+static void
+cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
+{
+ if (ctx) {
+ if (ctx->sess) {
+ rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
+
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
+
+ if (ctx->ops)
+ rte_free(ctx->ops);
+
+ if (ctx->ops_processed)
+ rte_free(ctx->ops_processed);
+
+ rte_free(ctx);
+ }
+}
+
+void *
+cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
+ uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_pmd_cyclecount_ctx *ctx = NULL;
+
+ /* preallocate buffers for crypto ops as they can get quite big */
+ size_t alloc_sz = sizeof(struct rte_crypto_op *) *
+ options->nb_descriptors;
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_pmd_cyclecount_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ /* IV goes at the end of the crypto operation */
+ uint16_t iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ ctx->sess = op_fns->sess_create(
+ sess_mp, dev_id, options, test_vector, iv_offset);
+ if (ctx->sess == NULL)
+ goto err;
+
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
+ goto err;
+
+ ctx->ops = rte_malloc("ops", alloc_sz, 0);
+ if (!ctx->ops)
+ goto err;
+
+ ctx->ops_processed = rte_malloc("ops_processed", alloc_sz, 0);
+ if (!ctx->ops_processed)
+ goto err;
+
+ return ctx;
+
+err:
+ cperf_pmd_cyclecount_test_free(ctx);
+
+ return NULL;
+}
+
+/* benchmark alloc-build-free of ops */
+static inline int
+pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
+ uint16_t test_burst_size)
+{
+ uint32_t iter_ops_left = state->opts->total_ops - cur_op;
+ uint32_t iter_ops_needed =
+ RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
+ uint32_t cur_iter_op;
+
+ for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
+ cur_iter_op += test_burst_size) {
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
+ test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
+
+ /* Setup crypto op, attach mbuf etc */
+ (state->ctx->populate_ops)(ops,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
+ state->ctx->sess, state->opts,
+ state->ctx->test_vector, iv_offset);
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ /* Check if source mbufs require coalescing */
+ if (state->linearize) {
+ uint32_t i;
+ for (i = 0; i < burst_size; i++) {
+ struct rte_mbuf *src = ops[i]->sym->m_src;
+ rte_pktmbuf_linearize(src);
+ }
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+ rte_mempool_put_bulk(state->ctx->pool, (void **)ops,
+ burst_size);
+ }
+
+ return 0;
+}
+
+/* allocate and build ops (no free) */
+static int
+pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ uint32_t cur_iter_op;
+
+ for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
+ cur_iter_op += test_burst_size) {
+ uint32_t burst_size = RTE_MIN(
+ iter_ops_needed - cur_iter_op, test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(state->ctx->pool, (void **)ops,
+ burst_size) != 0) {
+ RTE_LOG(ERR, USER1,
+ "Failed to allocate more crypto operations "
+ "from the the crypto operation pool.\n"
+ "Consider increasing the pool size "
+ "with --pool-sz\n");
+ return -1;
+ }
+
+ /* Setup crypto op, attach mbuf etc */
+ (state->ctx->populate_ops)(ops,
+ state->ctx->src_buf_offset,
+ state->ctx->dst_buf_offset,
+ burst_size,
+ state->ctx->sess, state->opts,
+ state->ctx->test_vector, iv_offset);
+ }
+ return 0;
+}
+
+/* benchmark enqueue, returns number of ops enqueued */
+static uint32_t
+pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ /* Enqueue full descriptor ring of ops on crypto device */
+ uint32_t cur_iter_op = 0;
+ while (cur_iter_op < iter_ops_needed) {
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
+ test_burst_size);
+ struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
+ uint32_t burst_enqd;
+
+ burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
+ state->ctx->qp_id, ops, burst_size);
+
+ /* if we couldn't enqueue anything, the queue is full */
+ if (!burst_enqd) {
+ /* don't try to dequeue anything we didn't enqueue */
+ return cur_iter_op;
+ }
+
+ if (burst_enqd < burst_size)
+ state->ops_enq_retries++;
+ state->ops_enqd += burst_enqd;
+ cur_iter_op += burst_enqd;
+ }
+ return iter_ops_needed;
+}
+
+/* benchmark dequeue */
+static void
+pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
+ uint32_t iter_ops_needed, uint16_t test_burst_size)
+{
+ /* Dequeue full descriptor ring of ops on crypto device */
+ uint32_t cur_iter_op = 0;
+ while (cur_iter_op < iter_ops_needed) {
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
+ test_burst_size);
+ struct rte_crypto_op **ops_processed =
+ &state->ctx->ops[cur_iter_op];
+ uint32_t burst_deqd;
+
+ burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
+ state->ctx->qp_id, ops_processed, burst_size);
+
+ if (burst_deqd < burst_size)
+ state->ops_deq_retries++;
+ state->ops_deqd += burst_deqd;
+ cur_iter_op += burst_deqd;
+ }
+}
+
+/* run benchmark per burst size */
+static inline int
+pmd_cyclecount_bench_burst_sz(
+ struct pmd_cyclecount_state *state, uint16_t test_burst_size)
+{
+ uint64_t tsc_start;
+ uint64_t tsc_end;
+ uint64_t tsc_op;
+ uint64_t tsc_enq;
+ uint64_t tsc_deq;
+ uint32_t cur_op;
+
+ /* reset all counters */
+ tsc_enq = 0;
+ tsc_deq = 0;
+ state->ops_enqd = 0;
+ state->ops_enq_retries = 0;
+ state->ops_deqd = 0;
+ state->ops_deq_retries = 0;
+
+ /*
+ * Benchmark crypto op alloc-build-free separately.
+ */
+ tsc_start = rte_rdtsc_precise();
+
+ for (cur_op = 0; cur_op < state->opts->total_ops;
+ cur_op += state->opts->nb_descriptors) {
+ if (unlikely(pmd_cyclecount_bench_ops(
+ state, cur_op, test_burst_size)))
+ return -1;
+ }
+
+ tsc_end = rte_rdtsc_precise();
+ tsc_op = tsc_end - tsc_start;
+
+
+ /*
+ * Hardware acceleration cyclecount benchmarking loop.
+ *
+ * We're benchmarking raw enq/deq performance by filling up the device
+ * queue, so we never get any failed enqs unless the driver won't accept
+ * the exact number of descriptors we requested, or the driver won't
+ * wrap around the end of the TX ring. However, since we're only
+ * dequeueing once we've filled up the queue, we have to benchmark it
+ * piecemeal and then average out the results.
+ */
+ cur_op = 0;
+ while (cur_op < state->opts->total_ops) {
+ uint32_t iter_ops_left = state->opts->total_ops - cur_op;
+ uint32_t iter_ops_needed = RTE_MIN(
+ state->opts->nb_descriptors, iter_ops_left);
+ uint32_t iter_ops_allocd = iter_ops_needed;
+
+ /* allocate and build ops */
+ if (unlikely(pmd_cyclecount_build_ops(state, iter_ops_needed,
+ test_burst_size)))
+ return -1;
+
+ tsc_start = rte_rdtsc_precise();
+
+ /* fill up TX ring */
+ iter_ops_needed = pmd_cyclecount_bench_enq(state,
+ iter_ops_needed, test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ tsc_enq += tsc_end - tsc_start;
+
+ /* allow for HW to catch up */
+ if (state->delay)
+ rte_delay_us_block(state->delay);
+
+ tsc_start = rte_rdtsc_precise();
+
+ /* drain RX ring */
+ pmd_cyclecount_bench_deq(state, iter_ops_needed,
+ test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ tsc_deq += tsc_end - tsc_start;
+
+ cur_op += iter_ops_needed;
+
+ /*
+ * we may not have processed all ops that we allocated, so
+ * free everything we've allocated.
+ */
+ rte_mempool_put_bulk(state->ctx->pool,
+ (void **)state->ctx->ops, iter_ops_allocd);
+ }
+
+ state->cycles_per_build = (double)tsc_op / state->opts->total_ops;
+ state->cycles_per_enq = (double)tsc_enq / state->ops_enqd;
+ state->cycles_per_deq = (double)tsc_deq / state->ops_deqd;
+
+ return 0;
+}
+
+int
+cperf_pmd_cyclecount_test_runner(void *test_ctx)
+{
+ struct pmd_cyclecount_state state = {0};
+ const struct cperf_options *opts;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
+
+ state.ctx = test_ctx;
+ opts = state.ctx->options;
+ state.opts = opts;
+ state.lcore = rte_lcore_id();
+ state.linearize = 0;
+
+ static int only_once;
+ static bool warmup = true;
+
+ /*
+ * We need a small delay to allow the hardware to process all the crypto
+ * operations. We can't automatically figure out what the delay should
+ * be, so we leave it up to the user (by default it's 0).
+ */
+ state.delay = 1000 * opts->pmdcc_delay;
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+
+ /* Check if source mbufs require coalescing */
+ if (opts->segment_sz < opts->max_buffer_size) {
+ rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+ 0) {
+ state.linearize = 1;
+ }
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ state.ctx->lcore_id = state.lcore;
+
+ /* Get first size from range or list */
+ if (opts->inc_burst_size != 0)
+ test_burst_size = opts->min_burst_size;
+ else
+ test_burst_size = opts->burst_size_list[0];
+
+ while (test_burst_size <= opts->max_burst_size) {
+ /* do a benchmark run */
+ if (pmd_cyclecount_bench_burst_sz(&state, test_burst_size))
+ return -1;
+
+ /*
+ * First run is always a warm up run.
+ */
+ if (warmup) {
+ warmup = false;
+ continue;
+ }
+
+ if (!opts->csv) {
+ if (!only_once)
+ printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
+ "Burst Size", "Enqueued",
+ "Dequeued", "Enq Retries",
+ "Deq Retries", "Cycles/Op",
+ "Cycles/Enq", "Cycles/Deq");
+ only_once = 1;
+
+ printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
+ opts->test_buffer_size, test_burst_size,
+ state.ops_enqd, state.ops_deqd,
+ state.ops_enq_retries,
+ state.ops_deq_retries,
+ state.cycles_per_build,
+ state.cycles_per_enq,
+ state.cycles_per_deq);
+ } else {
+ if (!only_once)
+ printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
+ "Burst Size", "Enqueued",
+ "Dequeued", "Enq Retries",
+ "Deq Retries", "Cycles/Op",
+ "Cycles/Enq", "Cycles/Deq");
+ only_once = 1;
+
+ printf(CSV_LINE_FMT, state.ctx->lcore_id,
+ opts->test_buffer_size, test_burst_size,
+ state.ops_enqd, state.ops_deqd,
+ state.ops_enq_retries,
+ state.ops_deq_retries,
+ state.cycles_per_build,
+ state.cycles_per_enq,
+ state.cycles_per_deq);
+ }
+
+ /* Get next size from range or list */
+ if (opts->inc_burst_size != 0)
+ test_burst_size += opts->inc_burst_size;
+ else {
+ if (++burst_size_idx == opts->burst_size_count)
+ break;
+ test_burst_size = opts->burst_size_list[burst_size_idx];
+ }
+ }
+
+ return 0;
+}
+
+void
+cperf_pmd_cyclecount_test_destructor(void *arg)
+{
+ struct cperf_pmd_cyclecount_ctx *ctx = arg;
+
+ if (ctx == NULL)
+ return;
+
+ cperf_pmd_cyclecount_test_free(ctx);
+}
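[editor's note] For reference, a pmd-cyclecount run might be launched as below. This is an illustrative invocation only: the option names follow this release's test-crypto-perf guide, and the vdev, sizes and delay values are placeholders.

	./build/app/dpdk-test-crypto-perf -l 0,1 --vdev crypto_aesni_mb -- \
		--ptest pmd-cyclecount --devtype crypto_aesni_mb \
		--optype cipher-only --cipher-algo aes-cbc --cipher-op encrypt \
		--total-ops 1000000 --burst-sz 32 --buffer-sz 64 \
		--pmd-cyclecount-delay-ms 5

The delay maps to opts->pmdcc_delay above (milliseconds, converted to microseconds before rte_delay_us_block()).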
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.h b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h
new file mode 100644
index 00000000..93f0eae0
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.h
@@ -0,0 +1,61 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_PMD_CYCLECOUNT_H_
+#define _CPERF_TEST_PMD_CYCLECOUNT_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
+void *
+cperf_pmd_cyclecount_test_constructor(
+ struct rte_mempool *sess_mp,
+ uint8_t dev_id,
+ uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_pmd_cyclecount_test_runner(void *test_ctx);
+
+void
+cperf_pmd_cyclecount_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_TEST_PMD_CYCLECOUNT_H_ */
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 3bb1cb05..b84dc630 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -37,145 +37,42 @@
#include "cperf_test_throughput.h"
#include "cperf_ops.h"
+#include "cperf_test_common.h"
struct cperf_throughput_ctx {
uint8_t dev_id;
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
static void
-cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
+cperf_throughput_test_free(struct cperf_throughput_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -184,8 +81,6 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_throughput_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
if (ctx == NULL)
@@ -198,7 +93,7 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the cryptop operation */
+ /* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
@@ -207,81 +102,14 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = test_vector->cipher_iv.length +
- test_vector->auth_iv.length + test_vector->aead_iv.length;
-
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
err:
- cperf_throughput_test_free(ctx, mbuf_idx);
+ cperf_throughput_test_free(ctx);
return NULL;
}
@@ -306,7 +134,7 @@ cperf_throughput_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -333,7 +161,7 @@ cperf_throughput_test_runner(void *test_ctx)
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
- uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+ uint64_t tsc_start, tsc_end, tsc_duration;
uint16_t ops_unused = 0;
@@ -349,11 +177,9 @@ cperf_throughput_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -363,10 +189,11 @@ cperf_throughput_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
- ops_needed, ctx->sess, ctx->options,
- ctx->test_vector, iv_offset);
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
+ ops_needed, ctx->sess,
+ ctx->options, ctx->test_vector,
+ iv_offset);
/**
* When ops_needed is smaller than ops_enqd, the
@@ -411,12 +238,8 @@ cperf_throughput_test_runner(void *test_ctx)
ops_processed, test_burst_size);
if (likely(ops_deqd)) {
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
@@ -429,9 +252,6 @@ cperf_throughput_test_runner(void *test_ctx)
ops_deqd_failed++;
}
- m_idx += ops_needed;
- m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
- 0 : m_idx;
}
/* Dequeue any operations still in the crypto device */
@@ -446,9 +266,8 @@ cperf_throughput_test_runner(void *test_ctx)
if (ops_deqd == 0)
ops_deqd_failed++;
else {
- rte_mempool_put_bulk(ctx->crypto_op_pool,
+ rte_mempool_put_bulk(ctx->pool,
(void **)ops_processed, ops_deqd);
-
ops_deqd_total += ops_deqd;
}
}
@@ -534,7 +353,5 @@ cperf_throughput_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_throughput_test_free(ctx, ctx->options->pool_sz);
+ cperf_throughput_test_free(ctx);
}
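[editor's note] The runner now draws whole objects (op + IV private data + mbufs) from one pool, so a single rte_mempool_get_bulk()/rte_mempool_put_bulk() pair replaces rte_crypto_op_bulk_alloc() and the per-op frees. A condensed sketch of that lifecycle, assuming a pool produced by cperf_alloc_common_memory():

	struct rte_crypto_op *ops[32];

	/* One object carries the op and its mbuf(s); putting it back
	 * returns both, which is why rte_crypto_op_free() is no longer
	 * used anywhere in these runners. */
	if (rte_mempool_get_bulk(pool, (void **)ops, 32) != 0)
		return -ENOMEM;
	/* ... populate, enqueue, dequeue ... */
	rte_mempool_put_bulk(pool, (void **)ops, 32);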
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 148a6041..d4736f9e 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -116,6 +116,20 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
+ if (test_vector->aead_key.data) {
+ printf("\naead_key =\n");
+ for (i = 0; i < test_vector->aead_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aead_key.length - 1))
+ printf("0x%02x", test_vector->aead_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->aead_key.data[i]);
+ }
+ printf("\n");
+ }
+
if (test_vector->cipher_iv.data) {
printf("\ncipher_iv =\n");
for (i = 0; i < test_vector->cipher_iv.length; ++i) {
@@ -142,6 +156,19 @@ show_test_vector(struct cperf_test_vector *test_vector)
printf("\n");
}
+ if (test_vector->aead_iv.data) {
+ printf("\naead_iv =\n");
+ for (i = 0; i < test_vector->aead_iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aead_iv.length - 1))
+ printf("0x%02x", test_vector->aead_iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->aead_iv.data[i]);
+ }
+ printf("\n");
+ }
+
if (test_vector->ciphertext.data) {
printf("\nciphertext =\n");
for (i = 0; i < test_vector->ciphertext.length; ++i) {
@@ -345,6 +372,20 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_key.length = opts->auth_key_sz;
}
+ } else if (strstr(key_token, "aead_key")) {
+ rte_free(vector->aead_key.data);
+ vector->aead_key.data = data;
+ if (tc_found)
+ vector->aead_key.length = data_length;
+ else {
+ if (opts->aead_key_sz > data_length) {
+ printf("Global aead_key shorter than "
+ "aead_key_sz\n");
+ return -1;
+ }
+ vector->aead_key.length = opts->aead_key_sz;
+ }
+
} else if (strstr(key_token, "cipher_iv")) {
rte_free(vector->cipher_iv.data);
vector->cipher_iv.data = data;
@@ -373,6 +414,20 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
vector->auth_iv.length = opts->auth_iv_sz;
}
+ } else if (strstr(key_token, "aead_iv")) {
+ rte_free(vector->aead_iv.data);
+ vector->aead_iv.data = data;
+ if (tc_found)
+ vector->aead_iv.length = data_length;
+ else {
+ if (opts->aead_iv_sz > data_length) {
+ printf("Global aead iv shorter than "
+ "aead_iv_sz\n");
+ return -1;
+ }
+ vector->aead_iv.length = opts->aead_iv_sz;
+ }
+
} else if (strstr(key_token, "ciphertext")) {
rte_free(vector->ciphertext.data);
vector->ciphertext.data = data;
@@ -390,7 +445,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "aad")) {
rte_free(vector->aad.data);
vector->aad.data = data;
- vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ vector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);
if (tc_found)
vector->aad.length = data_length;
else {
@@ -405,7 +460,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "digest")) {
rte_free(vector->digest.data);
vector->digest.data = data;
- vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.phys_addr = rte_malloc_virt2iova(
vector->digest.data);
if (tc_found)
vector->digest.length = data_length;
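[editor's note] The new branches let user-supplied vector files carry AEAD material alongside the existing cipher/auth entries. An illustrative fragment in the parser's comma-separated hex format (placeholder bytes; per the checks above, the data must be at least aead_key_sz/aead_iv_sz long unless a test case overrides the lengths):

	aead_key =
	0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
	0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
	aead_iv =
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b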
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index e51dcc3f..fa911ff6 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest,
options->digest_sz);
@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);
t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest, options->digest_sz);
t_vec->data.aead_offset = 0;
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index 85955703..cb5d8284 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -78,13 +78,13 @@ struct cperf_test_vector {
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} aad;
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint16_t length;
} digest;
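[editor's note] These two fields move from phys_addr_t to rte_iova_t as part of this release's IOVA rework; rte_malloc_virt2iova(), used in the .c changes above, is the direct replacement for rte_malloc_virt2phy(). A minimal usage sketch:

	#include <rte_malloc.h>

	uint8_t *digest = rte_malloc(NULL, 16, 0);
	rte_iova_t io_addr = rte_malloc_virt2iova(digest);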
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index a314646c..6945c8b4 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -37,23 +37,22 @@
#include "cperf_test_verify.h"
#include "cperf_ops.h"
+#include "cperf_test_common.h"
struct cperf_verify_ctx {
uint8_t dev_id;
uint16_t qp_id;
uint8_t lcore_id;
- struct rte_mempool *pkt_mbuf_pool_in;
- struct rte_mempool *pkt_mbuf_pool_out;
- struct rte_mbuf **mbufs_in;
- struct rte_mbuf **mbufs_out;
-
- struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *pool;
struct rte_cryptodev_sym_session *sess;
cperf_populate_ops_t populate_ops;
+ uint32_t src_buf_offset;
+ uint32_t dst_buf_offset;
+
const struct cperf_options *options;
const struct cperf_test_vector *test_vector;
};
@@ -63,123 +62,21 @@ struct cperf_op_result {
};
static void
-cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
+cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
- uint32_t i;
-
if (ctx) {
if (ctx->sess) {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
- if (ctx->mbufs_in) {
- for (i = 0; i < mbuf_nb; i++)
- rte_pktmbuf_free(ctx->mbufs_in[i]);
-
- rte_free(ctx->mbufs_in);
- }
-
- if (ctx->mbufs_out) {
- for (i = 0; i < mbuf_nb; i++) {
- if (ctx->mbufs_out[i] != NULL)
- rte_pktmbuf_free(ctx->mbufs_out[i]);
- }
-
- rte_free(ctx->mbufs_out);
- }
-
- if (ctx->pkt_mbuf_pool_in)
- rte_mempool_free(ctx->pkt_mbuf_pool_in);
-
- if (ctx->pkt_mbuf_pool_out)
- rte_mempool_free(ctx->pkt_mbuf_pool_out);
-
- if (ctx->crypto_op_pool)
- rte_mempool_free(ctx->crypto_op_pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
rte_free(ctx);
}
}
-static struct rte_mbuf *
-cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
- const struct cperf_options *options,
- const struct cperf_test_vector *test_vector)
-{
- struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
- uint8_t *mbuf_data;
- uint8_t *test_data =
- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
- test_vector->plaintext.data :
- test_vector->ciphertext.data;
-
- mbuf = rte_pktmbuf_alloc(mempool);
- if (mbuf == NULL)
- goto error;
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
-
- while (segments_nb) {
- struct rte_mbuf *m;
-
- m = rte_pktmbuf_alloc(mempool);
- if (m == NULL)
- goto error;
-
- rte_pktmbuf_chain(mbuf, m);
-
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
- }
-
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
-
- memcpy(mbuf_data, test_data, last_sz);
- }
-
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
- if (mbuf_data == NULL)
- goto error;
- }
-
- if (options->op_type == CPERF_AEAD) {
- uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
- RTE_ALIGN_CEIL(options->aead_aad_sz, 16));
-
- if (aead == NULL)
- goto error;
-
- memcpy(aead, test_vector->aad.data, test_vector->aad.length);
- }
-
- return mbuf;
-error:
- if (mbuf != NULL)
- rte_pktmbuf_free(mbuf);
-
- return NULL;
-}
-
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
uint8_t dev_id, uint16_t qp_id,
@@ -188,8 +85,6 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_verify_ctx *ctx = NULL;
- unsigned int mbuf_idx = 0;
- char pool_name[32] = "";
ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
if (ctx == NULL)
@@ -202,7 +97,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the cryptop operation */
+ /* IV goes at the end of the crypto operation */
uint16_t iv_offset = sizeof(struct rte_crypto_op) +
sizeof(struct rte_crypto_sym_op);
@@ -211,80 +106,14 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
if (ctx->sess == NULL)
goto err;
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_in == NULL)
- goto err;
-
- /* Generate mbufs_in with plaintext populated for test */
- ctx->mbufs_in = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
- options, test_vector);
- if (ctx->mbufs_in[mbuf_idx] == NULL)
- goto err;
- }
-
- if (options->out_of_place == 1) {
-
- snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
- dev_id);
-
- ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
- pool_name, options->pool_sz, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
- rte_socket_id());
-
- if (ctx->pkt_mbuf_pool_out == NULL)
- goto err;
- }
-
- ctx->mbufs_out = rte_malloc(NULL,
- (sizeof(struct rte_mbuf *) *
- ctx->options->pool_sz), 0);
-
- for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
- if (options->out_of_place == 1) {
- ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
- if (ctx->mbufs_out[mbuf_idx] == NULL)
- goto err;
- } else {
- ctx->mbufs_out[mbuf_idx] = NULL;
- }
- }
-
- snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
- dev_id);
-
- uint16_t priv_size = test_vector->cipher_iv.length +
- test_vector->auth_iv.length + test_vector->aead_iv.length;
- ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz,
- 512, priv_size, rte_socket_id());
- if (ctx->crypto_op_pool == NULL)
+ if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
+ &ctx->src_buf_offset, &ctx->dst_buf_offset,
+ &ctx->pool) < 0)
goto err;
return ctx;
err:
- cperf_verify_test_free(ctx, mbuf_idx);
+ cperf_verify_test_free(ctx);
return NULL;
}
@@ -362,10 +191,13 @@ cperf_verify_op(struct rte_crypto_op *op,
break;
case CPERF_AEAD:
cipher = 1;
- cipher_offset = vector->aad.length;
+ cipher_offset = 0;
auth = 1;
- auth_offset = vector->aad.length + options->test_buffer_size;
+ auth_offset = options->test_buffer_size;
break;
+ default:
+ res = 1;
+ goto out;
}
if (cipher == 1) {
@@ -386,9 +218,39 @@ cperf_verify_op(struct rte_crypto_op *op,
options->digest_sz);
}
+out:
+ rte_free(data);
return !!res;
}
+static void
+cperf_mbuf_set(struct rte_mbuf *mbuf,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint32_t segment_sz = options->segment_sz;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+ uint32_t remaining_bytes = options->max_buffer_size;
+
+ while (remaining_bytes) {
+ mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+
+ if (remaining_bytes <= segment_sz) {
+ memcpy(mbuf_data, test_data, remaining_bytes);
+ return;
+ }
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ remaining_bytes -= segment_sz;
+ test_data += segment_sz;
+ mbuf = mbuf->next;
+ }
+}
+
int
cperf_verify_test_runner(void *test_ctx)
{
@@ -400,7 +262,7 @@ cperf_verify_test_runner(void *test_ctx)
static int only_once;
- uint64_t i, m_idx = 0;
+ uint64_t i;
uint16_t ops_unused = 0;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
@@ -413,7 +275,7 @@ cperf_verify_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
@@ -440,11 +302,9 @@ cperf_verify_test_runner(void *test_ctx)
uint16_t ops_needed = burst_size - ops_unused;
- /* Allocate crypto ops from pool */
- if (ops_needed != rte_crypto_op_bulk_alloc(
- ctx->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops, ops_needed)) {
+ /* Allocate objects containing crypto operations and mbufs */
+ if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
+ ops_needed) != 0) {
RTE_LOG(ERR, USER1,
"Failed to allocate more crypto operations "
"from the the crypto operation pool.\n"
@@ -454,11 +314,18 @@ cperf_verify_test_runner(void *test_ctx)
}
/* Setup crypto op, attach mbuf etc */
- (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
- &ctx->mbufs_out[m_idx],
+ (ctx->populate_ops)(ops, ctx->src_buf_offset,
+ ctx->dst_buf_offset,
ops_needed, ctx->sess, ctx->options,
ctx->test_vector, iv_offset);
+
+ /* Populate the mbuf with the test vector, for verification */
+ for (i = 0; i < ops_needed; i++)
+ cperf_mbuf_set(ops[i]->sym->m_src,
+ ctx->options,
+ ctx->test_vector);
+
#ifdef CPERF_LINEARIZATION_ENABLE
if (linearize) {
/* PMD doesn't support scatter-gather and source buffer
@@ -488,10 +355,6 @@ cperf_verify_test_runner(void *test_ctx)
ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
ops_processed, ctx->options->max_burst_size);
- m_idx += ops_needed;
- if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
- m_idx = 0;
-
if (ops_deqd == 0) {
/**
* Count dequeue polls which didn't return any
@@ -506,13 +369,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -534,13 +394,10 @@ cperf_verify_test_runner(void *test_ctx)
if (cperf_verify_op(ops_processed[i], ctx->options,
ctx->test_vector))
ops_failed++;
- /* free crypto ops so they can be reused. We don't free
- * the mbufs here as we don't want to reuse them as
- * the crypto operation will change the data and cause
- * failures.
- */
- rte_crypto_op_free(ops_processed[i]);
}
+ /* Free crypto ops so they can be reused. */
+ rte_mempool_put_bulk(ctx->pool,
+ (void **)ops_processed, ops_deqd);
ops_deqd_total += ops_deqd;
}
@@ -594,7 +451,5 @@ cperf_verify_test_destructor(void *arg)
if (ctx == NULL)
return;
- rte_cryptodev_stop(ctx->dev_id);
-
- cperf_verify_test_free(ctx, ctx->options->pool_sz);
+ cperf_verify_test_free(ctx);
}
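[editor's note] cperf_mbuf_set() exists because mbufs are now reused across bursts, so the previous operation's output must be overwritten with fresh test data before each enqueue. A small self-contained demo of its segment walk (placeholder sizes):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t segment_sz = 64, remaining = 200, seg = 0;

		/* Mirrors the cperf_mbuf_set() loop: full segments first,
		 * then a final partial copy. Prints 64/64/64/8. */
		while (remaining) {
			uint32_t chunk = remaining <= segment_sz ?
					remaining : segment_sz;
			printf("segment %u: copy %u bytes\n", seg++, chunk);
			if (remaining <= segment_sz)
				break;
			remaining -= segment_sz;
		}
		return 0;
	}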
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 99f5d3e0..29373f5b 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -35,6 +35,9 @@
#include <rte_eal.h>
#include <rte_cryptodev.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#endif
#include "cperf.h"
#include "cperf_options.h"
@@ -42,6 +45,7 @@
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
+#include "cperf_test_pmd_cyclecount.h"
#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64
@@ -49,7 +53,8 @@
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
[CPERF_TEST_TYPE_LATENCY] = "latency",
- [CPERF_TEST_TYPE_VERIFY] = "verify"
+ [CPERF_TEST_TYPE_VERIFY] = "verify",
+ [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
const char *cperf_op_type_strs[] = {
@@ -75,6 +80,11 @@ const struct cperf_test cperf_testmap[] = {
cperf_verify_test_constructor,
cperf_verify_test_runner,
cperf_verify_test_destructor
+ },
+ [CPERF_TEST_TYPE_PMDCC] = {
+ cperf_pmd_cyclecount_test_constructor,
+ cperf_pmd_cyclecount_test_runner,
+ cperf_pmd_cyclecount_test_destructor
}
};
@@ -83,7 +93,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
struct rte_mempool *session_pool_socket[])
{
uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
- unsigned int i;
+ unsigned int i, j;
int ret;
enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
@@ -112,21 +122,53 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
max_sess_size = sess_size;
}
+ /*
+ * Calculate the number of queue pairs needed, based on the
+ * number of available logical cores and crypto devices. For
+ * instance, with 4 cores and 2 crypto devices, 2 queue pairs
+ * are set up per device.
+ */
+ opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
+ (nb_lcores / enabled_cdev_count) + 1 :
+ nb_lcores / enabled_cdev_count;
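+ /* Equivalent to the integer ceiling of nb_lcores / enabled_cdev_count;
+ * e.g. 5 worker cores across 2 devices gives 3 queue pairs per device.
+ */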
+
for (i = 0; i < enabled_cdev_count &&
i < RTE_CRYPTO_MAX_DEVS; i++) {
cdev_id = enabled_cdevs[i];
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ /*
+ * If multi-core scheduler is used, limit the number
+ * of queue pairs to 1, as there is no way to know
+ * how many cores are being used by the PMD, and
+ * how many will be available for the application.
+ */
+ if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
+ rte_cryptodev_scheduler_mode_get(cdev_id) ==
+ CDEV_SCHED_MODE_MULTICORE)
+ opts->nb_qps = 1;
+#endif
+
+ struct rte_cryptodev_info cdev_info;
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+ rte_cryptodev_info_get(cdev_id, &cdev_info);
+ if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
+ printf("Number of needed queue pairs is higher "
+ "than the maximum number of queue pairs "
+ "per device.\n");
+ printf("Lower the number of cores or increase "
+ "the number of crypto devices\n");
+ return -EINVAL;
+ }
struct rte_cryptodev_config conf = {
- .nb_queue_pairs = 1,
- .socket_id = socket_id
+ .nb_queue_pairs = opts->nb_qps,
+ .socket_id = socket_id
};
struct rte_cryptodev_qp_conf qp_conf = {
- .nb_descriptors = 2048
+ .nb_descriptors = opts->nb_descriptors
};
-
if (session_pool_socket[socket_id] == NULL) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
@@ -158,14 +200,16 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
return -EINVAL;
}
- ret = rte_cryptodev_queue_pair_setup(cdev_id, 0,
+ for (j = 0; j < opts->nb_qps; j++) {
+ ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
&qp_conf, socket_id,
session_pool_socket[socket_id]);
if (ret < 0) {
printf("Failed to setup queue pair %u on "
- "cryptodev %u", 0, cdev_id);
+ "cryptodev %u", j, cdev_id);
return -EINVAL;
}
+ }
ret = rte_cryptodev_start(cdev_id);
if (ret < 0) {
@@ -380,6 +424,7 @@ main(int argc, char **argv)
struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
int nb_cryptodevs = 0;
+ uint16_t total_nb_qps = 0;
uint8_t cdev_id, i;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
@@ -410,11 +455,12 @@ main(int argc, char **argv)
goto err;
}
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
+ session_pool_socket);
+
if (!opts.silent)
cperf_options_dump(&opts);
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
- session_pool_socket);
if (nb_cryptodevs < 1) {
RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
"device type\n");
@@ -464,23 +510,29 @@ main(int argc, char **argv)
if (!opts.silent)
show_test_vector(t_vec);
+ total_nb_qps = nb_cryptodevs * opts.nb_qps;
+
i = 0;
+ uint8_t qp_id = 0, cdev_index = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
+ cdev_id = enabled_cdevs[cdev_index];
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
- ctx[cdev_id] = cperf_testmap[opts.test].constructor(
- session_pool_socket[socket_id], cdev_id, 0,
+ ctx[i] = cperf_testmap[opts.test].constructor(
+ session_pool_socket[socket_id], cdev_id, qp_id,
&opts, t_vec, &op_fns);
- if (ctx[cdev_id] == NULL) {
+ if (ctx[i] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
goto err;
}
+ qp_id = (qp_id + 1) % opts.nb_qps;
+ if (qp_id == 0)
+ cdev_index++;
i++;
}
@@ -494,19 +546,17 @@ main(int argc, char **argv)
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
rte_eal_remote_launch(cperf_testmap[opts.test].runner,
- ctx[cdev_id], lcore_id);
+ ctx[i], lcore_id);
i++;
}
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
rte_eal_wait_lcore(lcore_id);
i++;
@@ -525,15 +575,17 @@ main(int argc, char **argv)
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[i];
-
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+
free_test_vector(t_vec, &opts);
printf("\n");
@@ -542,16 +594,20 @@ main(int argc, char **argv)
err:
i = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (i == nb_cryptodevs)
+ if (i == total_nb_qps)
break;
cdev_id = enabled_cdevs[i];
- if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
- cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ if (ctx[i] && cperf_testmap[opts.test].destructor)
+ cperf_testmap[opts.test].destructor(ctx[i]);
i++;
}
+ for (i = 0; i < nb_cryptodevs &&
+ i < RTE_CRYPTO_MAX_DEVS; i++)
+ rte_cryptodev_stop(enabled_cdevs[i]);
+
free_test_vector(t_vec, &opts);
printf("\n");
diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index 41020760..0fadab4a 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -36,6 +36,7 @@
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eventdev.h>
+#include <rte_service.h>
#define CLNRM "\x1b[0m"
#define CLRED "\x1b[31m"
@@ -92,25 +93,43 @@ evt_has_all_types_queue(uint8_t dev_id)
true : false;
}
-static inline uint32_t
-evt_sched_type2queue_cfg(uint8_t sched_type)
+static inline int
+evt_service_setup(uint8_t dev_id)
{
- uint32_t ret;
-
- switch (sched_type) {
- case RTE_SCHED_TYPE_ATOMIC:
- ret = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
- break;
- case RTE_SCHED_TYPE_ORDERED:
- ret = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
- break;
- case RTE_SCHED_TYPE_PARALLEL:
- ret = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
- break;
- default:
- rte_panic("Invalid sched_type %d\n", sched_type);
+ uint32_t service_id;
+ int32_t core_cnt;
+ unsigned int lcore = 0;
+ uint32_t core_array[RTE_MAX_LCORE];
+ uint8_t cnt;
+ uint8_t min_cnt = UINT8_MAX;
+
+ if (evt_has_distributed_sched(dev_id))
+ return 0;
+
+ if (!rte_service_lcore_count())
+ return -ENOENT;
+
+ if (!rte_event_dev_service_id_get(dev_id, &service_id)) {
+ core_cnt = rte_service_lcore_list(core_array,
+ RTE_MAX_LCORE);
+ if (core_cnt < 0)
+ return -ENOENT;
+ /* Get the core with the fewest services running. */
+ while (core_cnt--) {
+ /* Reset default mapping */
+ rte_service_map_lcore_set(service_id,
+ core_array[core_cnt], 0);
+ cnt = rte_service_lcore_count_services(
+ core_array[core_cnt]);
+ if (cnt < min_cnt) {
+ lcore = core_array[core_cnt];
+ min_cnt = cnt;
+ }
+ }
+ if (rte_service_map_lcore_set(service_id, lcore, 1))
+ return -ENOENT;
}
- return ret;
+ return 0;
}
#endif /* _EVT_COMMON_ */
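[editor's note] evt_service_setup() above expects at least one service lcore to have been registered already (it returns -ENOENT otherwise), e.g. via the EAL '-s' service coremask. A minimal programmatic sketch (the core id is a placeholder):

	/* Register lcore 2 as a service core and start it. */
	if (rte_service_lcore_add(2) == 0)
		rte_service_lcore_start(2);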
diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
index 65e22f84..e2187dfc 100644
--- a/app/test-eventdev/evt_options.c
+++ b/app/test-eventdev/evt_options.c
@@ -114,13 +114,6 @@ evt_parse_test_name(struct evt_options *opt, const char *arg)
}
static int
-evt_parse_slcore(struct evt_options *opt, const char *arg)
-{
- opt->slcore = atoi(arg);
- return 0;
-}
-
-static int
evt_parse_socket_id(struct evt_options *opt, const char *arg)
{
opt->socket_id = atoi(arg);
@@ -188,7 +181,6 @@ usage(char *program)
"\t--test : name of the test application to run\n"
"\t--socket_id : socket_id of application resources\n"
"\t--pool_sz : pool size of the mempool\n"
- "\t--slcore : lcore id of the scheduler\n"
"\t--plcores : list of lcore ids for producers\n"
"\t--wlcores : list of lcore ids for workers\n"
"\t--stlist : list of scheduled types of the stages\n"
@@ -254,7 +246,6 @@ static struct option lgopts[] = {
{ EVT_POOL_SZ, 1, 0, 0 },
{ EVT_NB_PKTS, 1, 0, 0 },
{ EVT_WKR_DEQ_DEP, 1, 0, 0 },
- { EVT_SCHED_LCORE, 1, 0, 0 },
{ EVT_SCHED_TYPE_LIST, 1, 0, 0 },
{ EVT_FWD_LATENCY, 0, 0, 0 },
{ EVT_QUEUE_PRIORITY, 0, 0, 0 },
@@ -278,7 +269,6 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
{ EVT_POOL_SZ, evt_parse_pool_sz},
{ EVT_NB_PKTS, evt_parse_nb_pkts},
{ EVT_WKR_DEQ_DEP, evt_parse_wkr_deq_dep},
- { EVT_SCHED_LCORE, evt_parse_slcore},
{ EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
index d8a9fdcc..a9a91252 100644
--- a/app/test-eventdev/evt_options.h
+++ b/app/test-eventdev/evt_options.h
@@ -47,7 +47,6 @@
#define EVT_VERBOSE ("verbose")
#define EVT_DEVICE ("dev")
#define EVT_TEST ("test")
-#define EVT_SCHED_LCORE ("slcore")
#define EVT_PROD_LCORES ("plcores")
#define EVT_WORK_LCORES ("wlcores")
#define EVT_NB_FLOWS ("nb_flows")
@@ -67,7 +66,6 @@ struct evt_options {
bool plcores[RTE_MAX_LCORE];
bool wlcores[RTE_MAX_LCORE];
uint8_t sched_type_list[EVT_MAX_STAGES];
- int slcore;
uint32_t nb_flows;
int socket_id;
int pool_sz;
@@ -219,12 +217,6 @@ evt_dump_nb_flows(struct evt_options *opt)
}
static inline void
-evt_dump_scheduler_lcore(struct evt_options *opt)
-{
- evt_dump("scheduler lcore", "%d", opt->slcore);
-}
-
-static inline void
evt_dump_worker_dequeue_depth(struct evt_options *opt)
{
evt_dump("worker deq depth", "%d", opt->wkr_deq_dep);
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 7e6c67d4..4ee0dea8 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -179,6 +179,12 @@ order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
+ ret = evt_service_setup(opt->dev_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 80e14c08..7cfe7fac 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -292,9 +292,6 @@ order_launch_lcores(struct evt_test *test, struct evt_options *opt,
int64_t old_remaining = -1;
while (t->err == false) {
-
- rte_event_schedule(opt->dev_id);
-
uint64_t new_cycles = rte_get_timer_cycles();
int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index beadd9c3..eef69a4c 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -164,7 +164,7 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
/* q0 (ordered queue) configuration */
struct rte_event_queue_conf q0_ordered_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
.nb_atomic_flows = opt->nb_flows,
.nb_atomic_order_sequences = opt->nb_flows,
};
@@ -177,7 +177,7 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
/* q1 (atomic queue) configuration */
struct rte_event_queue_conf q1_atomic_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.nb_atomic_flows = opt->nb_flows,
.nb_atomic_order_sequences = opt->nb_flows,
};
@@ -192,6 +192,12 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
+ ret = evt_service_setup(opt->dev_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 9c3efa3a..0e9f2db0 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -221,6 +221,12 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
+ ret = evt_service_setup(opt->dev_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 7b092994..e77b4723 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -88,18 +88,6 @@ perf_producer(void *arg)
return 0;
}
-static inline int
-scheduler(void *arg)
-{
- struct test_perf *t = arg;
- const uint8_t dev_id = t->opt->dev_id;
-
- while (t->done == false)
- rte_event_schedule(dev_id);
-
- return 0;
-}
-
static inline uint64_t
processed_pkts(struct test_perf *t)
{
@@ -163,15 +151,6 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
port_idx++;
}
- /* launch scheduler */
- if (!evt_has_distributed_sched(opt->dev_id)) {
- ret = rte_eal_remote_launch(scheduler, t, opt->slcore);
- if (ret) {
- evt_err("failed to launch sched %d", opt->slcore);
- return ret;
- }
- }
-
const uint64_t total_pkts = opt->nb_pkts *
evt_nr_active_lcores(opt->plcores);
@@ -307,10 +286,9 @@ int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
unsigned int lcores;
- bool need_slcore = !evt_has_distributed_sched(opt->dev_id);
- /* N producer + N worker + 1 scheduler(based on dev capa) + 1 master */
- lcores = need_slcore ? 4 : 3;
+ /* N producer + N worker + 1 master */
+ lcores = 3;
if (rte_lcore_count() < lcores) {
evt_err("test need minimum %d lcores", lcores);
@@ -322,10 +300,6 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
evt_err("worker lcores overlaps with master lcore");
return -1;
}
- if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
- evt_err("worker lcores overlaps with scheduler lcore");
- return -1;
- }
if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
evt_err("worker lcores overlaps producer lcores");
return -1;
@@ -344,10 +318,6 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
evt_err("producer lcores overlaps with master lcore");
return -1;
}
- if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
- evt_err("producer lcores overlaps with scheduler lcore");
- return -1;
- }
if (evt_has_disabled_lcore(opt->plcores)) {
evt_err("one or more producer lcores are not enabled");
return -1;
@@ -357,17 +327,6 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
return -1;
}
- /* Validate scheduler lcore */
- if (!evt_has_distributed_sched(opt->dev_id) &&
- opt->slcore == (int)rte_get_master_lcore()) {
- evt_err("scheduler lcore and master lcore should be different");
- return -1;
- }
- if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
- evt_err("scheduler lcore is not enabled");
- return -1;
- }
-
if (evt_has_invalid_stage(opt))
return -1;
@@ -405,8 +364,6 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
evt_dump_producer_lcores(opt);
evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
evt_dump_worker_lcores(opt);
- if (!evt_has_distributed_sched(opt->dev_id))
- evt_dump_scheduler_lcore(opt);
evt_dump_nb_stages(opt);
evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
evt_dump("nb_evdev_queues", "%d", nb_queues);
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 4956586c..c6fc70cd 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -159,6 +159,7 @@ int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues);
+int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
int (*worker)(void *));
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 658c08a0..d843eea1 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -205,8 +205,8 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
};
/* queue configurations */
for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
- q_conf.event_queue_cfg = evt_sched_type2queue_cfg
- (opt->sched_type_list[queue % nb_stages]);
+ q_conf.schedule_type =
+ (opt->sched_type_list[queue % nb_stages]);
if (opt->q_priority) {
uint8_t stage_pos = queue % nb_stages;
@@ -232,6 +232,12 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
+ ret = evt_service_setup(opt->dev_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+
ret = rte_event_dev_start(opt->dev_id);
if (ret) {
evt_err("failed to start eventdev %d", opt->dev_id);
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index c36be19f..d21308fc 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -48,6 +48,8 @@ SRCS-y := testpmd.c
SRCS-y += parameters.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_mtr.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_tm.c
SRCS-y += config.c
SRCS-y += iofwd.c
SRCS-y += macfwd.c
@@ -59,6 +61,10 @@ SRCS-y += csumonly.c
SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC)$(CONFIG_RTE_LIBRTE_SCHED),yy)
+SRCS-y += tm.c
+endif
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
@@ -81,6 +87,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y)
LDLIBS += -lrte_pmd_xenvirt
endif
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC),y)
+LDLIBS += -lrte_pmd_softnic
+endif
+
endif
CFLAGS_cmdline.o := -D_GNU_SOURCE
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cd8c3585..f71d9630 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -99,6 +99,8 @@
#include <rte_pmd_bnxt.h>
#endif
#include "testpmd.h"
+#include "cmdline_mtr.h"
+#include "cmdline_tm.h"
static struct cmdline *testpmd_cl;
@@ -230,6 +232,27 @@ static void cmd_help_long_parsed(void *parsed_result,
"clear vf stats (port_id) (vf_id)\n"
" Reset a VF's statistics.\n\n"
+
+ "show port (port_id) pctype mapping\n"
+ " Get flow ptype to pctype mapping on a port\n\n"
+
+ "show port meter stats (port_id) (meter_id) (clear)\n"
+ " Get meter stats on a port\n\n"
+ "show port tm cap (port_id)\n"
+ " Display the port TM capability.\n\n"
+
+ "show port tm level cap (port_id) (level_id)\n"
+ " Display the port TM hierarchical level capability.\n\n"
+
+ "show port tm node cap (port_id) (node_id)\n"
+ " Display the port TM node capability.\n\n"
+
+ "show port tm node type (port_id) (node_id)\n"
+ " Display the port TM node type.\n\n"
+
+ "show port tm node stats (port_id) (node_id) (clear)\n"
+ " Display the port TM node stats.\n\n"
+
);
}
@@ -423,13 +446,27 @@ static void cmd_help_long_parsed(void *parsed_result,
"tso show (portid)"
" Display the status of TCP Segmentation Offload.\n\n"
- "gro (on|off) (port_id)"
+ "set port (port_id) gro on|off\n"
" Enable or disable Generic Receive Offload in"
" csum forwarding engine.\n\n"
- "gro set (max_flow_num) (max_item_num_per_flow) (port_id)\n"
- " Set max flow number and max packet number per-flow"
- " for GRO.\n\n"
+ "show port (port_id) gro\n"
+ " Display GRO configuration.\n\n"
+
+ "set gro flush (cycles)\n"
+ " Set the cycle to flush GROed packets from"
+ " reassembly tables.\n\n"
+
+ "set port (port_id) gso (on|off)"
+ " Enable or disable Generic Segmentation Offload in"
+ " csum forwarding engine.\n\n"
+
+ "set gso segsz (length)\n"
+ " Set max packet length for output GSO segments,"
+ " including packet header and payload.\n\n"
+
+ "show port (port_id) gso\n"
+ " Show GSO configuration.\n\n"
"set fwd (%s)\n"
" Set packet forwarding mode.\n\n"
@@ -489,6 +526,10 @@ static void cmd_help_long_parsed(void *parsed_result,
" e.g., 'set stat_qmap rx 0 2 5' sets rx queue 2"
" on port 0 to mapping 5.\n\n"
+ "set xstats-hide-zero on|off\n"
+ " Set the option to hide the zero values"
+ " for xstats display.\n"
+
"set port (port_id) vf (vf_id) rx|tx on|off\n"
" Enable/Disable a VF receive/tranmit from a port\n\n"
@@ -619,6 +660,11 @@ static void cmd_help_long_parsed(void *parsed_result,
"E-tag set filter del e-tag-id (value) port (port_id)\n"
" Delete an E-tag forwarding filter on a port\n\n"
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+ "set port tm hierarchy default (port_id)\n"
+ " Set default traffic Management hierarchy on a port\n\n"
+
+#endif
"ddp add (port_id) (profile_path[,output_path])\n"
" Load a profile package on a port\n\n"
@@ -637,6 +683,100 @@ static void cmd_help_long_parsed(void *parsed_result,
"ptype mapping update (port_id) (hw_ptype) (sw_ptype)\n"
" Update a ptype mapping item on a port\n\n"
+ "set port (port_id) queue-region region_id (value) "
+ "queue_start_index (value) queue_num (value)\n"
+ " Set a queue region on a port\n\n"
+
+ "set port (port_id) queue-region region_id (value) "
+ "flowtype (value)\n"
+ " Set a flowtype region index on a port\n\n"
+
+ "set port (port_id) queue-region UP (value) region_id (value)\n"
+ " Set the mapping of User Priority to "
+ "queue region on a port\n\n"
+
+ "set port (port_id) queue-region flush (on|off)\n"
+ " flush all queue region related configuration\n\n"
+
+ "add port meter profile srtcm_rfc2697 (port_id) (profile_id) (cir) (cbs) (ebs) (color_aware)\n"
+ " meter profile add - srtcm rfc 2697\n\n"
+
+ "add port meter profile trtcm_rfc2698 (port_id) (profile_id) (cir) (pir) (cbs) (pbs)\n"
+ " meter profile add - trtcm rfc 2698\n\n"
+
+ "add port meter profile trtcm_rfc4115 (port_id) (profile_id) (cir) (eir) (cbs) (ebs)\n"
+ " meter profile add - trtcm rfc 4115\n\n"
+
+ "del port meter profile (port_id) (profile_id)\n"
+ " meter profile delete\n\n"
+
+ "set port meter (port_id) (mtr_id) (profile_id) (g_action) (y_action) (r_action) (stats_mask) (shared)\n"
+ " meter create\n\n"
+
+ "del port meter (port_id) (mtr_id)\n"
+ " meter delete\n\n"
+
+ "set port meter profile (port_id) (mtr_id) (profile_id)\n"
+ " meter update meter profile\n\n"
+
+ "set port meter policer action (port_id) (mtr_id) (color) (action)\n"
+ " meter update policer action\n\n"
+
+ "set port meter stats mask (port_id) (mtr_id) (stats_mask)\n"
+ " meter update stats\n\n"
+
+ "show port (port_id) queue-region\n"
+ " show all queue region related configuration info\n\n"
+
+ "add port tm node shaper profile (port_id) (shaper_profile_id)"
+ " (tb_rate) (tb_size) (packet_length_adjust)\n"
+ " Add port tm node private shaper profile.\n\n"
+
+ "del port tm node shaper profile (port_id) (shaper_profile_id)\n"
+ " Delete port tm node private shaper profile.\n\n"
+
+ "add port tm node shared shaper (port_id) (shared_shaper_id)"
+ " (shaper_profile_id)\n"
+ " Add/update port tm node shared shaper.\n\n"
+
+ "del port tm node shared shaper (port_id) (shared_shaper_id)\n"
+ " Delete port tm node shared shaper.\n\n"
+
+ "set port tm node shaper profile (port_id) (node_id)"
+ " (shaper_profile_id)\n"
+ " Set port tm node shaper profile.\n\n"
+
+ "add port tm node wred profile (port_id) (wred_profile_id)"
+ " (color_g) (min_th_g) (max_th_g) (maxp_inv_g) (wq_log2_g)"
+ " (color_y) (min_th_y) (max_th_y) (maxp_inv_y) (wq_log2_y)"
+ " (color_r) (min_th_r) (max_th_r) (maxp_inv_r) (wq_log2_r)\n"
+ " Add port tm node wred profile.\n\n"
+
+ "del port tm node wred profile (port_id) (wred_profile_id)\n"
+ " Delete port tm node wred profile.\n\n"
+
+ "add port tm nonleaf node (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight) (level_id) (shaper_profile_id)"
+ " (n_sp_priorities) (stats_mask) (n_shared_shapers)"
+ " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
+ " Add port tm nonleaf node.\n\n"
+
+ "add port tm leaf node (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight) (level_id) (shaper_profile_id)"
+ " (cman_mode) (wred_profile_id) (stats_mask) (n_shared_shapers)"
+ " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n"
+ " Add port tm leaf node.\n\n"
+
+ "del port tm node (port_id) (node_id)\n"
+ " Delete port tm node.\n\n"
+
+ "set port tm node parent (port_id) (node_id) (parent_node_id)"
+ " (priority) (weight)\n"
+ " Set port tm node parent.\n\n"
+
+ "port tm hierarchy commit (port_id) (clean_on_fail)\n"
+ " Commit tm hierarchy.\n\n"
+
, list_pkt_forwarding_modes()
);
}
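/*
 * Worked example for the srTCM (RFC 2697) profile added above: cir is
 * the committed information rate in bytes per second, cbs and ebs the
 * committed and excess burst sizes in bytes. Hypothetically,
 *
 *   testpmd> add port meter profile srtcm_rfc2697 0 0 1250000 2048 2048 0
 *
 * would program profile 0 on port 0 for 1250000 B/s (10 Mbit/s) with
 * 2 KB committed and excess buckets; the values are illustrative only.
 */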
@@ -675,13 +815,14 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config all max-pkt-len (value)\n"
" Set the max packet length.\n\n"
- "port config all (crc-strip|scatter|rx-cksum|hw-vlan|hw-vlan-filter|"
+ "port config all (crc-strip|scatter|rx-cksum|rx-timestamp|hw-vlan|hw-vlan-filter|"
"hw-vlan-strip|hw-vlan-extend|drop-en)"
" (on|off)\n"
" Set crc-strip/scatter/rx-checksum/hardware-vlan/drop_en"
" for ports.\n\n"
- "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)\n"
+ "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|"
+ "geneve|nvgre|none|<flowtype_id>)\n"
" Set the RSS mode.\n\n"
"port config port-id rss reta (hash,queue)[,(hash,queue)]\n"
@@ -716,6 +857,13 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config (port_id|all) l2-tunnel E-tag"
" (enable|disable)\n"
" Enable/disable the E-tag support.\n\n"
+
+ "port config (port_id) pctype mapping reset\n"
+ " Reset flow type to pctype mapping on a port\n\n"
+
+ "port config (port_id) pctype mapping update"
+ " (pctype_id_0[,pctype_id_1]*) (flow_type_id)\n"
+ " Update a flow type to pctype mapping item on a port\n\n"
);
}
@@ -878,8 +1026,8 @@ static void cmd_help_long_parsed(void *parsed_result,
"set_hash_input_set (port_id) (ipv4|ipv4-frag|"
"ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
- "l2_payload) (ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|"
- "dst-ipv6|ipv4-tos|ipv4-proto|ipv6-tc|"
+ "l2_payload|<flowtype_id>) (ovlan|ivlan|src-ipv4|dst-ipv4|"
+ "src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|ipv6-tc|"
"ipv6-next-header|udp-src-port|udp-dst-port|"
"tcp-src-port|tcp-dst-port|sctp-src-port|"
"sctp-dst-port|sctp-veri-tag|udp-key|gre-key|fld-1st|"
@@ -972,6 +1120,8 @@ static void cmd_operate_port_parsed(void *parsed_result,
stop_port(RTE_PORT_ALL);
else if (!strcmp(res->name, "close"))
close_port(RTE_PORT_ALL);
+ else if (!strcmp(res->name, "reset"))
+ reset_port(RTE_PORT_ALL);
else
printf("Unknown parameter\n");
}
@@ -981,14 +1131,14 @@ cmdline_parse_token_string_t cmd_operate_port_all_cmd =
"port");
cmdline_parse_token_string_t cmd_operate_port_all_port =
TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, name,
- "start#stop#close");
+ "start#stop#close#reset");
cmdline_parse_token_string_t cmd_operate_port_all_all =
TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, value, "all");
cmdline_parse_inst_t cmd_operate_port = {
.f = cmd_operate_port_parsed,
.data = NULL,
- .help_str = "port start|stop|close all: Start/Stop/Close all ports",
+ .help_str = "port start|stop|close all: Start/Stop/Close/Reset all ports",
.tokens = {
(void *)&cmd_operate_port_all_cmd,
(void *)&cmd_operate_port_all_port,
@@ -1016,6 +1166,8 @@ static void cmd_operate_specific_port_parsed(void *parsed_result,
stop_port(res->value);
else if (!strcmp(res->name, "close"))
close_port(res->value);
+ else if (!strcmp(res->name, "reset"))
+ reset_port(res->value);
else
printf("Unknown parameter\n");
}
@@ -1025,7 +1177,7 @@ cmdline_parse_token_string_t cmd_operate_specific_port_cmd =
keyword, "port");
cmdline_parse_token_string_t cmd_operate_specific_port_port =
TOKEN_STRING_INITIALIZER(struct cmd_operate_specific_port_result,
- name, "start#stop#close");
+ name, "start#stop#close#reset");
cmdline_parse_token_num_t cmd_operate_specific_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_operate_specific_port_result,
value, UINT8);
@@ -1033,7 +1185,7 @@ cmdline_parse_token_num_t cmd_operate_specific_port_id =
cmdline_parse_inst_t cmd_operate_specific_port = {
.f = cmd_operate_specific_port_parsed,
.data = NULL,
- .help_str = "port start|stop|close <port_id>: Start/Stop/Close port_id",
+ .help_str = "port start|stop|close <port_id>: Start/Stop/Close/Reset port_id",
.tokens = {
(void *)&cmd_operate_specific_port_cmd,
(void *)&cmd_operate_specific_port_port,
@@ -1088,7 +1240,7 @@ cmdline_parse_inst_t cmd_operate_attach_port = {
struct cmd_operate_detach_port_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t keyword;
- uint8_t port_id;
+ portid_t port_id;
};
static void cmd_operate_detach_port_parsed(void *parsed_result,
@@ -1111,7 +1263,7 @@ cmdline_parse_token_string_t cmd_operate_detach_port_keyword =
keyword, "detach");
cmdline_parse_token_num_t cmd_operate_detach_port_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_operate_detach_port_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_operate_detach_port = {
.f = cmd_operate_detach_port_parsed,
@@ -1490,7 +1642,7 @@ struct cmd_config_mtu_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t keyword;
cmdline_fixed_string_t mtu;
- uint8_t port_id;
+ portid_t port_id;
uint16_t value;
};
@@ -1518,7 +1670,7 @@ cmdline_parse_token_string_t cmd_config_mtu_mtu =
TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, keyword,
"mtu");
cmdline_parse_token_num_t cmd_config_mtu_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_config_mtu_value =
TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, value, UINT16);
@@ -1584,6 +1736,15 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
printf("Unknown parameter\n");
return;
}
+ } else if (!strcmp(res->name, "rx-timestamp")) {
+ if (!strcmp(res->value, "on"))
+ rx_mode.hw_timestamp = 1;
+ else if (!strcmp(res->value, "off"))
+ rx_mode.hw_timestamp = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
} else if (!strcmp(res->name, "hw-vlan")) {
if (!strcmp(res->value, "on")) {
rx_mode.hw_vlan_filter = 1;
@@ -1652,7 +1813,7 @@ cmdline_parse_token_string_t cmd_config_rx_mode_flag_all =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, all, "all");
cmdline_parse_token_string_t cmd_config_rx_mode_flag_name =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, name,
- "crc-strip#scatter#rx-cksum#hw-vlan#"
+ "crc-strip#scatter#rx-cksum#rx-timestamp#hw-vlan#"
"hw-vlan-filter#hw-vlan-strip#hw-vlan-extend");
cmdline_parse_token_string_t cmd_config_rx_mode_flag_value =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, value,
@@ -1661,7 +1822,7 @@ cmdline_parse_token_string_t cmd_config_rx_mode_flag_value =
cmdline_parse_inst_t cmd_config_rx_mode_flag = {
.f = cmd_config_rx_mode_flag_parsed,
.data = NULL,
- .help_str = "port config all crc-strip|scatter|rx-cksum|hw-vlan|"
+ .help_str = "port config all crc-strip|scatter|rx-cksum|rx-timestamp|hw-vlan|"
"hw-vlan-filter|hw-vlan-strip|hw-vlan-extend on|off",
.tokens = {
(void *)&cmd_config_rx_mode_flag_port,
@@ -1688,7 +1849,7 @@ cmd_config_rss_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_config_rss *res = parsed_result;
- struct rte_eth_rss_conf rss_conf;
+ struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, };
int diag;
uint8_t i;
@@ -1716,6 +1877,9 @@ cmd_config_rss_parsed(void *parsed_result,
rss_conf.rss_hf = ETH_RSS_NVGRE;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
+ else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
+ atoi(res->value) < 64)
+ rss_conf.rss_hf = 1ULL << atoi(res->value);
else {
printf("Unknown parameter\n");
return;
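/*
 * The numeric branch above lets a raw flow type ID choose the RSS hash
 * field directly: rss_hf is a 64-bit mask, so flow type n selects bit n.
 * A sketch of the same mapping outside the parser (set_rss_by_flowtype
 * is a hypothetical helper):
 */
#include <errno.h>
#include <rte_ethdev.h>

static int
set_rss_by_flowtype(uint16_t port_id, int flowtype_id)
{
	struct rte_eth_rss_conf conf = { .rss_key = NULL, .rss_key_len = 0 };

	if (flowtype_id <= 0 || flowtype_id >= 64)
		return -EINVAL;
	conf.rss_hf = 1ULL << flowtype_id; /* one flow type per rss_hf bit */
	return rte_eth_dev_rss_hash_update(port_id, &conf);
}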
@@ -1739,14 +1903,13 @@ cmdline_parse_token_string_t cmd_config_rss_all =
cmdline_parse_token_string_t cmd_config_rss_name =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss, name, "rss");
cmdline_parse_token_string_t cmd_config_rss_value =
- TOKEN_STRING_INITIALIZER(struct cmd_config_rss, value,
- "all#ip#tcp#udp#sctp#ether#port#vxlan#geneve#nvgre#none");
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rss, value, NULL);
cmdline_parse_inst_t cmd_config_rss = {
.f = cmd_config_rss_parsed,
.data = NULL,
.help_str = "port config all rss "
- "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none",
+ "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|<flowtype_id>",
.tokens = {
(void *)&cmd_config_rss_port,
(void *)&cmd_config_rss_keyword,
@@ -1761,7 +1924,7 @@ cmdline_parse_inst_t cmd_config_rss = {
struct cmd_config_rss_hash_key {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t rss_hash_key;
cmdline_fixed_string_t rss_type;
cmdline_fixed_string_t key;
@@ -1843,7 +2006,7 @@ cmdline_parse_token_string_t cmd_config_rss_hash_key_config =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, config,
"config");
cmdline_parse_token_num_t cmd_config_rss_hash_key_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_rss_hash_key, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rss_hash_key, port_id, UINT16);
cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_hash_key =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key,
rss_hash_key, "rss-hash-key");
@@ -1878,7 +2041,7 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
/* *** configure port rxq/txq start/stop *** */
struct cmd_config_rxtx_queue {
cmdline_fixed_string_t port;
- uint8_t portid;
+ portid_t portid;
cmdline_fixed_string_t rxtxq;
uint16_t qid;
cmdline_fixed_string_t opname;
@@ -1946,7 +2109,7 @@ cmd_config_rxtx_queue_parsed(void *parsed_result,
cmdline_parse_token_string_t cmd_config_rxtx_queue_port =
TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, port, "port");
cmdline_parse_token_num_t cmd_config_rxtx_queue_portid =
- TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, portid, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, portid, UINT16);
cmdline_parse_token_string_t cmd_config_rxtx_queue_rxtxq =
TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, rxtxq, "rxq#txq");
cmdline_parse_token_num_t cmd_config_rxtx_queue_qid =
@@ -1973,7 +2136,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
struct cmd_config_rss_reta {
cmdline_fixed_string_t port;
cmdline_fixed_string_t keyword;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t name;
cmdline_fixed_string_t list_name;
cmdline_fixed_string_t list_of_items;
@@ -2082,7 +2245,7 @@ cmdline_parse_token_string_t cmd_config_rss_reta_port =
cmdline_parse_token_string_t cmd_config_rss_reta_keyword =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, keyword, "config");
cmdline_parse_token_num_t cmd_config_rss_reta_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_rss_reta, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rss_reta, port_id, UINT16);
cmdline_parse_token_string_t cmd_config_rss_reta_name =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, name, "rss");
cmdline_parse_token_string_t cmd_config_rss_reta_list_name =
@@ -2109,7 +2272,7 @@ cmdline_parse_inst_t cmd_config_rss_reta = {
struct cmd_showport_reta {
cmdline_fixed_string_t show;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t rss;
cmdline_fixed_string_t reta;
uint16_t size;
@@ -2164,12 +2327,14 @@ cmd_showport_reta_parsed(void *parsed_result,
struct cmd_showport_reta *res = parsed_result;
struct rte_eth_rss_reta_entry64 reta_conf[8];
struct rte_eth_dev_info dev_info;
+ uint16_t max_reta_size;
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(res->port_id, &dev_info);
- if (dev_info.reta_size == 0 || res->size != dev_info.reta_size ||
- res->size > ETH_RSS_RETA_SIZE_512) {
- printf("Invalid redirection table size: %u\n", res->size);
+ max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
+ if (res->size == 0 || res->size > max_reta_size) {
+ printf("Invalid redirection table size: %u (1-%u)\n",
+ res->size, max_reta_size);
return;
}
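/*
 * The validation above is deliberately relaxed: any size from 1 up to
 * the device's reta_size, capped at ETH_RSS_RETA_SIZE_512 (the largest
 * table the local reta_conf[8] array of 64-entry chunks can hold), is
 * now accepted instead of requiring an exact match with
 * dev_info.reta_size.
 */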
@@ -2188,7 +2353,7 @@ cmdline_parse_token_string_t cmd_showport_reta_show =
cmdline_parse_token_string_t cmd_showport_reta_port =
TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, port, "port");
cmdline_parse_token_num_t cmd_showport_reta_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, port_id, UINT16);
cmdline_parse_token_string_t cmd_showport_reta_rss =
TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, rss, "rss");
cmdline_parse_token_string_t cmd_showport_reta_reta =
@@ -2219,7 +2384,7 @@ cmdline_parse_inst_t cmd_showport_reta = {
struct cmd_showport_rss_hash {
cmdline_fixed_string_t show;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t rss_hash;
cmdline_fixed_string_t rss_type;
cmdline_fixed_string_t key; /* optional argument */
@@ -2240,7 +2405,7 @@ cmdline_parse_token_string_t cmd_showport_rss_hash_show =
cmdline_parse_token_string_t cmd_showport_rss_hash_port =
TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, port, "port");
cmdline_parse_token_num_t cmd_showport_rss_hash_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_showport_rss_hash, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_showport_rss_hash, port_id, UINT16);
cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash =
TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_hash,
"rss-hash");
@@ -2292,7 +2457,7 @@ cmdline_parse_inst_t cmd_showport_rss_hash_key = {
struct cmd_config_dcb {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t dcb;
cmdline_fixed_string_t vt;
cmdline_fixed_string_t vt_en;
@@ -2358,7 +2523,7 @@ cmdline_parse_token_string_t cmd_config_dcb_port =
cmdline_parse_token_string_t cmd_config_dcb_config =
TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, config, "config");
cmdline_parse_token_num_t cmd_config_dcb_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, port_id, UINT16);
cmdline_parse_token_string_t cmd_config_dcb_dcb =
TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, dcb, "dcb");
cmdline_parse_token_string_t cmd_config_dcb_vt =
@@ -2995,7 +3160,7 @@ struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
cmdline_fixed_string_t what;
cmdline_fixed_string_t all;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3022,7 +3187,7 @@ cmdline_parse_token_string_t cmd_rx_vlan_filter_all_all =
all, "all");
cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid =
TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_rx_vlan_filter_all = {
.f = cmd_rx_vlan_filter_all_parsed,
@@ -3148,7 +3313,7 @@ struct cmd_vlan_tpid_result {
cmdline_fixed_string_t vlan_type;
cmdline_fixed_string_t what;
uint16_t tp_id;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3210,7 +3375,7 @@ struct cmd_rx_vlan_filter_result {
cmdline_fixed_string_t rx_vlan;
cmdline_fixed_string_t what;
uint16_t vlan_id;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3237,7 +3402,7 @@ cmdline_parse_token_num_t cmd_rx_vlan_filter_vlanid =
vlan_id, UINT16);
cmdline_parse_token_num_t cmd_rx_vlan_filter_portid =
TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_rx_vlan_filter = {
.f = cmd_rx_vlan_filter_parsed,
@@ -3258,7 +3423,7 @@ cmdline_parse_inst_t cmd_rx_vlan_filter = {
struct cmd_tx_vlan_set_result {
cmdline_fixed_string_t tx_vlan;
cmdline_fixed_string_t set;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vlan_id;
};
@@ -3280,7 +3445,7 @@ cmdline_parse_token_string_t cmd_tx_vlan_set_set =
set, "set");
cmdline_parse_token_num_t cmd_tx_vlan_set_portid =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
vlan_id, UINT16);
@@ -3304,7 +3469,7 @@ cmdline_parse_inst_t cmd_tx_vlan_set = {
struct cmd_tx_vlan_set_qinq_result {
cmdline_fixed_string_t tx_vlan;
cmdline_fixed_string_t set;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vlan_id;
uint16_t vlan_id_outer;
};
@@ -3327,7 +3492,7 @@ cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_set =
set, "set");
cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_portid =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result,
vlan_id, UINT16);
@@ -3356,7 +3521,7 @@ struct cmd_tx_vlan_set_pvid_result {
cmdline_fixed_string_t tx_vlan;
cmdline_fixed_string_t set;
cmdline_fixed_string_t pvid;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vlan_id;
cmdline_fixed_string_t mode;
};
@@ -3385,7 +3550,7 @@ cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_pvid =
pvid, "pvid");
cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_vlan_id =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result,
vlan_id, UINT16);
@@ -3412,7 +3577,7 @@ cmdline_parse_inst_t cmd_tx_vlan_set_pvid = {
struct cmd_tx_vlan_reset_result {
cmdline_fixed_string_t tx_vlan;
cmdline_fixed_string_t reset;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3433,7 +3598,7 @@ cmdline_parse_token_string_t cmd_tx_vlan_reset_reset =
reset, "reset");
cmdline_parse_token_num_t cmd_tx_vlan_reset_portid =
TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_reset_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_tx_vlan_reset = {
.f = cmd_tx_vlan_reset_parsed,
@@ -3455,7 +3620,7 @@ struct cmd_csum_result {
cmdline_fixed_string_t mode;
cmdline_fixed_string_t proto;
cmdline_fixed_string_t hwsw;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3560,7 +3725,7 @@ cmdline_parse_token_string_t cmd_csum_hwsw =
hwsw, "hw#sw");
cmdline_parse_token_num_t cmd_csum_portid =
TOKEN_NUM_INITIALIZER(struct cmd_csum_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_csum_set = {
.f = cmd_csum_parsed,
@@ -3599,7 +3764,7 @@ struct cmd_csum_tunnel_result {
cmdline_fixed_string_t csum;
cmdline_fixed_string_t parse;
cmdline_fixed_string_t onoff;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3633,7 +3798,7 @@ cmdline_parse_token_string_t cmd_csum_tunnel_onoff =
onoff, "on#off");
cmdline_parse_token_num_t cmd_csum_tunnel_portid =
TOKEN_NUM_INITIALIZER(struct cmd_csum_tunnel_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_csum_tunnel = {
.f = cmd_csum_tunnel_parsed,
@@ -3654,7 +3819,7 @@ struct cmd_tso_set_result {
cmdline_fixed_string_t tso;
cmdline_fixed_string_t mode;
uint16_t tso_segsz;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -3697,7 +3862,7 @@ cmdline_parse_token_num_t cmd_tso_set_tso_segsz =
tso_segsz, UINT16);
cmdline_parse_token_num_t cmd_tso_set_portid =
TOKEN_NUM_INITIALIZER(struct cmd_tso_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_tso_set = {
.f = cmd_tso_set_parsed,
@@ -3737,11 +3902,11 @@ struct cmd_tunnel_tso_set_result {
cmdline_fixed_string_t tso;
cmdline_fixed_string_t mode;
uint16_t tso_segsz;
- uint8_t port_id;
+ portid_t port_id;
};
static void
-check_tunnel_tso_nic_support(uint8_t port_id)
+check_tunnel_tso_nic_support(portid_t port_id)
{
struct rte_eth_dev_info dev_info;
@@ -3814,7 +3979,7 @@ cmdline_parse_token_num_t cmd_tunnel_tso_set_tso_segsz =
tso_segsz, UINT16);
cmdline_parse_token_num_t cmd_tunnel_tso_set_portid =
TOKEN_NUM_INITIALIZER(struct cmd_tunnel_tso_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_tunnel_tso_set = {
.f = cmd_tunnel_tso_set_parsed,
@@ -3850,115 +4015,311 @@ cmdline_parse_inst_t cmd_tunnel_tso_show = {
};
/* *** SET GRO FOR A PORT *** */
-struct cmd_gro_result {
+struct cmd_gro_enable_result {
+ cmdline_fixed_string_t cmd_set;
+ cmdline_fixed_string_t cmd_port;
cmdline_fixed_string_t cmd_keyword;
- cmdline_fixed_string_t mode;
- uint8_t port_id;
+ cmdline_fixed_string_t cmd_onoff;
+ portid_t cmd_pid;
};
static void
-cmd_enable_gro_parsed(void *parsed_result,
+cmd_gro_enable_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_gro_result *res;
+ struct cmd_gro_enable_result *res;
res = parsed_result;
- setup_gro(res->mode, res->port_id);
+ if (!strcmp(res->cmd_keyword, "gro"))
+ setup_gro(res->cmd_onoff, res->cmd_pid);
+}
+
+cmdline_parse_token_string_t cmd_gro_enable_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result,
+ cmd_set, "set");
+cmdline_parse_token_string_t cmd_gro_enable_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result,
+ cmd_keyword, "port");
+cmdline_parse_token_num_t cmd_gro_enable_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_enable_result,
+ cmd_pid, UINT16);
+cmdline_parse_token_string_t cmd_gro_enable_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result,
+ cmd_keyword, "gro");
+cmdline_parse_token_string_t cmd_gro_enable_onoff =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result,
+ cmd_onoff, "on#off");
+
+cmdline_parse_inst_t cmd_gro_enable = {
+ .f = cmd_gro_enable_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> gro on|off",
+ .tokens = {
+ (void *)&cmd_gro_enable_set,
+ (void *)&cmd_gro_enable_port,
+ (void *)&cmd_gro_enable_pid,
+ (void *)&cmd_gro_enable_keyword,
+ (void *)&cmd_gro_enable_onoff,
+ NULL,
+ },
+};
+
+/* *** DISPLAY GRO CONFIGURATION *** */
+struct cmd_gro_show_result {
+ cmdline_fixed_string_t cmd_show;
+ cmdline_fixed_string_t cmd_port;
+ cmdline_fixed_string_t cmd_keyword;
+ portid_t cmd_pid;
+};
+
+static void
+cmd_gro_show_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_show_result *res;
+
+ res = parsed_result;
+ if (!strcmp(res->cmd_keyword, "gro"))
+ show_gro(res->cmd_pid);
+}
+
+cmdline_parse_token_string_t cmd_gro_show_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result,
+ cmd_show, "show");
+cmdline_parse_token_string_t cmd_gro_show_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result,
+ cmd_port, "port");
+cmdline_parse_token_num_t cmd_gro_show_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_show_result,
+ cmd_pid, UINT16);
+cmdline_parse_token_string_t cmd_gro_show_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result,
+ cmd_keyword, "gro");
+
+cmdline_parse_inst_t cmd_gro_show = {
+ .f = cmd_gro_show_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> gro",
+ .tokens = {
+ (void *)&cmd_gro_show_show,
+ (void *)&cmd_gro_show_port,
+ (void *)&cmd_gro_show_pid,
+ (void *)&cmd_gro_show_keyword,
+ NULL,
+ },
+};
+
+/* *** SET FLUSH CYCLES FOR GRO *** */
+struct cmd_gro_flush_result {
+ cmdline_fixed_string_t cmd_set;
+ cmdline_fixed_string_t cmd_keyword;
+ cmdline_fixed_string_t cmd_flush;
+ uint8_t cmd_cycles;
+};
+
+static void
+cmd_gro_flush_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gro_flush_result *res;
+
+ res = parsed_result;
+ if ((!strcmp(res->cmd_keyword, "gro")) &&
+ (!strcmp(res->cmd_flush, "flush")))
+ setup_gro_flush_cycles(res->cmd_cycles);
}
-cmdline_parse_token_string_t cmd_gro_keyword =
- TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
+cmdline_parse_token_string_t cmd_gro_flush_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result,
+ cmd_set, "set");
+cmdline_parse_token_string_t cmd_gro_flush_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result,
cmd_keyword, "gro");
-cmdline_parse_token_string_t cmd_gro_mode =
- TOKEN_STRING_INITIALIZER(struct cmd_gro_result,
- mode, "on#off");
-cmdline_parse_token_num_t cmd_gro_pid =
- TOKEN_NUM_INITIALIZER(struct cmd_gro_result,
- port_id, UINT8);
+cmdline_parse_token_string_t cmd_gro_flush_flush =
+ TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result,
+ cmd_flush, "flush");
+cmdline_parse_token_num_t cmd_gro_flush_cycles =
+ TOKEN_NUM_INITIALIZER(struct cmd_gro_flush_result,
+ cmd_cycles, UINT8);
-cmdline_parse_inst_t cmd_enable_gro = {
- .f = cmd_enable_gro_parsed,
+cmdline_parse_inst_t cmd_gro_flush = {
+ .f = cmd_gro_flush_parsed,
.data = NULL,
- .help_str = "gro (on|off) (port_id)",
+ .help_str = "set gro flush <cycles>",
.tokens = {
- (void *)&cmd_gro_keyword,
- (void *)&cmd_gro_mode,
- (void *)&cmd_gro_pid,
+ (void *)&cmd_gro_flush_set,
+ (void *)&cmd_gro_flush_keyword,
+ (void *)&cmd_gro_flush_flush,
+ (void *)&cmd_gro_flush_cycles,
NULL,
},
};
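/*
 * The three GRO commands defined above replace the old
 * "gro (on|off) (port_id)" and "gro set ..." forms; an example session
 * (cycle count illustrative):
 *
 *   testpmd> set port 0 gro on
 *   testpmd> set gro flush 2
 *   testpmd> show port 0 gro
 */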
-/* *** SET MAX FLOW NUMBER AND ITEM NUM PER FLOW FOR GRO *** */
-struct cmd_gro_set_result {
- cmdline_fixed_string_t gro;
- cmdline_fixed_string_t mode;
- uint16_t flow_num;
- uint16_t item_num_per_flow;
- uint8_t port_id;
+/* *** ENABLE/DISABLE GSO *** */
+struct cmd_gso_enable_result {
+ cmdline_fixed_string_t cmd_set;
+ cmdline_fixed_string_t cmd_port;
+ cmdline_fixed_string_t cmd_keyword;
+ cmdline_fixed_string_t cmd_mode;
+ portid_t cmd_pid;
};
static void
-cmd_gro_set_parsed(void *parsed_result,
+cmd_gso_enable_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gso_enable_result *res;
+
+ res = parsed_result;
+ if (!strcmp(res->cmd_keyword, "gso"))
+ setup_gso(res->cmd_mode, res->cmd_pid);
+}
+
+cmdline_parse_token_string_t cmd_gso_enable_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+ cmd_set, "set");
+cmdline_parse_token_string_t cmd_gso_enable_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+ cmd_port, "port");
+cmdline_parse_token_string_t cmd_gso_enable_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+ cmd_keyword, "gso");
+cmdline_parse_token_string_t cmd_gso_enable_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+ cmd_mode, "on#off");
+cmdline_parse_token_num_t cmd_gso_enable_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gso_enable_result,
+ cmd_pid, UINT16);
+
+cmdline_parse_inst_t cmd_gso_enable = {
+ .f = cmd_gso_enable_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> gso on|off",
+ .tokens = {
+ (void *)&cmd_gso_enable_set,
+ (void *)&cmd_gso_enable_port,
+ (void *)&cmd_gso_enable_pid,
+ (void *)&cmd_gso_enable_keyword,
+ (void *)&cmd_gso_enable_mode,
+ NULL,
+ },
+};
+
+/* *** SET MAX PACKET LENGTH FOR GSO SEGMENTS *** */
+struct cmd_gso_size_result {
+ cmdline_fixed_string_t cmd_set;
+ cmdline_fixed_string_t cmd_keyword;
+ cmdline_fixed_string_t cmd_segsz;
+ uint16_t cmd_size;
+};
+
+static void
+cmd_gso_size_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_gro_set_result *res = parsed_result;
+ struct cmd_gso_size_result *res = parsed_result;
- if (port_id_is_invalid(res->port_id, ENABLED_WARN))
- return;
if (test_done == 0) {
- printf("Before set GRO flow_num and item_num_per_flow,"
- " please stop forwarding first\n");
+ printf("Before setting GSO segsz, please first"
+ " stop fowarding\n");
return;
}
- if (!strcmp(res->mode, "set")) {
- if (res->flow_num == 0)
- printf("Invalid flow number. Revert to default value:"
- " %u.\n", GRO_DEFAULT_FLOW_NUM);
+ if (!strcmp(res->cmd_keyword, "gso") &&
+ !strcmp(res->cmd_segsz, "segsz")) {
+ if (res->cmd_size < RTE_GSO_SEG_SIZE_MIN)
+ printf("gso_size should be larger than %zu."
+ " Please input a legal value\n",
+ RTE_GSO_SEG_SIZE_MIN);
else
- gro_ports[res->port_id].param.max_flow_num =
- res->flow_num;
+ gso_max_segment_size = res->cmd_size;
+ }
+}
- if (res->item_num_per_flow == 0)
- printf("Invalid item number per-flow. Revert"
- " to default value:%u.\n",
- GRO_DEFAULT_ITEM_NUM_PER_FLOW);
- else
- gro_ports[res->port_id].param.max_item_per_flow =
- res->item_num_per_flow;
+cmdline_parse_token_string_t cmd_gso_size_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+ cmd_set, "set");
+cmdline_parse_token_string_t cmd_gso_size_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+ cmd_keyword, "gso");
+cmdline_parse_token_string_t cmd_gso_size_segsz =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+ cmd_segsz, "segsz");
+cmdline_parse_token_num_t cmd_gso_size_size =
+ TOKEN_NUM_INITIALIZER(struct cmd_gso_size_result,
+ cmd_size, UINT16);
+
+cmdline_parse_inst_t cmd_gso_size = {
+ .f = cmd_gso_size_parsed,
+ .data = NULL,
+ .help_str = "set gso segsz <length>",
+ .tokens = {
+ (void *)&cmd_gso_size_set,
+ (void *)&cmd_gso_size_keyword,
+ (void *)&cmd_gso_size_segsz,
+ (void *)&cmd_gso_size_size,
+ NULL,
+ },
+};
+
+/* *** SHOW GSO CONFIGURATION *** */
+struct cmd_gso_show_result {
+ cmdline_fixed_string_t cmd_show;
+ cmdline_fixed_string_t cmd_port;
+ cmdline_fixed_string_t cmd_keyword;
+ portid_t cmd_pid;
+};
+
+static void
+cmd_gso_show_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_gso_show_result *res = parsed_result;
+
+ if (!rte_eth_dev_is_valid_port(res->cmd_pid)) {
+ printf("invalid port id %u\n", res->cmd_pid);
+ return;
+ }
+ if (!strcmp(res->cmd_keyword, "gso")) {
+ if (gso_ports[res->cmd_pid].enable) {
+ printf("Max GSO'd packet size: %uB\n"
+ "Supported GSO types: TCP/IPv4, "
+ "VxLAN with inner TCP/IPv4 packet, "
+ "GRE with inner TCP/IPv4 packet\n",
+ gso_max_segment_size);
+ } else
+ printf("GSO is not enabled on Port %u\n", res->cmd_pid);
}
}
-cmdline_parse_token_string_t cmd_gro_set_gro =
- TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
- gro, "gro");
-cmdline_parse_token_string_t cmd_gro_set_mode =
- TOKEN_STRING_INITIALIZER(struct cmd_gro_set_result,
- mode, "set");
-cmdline_parse_token_num_t cmd_gro_set_flow_num =
- TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
- flow_num, UINT16);
-cmdline_parse_token_num_t cmd_gro_set_item_num_per_flow =
- TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
- item_num_per_flow, UINT16);
-cmdline_parse_token_num_t cmd_gro_set_portid =
- TOKEN_NUM_INITIALIZER(struct cmd_gro_set_result,
- port_id, UINT8);
+cmdline_parse_token_string_t cmd_gso_show_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+ cmd_show, "show");
+cmdline_parse_token_string_t cmd_gso_show_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+ cmd_port, "port");
+cmdline_parse_token_string_t cmd_gso_show_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+ cmd_keyword, "gso");
+cmdline_parse_token_num_t cmd_gso_show_pid =
+ TOKEN_NUM_INITIALIZER(struct cmd_gso_show_result,
+ cmd_pid, UINT16);
-cmdline_parse_inst_t cmd_gro_set = {
- .f = cmd_gro_set_parsed,
+cmdline_parse_inst_t cmd_gso_show = {
+ .f = cmd_gso_show_parsed,
.data = NULL,
- .help_str = "gro set <max_flow_num> <max_item_num_per_flow> "
- "<port_id>: set max flow number and max packet number per-flow "
- "for GRO",
+ .help_str = "show port <port_id> gso",
.tokens = {
- (void *)&cmd_gro_set_gro,
- (void *)&cmd_gro_set_mode,
- (void *)&cmd_gro_set_flow_num,
- (void *)&cmd_gro_set_item_num_per_flow,
- (void *)&cmd_gro_set_portid,
+ (void *)&cmd_gso_show_show,
+ (void *)&cmd_gso_show_port,
+ (void *)&cmd_gso_show_pid,
+ (void *)&cmd_gso_show_keyword,
NULL,
},
};
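/*
 * Matching example for the GSO commands above; segsz bounds each output
 * segment, headers included, and must be at least RTE_GSO_SEG_SIZE_MIN:
 *
 *   testpmd> set port 0 gso on
 *   testpmd> set gso segsz 1400
 *   testpmd> show port 0 gso
 */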
@@ -4048,7 +4409,7 @@ struct cmd_set_bypass_mode_result {
cmdline_fixed_string_t bypass;
cmdline_fixed_string_t mode;
cmdline_fixed_string_t value;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -4091,7 +4452,7 @@ cmdline_parse_token_string_t cmd_setbypass_mode_value =
value, "normal#bypass#isolate");
cmdline_parse_token_num_t cmd_setbypass_mode_port =
TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_mode_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_set_bypass_mode = {
.f = cmd_set_bypass_mode_parsed,
@@ -4116,7 +4477,7 @@ struct cmd_set_bypass_event_result {
cmdline_fixed_string_t event_value;
cmdline_fixed_string_t mode;
cmdline_fixed_string_t mode_value;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -4197,7 +4558,7 @@ cmdline_parse_token_string_t cmd_setbypass_event_mode_value =
mode_value, "normal#bypass#isolate");
cmdline_parse_token_num_t cmd_setbypass_event_port =
TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_event_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_set_bypass_event = {
.f = cmd_set_bypass_event_parsed,
@@ -4285,7 +4646,7 @@ struct cmd_show_bypass_config_result {
cmdline_fixed_string_t show;
cmdline_fixed_string_t bypass;
cmdline_fixed_string_t config;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -4364,7 +4725,7 @@ cmdline_parse_token_string_t cmd_showbypass_config_config =
config, "config");
cmdline_parse_token_num_t cmd_showbypass_config_port =
TOKEN_NUM_INITIALIZER(struct cmd_show_bypass_config_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_show_bypass_config = {
.f = cmd_show_bypass_config_parsed,
@@ -4387,7 +4748,7 @@ struct cmd_set_bonding_mode_result {
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t mode;
uint8_t value;
- uint8_t port_id;
+ portid_t port_id;
};
static void cmd_set_bonding_mode_parsed(void *parsed_result,
@@ -4416,7 +4777,7 @@ TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result,
value, UINT8);
cmdline_parse_token_num_t cmd_setbonding_mode_port =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_set_bonding_mode = {
.f = cmd_set_bonding_mode_parsed,
@@ -4439,7 +4800,7 @@ struct cmd_set_bonding_lacp_dedicated_queues_result {
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t lacp;
cmdline_fixed_string_t dedicated_queues;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mode;
};
@@ -4490,7 +4851,7 @@ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
dedicated_queues, "dedicated_queues");
cmdline_parse_token_num_t cmd_setbonding_lacp_dedicated_queues_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_mode =
TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result,
mode, "enable#disable");
@@ -4517,7 +4878,7 @@ struct cmd_set_bonding_balance_xmit_policy_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t balance_xmit_policy;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t policy;
};
@@ -4558,7 +4919,7 @@ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
balance_xmit_policy, "balance_xmit_policy");
cmdline_parse_token_num_t cmd_setbonding_balance_xmit_policy_port =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_policy =
TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
policy, "l2#l23#l34");
@@ -4584,7 +4945,7 @@ struct cmd_show_bonding_config_result {
cmdline_fixed_string_t show;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t config;
- uint8_t port_id;
+ portid_t port_id;
};
static void cmd_show_bonding_config_parsed(void *parsed_result,
@@ -4593,7 +4954,7 @@ static void cmd_show_bonding_config_parsed(void *parsed_result,
{
struct cmd_show_bonding_config_result *res = parsed_result;
int bonding_mode, agg_mode;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ portid_t slaves[RTE_MAX_ETHPORTS];
int num_slaves, num_active_slaves;
int primary_id;
int i;
@@ -4706,7 +5067,7 @@ TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result,
config, "config");
cmdline_parse_token_num_t cmd_showbonding_config_port =
TOKEN_NUM_INITIALIZER(struct cmd_show_bonding_config_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_show_bonding_config = {
.f = cmd_show_bonding_config_parsed,
@@ -4727,8 +5088,8 @@ struct cmd_set_bonding_primary_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t primary;
- uint8_t slave_id;
- uint8_t port_id;
+ portid_t slave_id;
+ portid_t port_id;
};
static void cmd_set_bonding_primary_parsed(void *parsed_result,
@@ -4759,10 +5120,10 @@ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result,
primary, "primary");
cmdline_parse_token_num_t cmd_setbonding_primary_slave =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result,
- slave_id, UINT8);
+ slave_id, UINT16);
cmdline_parse_token_num_t cmd_setbonding_primary_port =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_set_bonding_primary = {
.f = cmd_set_bonding_primary_parsed,
@@ -4784,8 +5145,8 @@ struct cmd_add_bonding_slave_result {
cmdline_fixed_string_t add;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t slave;
- uint8_t slave_id;
- uint8_t port_id;
+ portid_t slave_id;
+ portid_t port_id;
};
static void cmd_add_bonding_slave_parsed(void *parsed_result,
@@ -4817,10 +5178,10 @@ TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result,
slave, "slave");
cmdline_parse_token_num_t cmd_addbonding_slave_slaveid =
TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result,
- slave_id, UINT8);
+ slave_id, UINT16);
cmdline_parse_token_num_t cmd_addbonding_slave_port =
TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_add_bonding_slave = {
.f = cmd_add_bonding_slave_parsed,
@@ -4842,8 +5203,8 @@ struct cmd_remove_bonding_slave_result {
cmdline_fixed_string_t remove;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t slave;
- uint8_t slave_id;
- uint8_t port_id;
+ portid_t slave_id;
+ portid_t port_id;
};
static void cmd_remove_bonding_slave_parsed(void *parsed_result,
@@ -4875,10 +5236,10 @@ cmdline_parse_token_string_t cmd_removebonding_slave_slave =
slave, "slave");
cmdline_parse_token_num_t cmd_removebonding_slave_slaveid =
TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result,
- slave_id, UINT8);
+ slave_id, UINT16);
cmdline_parse_token_num_t cmd_removebonding_slave_port =
TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_remove_bonding_slave = {
.f = cmd_remove_bonding_slave_parsed,
@@ -4975,7 +5336,7 @@ struct cmd_set_bond_mac_addr_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t mac_addr;
- uint8_t port_num;
+ uint16_t port_num;
struct ether_addr address;
};
@@ -5005,7 +5366,8 @@ cmdline_parse_token_string_t cmd_set_bond_mac_addr_mac =
TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, mac_addr,
"mac_addr");
cmdline_parse_token_num_t cmd_set_bond_mac_addr_portnum =
- TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mac_addr_result, port_num, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mac_addr_result,
+ port_num, UINT16);
cmdline_parse_token_etheraddr_t cmd_set_bond_mac_addr_addr =
TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_bond_mac_addr_result, address);
@@ -5029,7 +5391,7 @@ struct cmd_set_bond_mon_period_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t mon_period;
- uint8_t port_num;
+ uint16_t port_num;
uint32_t period_ms;
};
@@ -5063,7 +5425,7 @@ cmdline_parse_token_string_t cmd_set_bond_mon_period_mon_period =
mon_period, "mon_period");
cmdline_parse_token_num_t cmd_set_bond_mon_period_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result,
- port_num, UINT8);
+ port_num, UINT16);
cmdline_parse_token_num_t cmd_set_bond_mon_period_period_ms =
TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result,
period_ms, UINT32);
@@ -5088,7 +5450,7 @@ struct cmd_set_bonding_agg_mode_policy_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t agg_mode;
- uint8_t port_num;
+ uint16_t port_num;
cmdline_fixed_string_t policy;
};
@@ -5131,7 +5493,7 @@ cmdline_parse_token_string_t cmd_set_bonding_agg_mode_agg_mode =
cmdline_parse_token_num_t cmd_set_bonding_agg_mode_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result,
- port_num, UINT8);
+ port_num, UINT16);
cmdline_parse_token_string_t cmd_set_bonding_agg_mode_policy_string =
TOKEN_STRING_INITIALIZER(
@@ -5345,7 +5707,7 @@ struct cmd_set_promisc_mode_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t promisc;
cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
- uint8_t port_num; /* valid if "allports" argument == 0 */
+ uint16_t port_num; /* valid if "allports" argument == 0 */
cmdline_fixed_string_t mode;
};
@@ -5425,7 +5787,7 @@ struct cmd_set_allmulti_mode_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t allmulti;
cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
- uint8_t port_num; /* valid if "allports" argument == 0 */
+ uint16_t port_num; /* valid if "allports" argument == 0 */
cmdline_fixed_string_t mode;
};
@@ -5469,7 +5831,7 @@ cmdline_parse_token_string_t cmd_setallmulti_portall =
"all");
cmdline_parse_token_num_t cmd_setallmulti_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_set_allmulti_mode_result, port_num,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_setallmulti_mode =
TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, mode,
"on#off");
@@ -5521,7 +5883,7 @@ struct cmd_link_flow_ctrl_set_result {
uint16_t pause_time;
cmdline_fixed_string_t xon_str;
uint16_t send_xon;
- uint8_t port_id;
+ portid_t port_id;
};
cmdline_parse_token_string_t cmd_lfc_set_set =
@@ -5580,7 +5942,7 @@ cmdline_parse_token_string_t cmd_lfc_set_autoneg =
autoneg, "on#off");
cmdline_parse_token_num_t cmd_lfc_set_portid =
TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
- port_id, UINT8);
+ port_id, UINT16);
/* forward declaration */
static void
@@ -5819,7 +6181,7 @@ struct cmd_priority_flow_ctrl_set_result {
uint32_t low_water;
uint16_t pause_time;
uint8_t priority;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -5887,7 +6249,7 @@ cmdline_parse_token_num_t cmd_pfc_set_priority =
priority, UINT8);
cmdline_parse_token_num_t cmd_pfc_set_portid =
TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_priority_flow_control_set = {
.f = cmd_priority_flow_ctrl_set_parsed,
@@ -6045,7 +6407,7 @@ struct cmd_set_link_up_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t link_up;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
};
cmdline_parse_token_string_t cmd_set_link_up_set =
@@ -6056,7 +6418,7 @@ cmdline_parse_token_string_t cmd_set_link_up_link_up =
cmdline_parse_token_string_t cmd_set_link_up_port =
TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, port, "port");
cmdline_parse_token_num_t cmd_set_link_up_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_set_link_up_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_set_link_up_result, port_id, UINT16);
static void cmd_set_link_up_parsed(__attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
@@ -6084,7 +6446,7 @@ struct cmd_set_link_down_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t link_down;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
};
cmdline_parse_token_string_t cmd_set_link_down_set =
@@ -6095,7 +6457,7 @@ cmdline_parse_token_string_t cmd_set_link_down_link_down =
cmdline_parse_token_string_t cmd_set_link_down_port =
TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, port, "port");
cmdline_parse_token_num_t cmd_set_link_down_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_set_link_down_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_set_link_down_result, port_id, UINT16);
static void cmd_set_link_down_parsed(
__attribute__((unused)) void *parsed_result,
@@ -6235,7 +6597,7 @@ struct cmd_showport_result {
cmdline_fixed_string_t show;
cmdline_fixed_string_t port;
cmdline_fixed_string_t what;
- uint8_t portnum;
+ uint16_t portnum;
};
static void cmd_showport_parsed(void *parsed_result,
@@ -6273,7 +6635,7 @@ cmdline_parse_token_string_t cmd_showport_what =
TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what,
"info#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
cmdline_parse_token_num_t cmd_showport_portnum =
- TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT16);
cmdline_parse_inst_t cmd_showport = {
.f = cmd_showport_parsed,
@@ -6295,7 +6657,7 @@ struct cmd_showqueue_result {
cmdline_fixed_string_t show;
cmdline_fixed_string_t type;
cmdline_fixed_string_t what;
- uint8_t portnum;
+ uint16_t portnum;
uint16_t queuenum;
};
@@ -6319,7 +6681,7 @@ cmdline_parse_token_string_t cmd_showqueue_type =
cmdline_parse_token_string_t cmd_showqueue_what =
TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info");
cmdline_parse_token_num_t cmd_showqueue_portnum =
- TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT16);
cmdline_parse_token_num_t cmd_showqueue_queuenum =
TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16);
@@ -6341,7 +6703,7 @@ cmdline_parse_inst_t cmd_showqueue = {
struct cmd_read_reg_result {
cmdline_fixed_string_t read;
cmdline_fixed_string_t reg;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
};
@@ -6359,7 +6721,7 @@ cmdline_parse_token_string_t cmd_read_reg_read =
cmdline_parse_token_string_t cmd_read_reg_reg =
TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, reg, "reg");
cmdline_parse_token_num_t cmd_read_reg_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_read_reg_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, reg_off, UINT32);
@@ -6380,7 +6742,7 @@ cmdline_parse_inst_t cmd_read_reg = {
struct cmd_read_reg_bit_field_result {
cmdline_fixed_string_t read;
cmdline_fixed_string_t regfield;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
uint8_t bit1_pos;
uint8_t bit2_pos;
@@ -6404,7 +6766,7 @@ cmdline_parse_token_string_t cmd_read_reg_bit_field_regfield =
regfield, "regfield");
cmdline_parse_token_num_t cmd_read_reg_bit_field_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, port_id,
- UINT8);
+ UINT16);
cmdline_parse_token_num_t cmd_read_reg_bit_field_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, reg_off,
UINT32);
@@ -6435,7 +6797,7 @@ cmdline_parse_inst_t cmd_read_reg_bit_field = {
struct cmd_read_reg_bit_result {
cmdline_fixed_string_t read;
cmdline_fixed_string_t regbit;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
uint8_t bit_pos;
};
@@ -6455,7 +6817,7 @@ cmdline_parse_token_string_t cmd_read_reg_bit_regbit =
TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result,
regbit, "regbit");
cmdline_parse_token_num_t cmd_read_reg_bit_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_read_reg_bit_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, reg_off, UINT32);
cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos =
@@ -6479,7 +6841,7 @@ cmdline_parse_inst_t cmd_read_reg_bit = {
struct cmd_write_reg_result {
cmdline_fixed_string_t write;
cmdline_fixed_string_t reg;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
uint32_t value;
};
@@ -6498,7 +6860,7 @@ cmdline_parse_token_string_t cmd_write_reg_write =
cmdline_parse_token_string_t cmd_write_reg_reg =
TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, reg, "reg");
cmdline_parse_token_num_t cmd_write_reg_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_write_reg_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, reg_off, UINT32);
cmdline_parse_token_num_t cmd_write_reg_value =
@@ -6522,7 +6884,7 @@ cmdline_parse_inst_t cmd_write_reg = {
struct cmd_write_reg_bit_field_result {
cmdline_fixed_string_t write;
cmdline_fixed_string_t regfield;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
uint8_t bit1_pos;
uint8_t bit2_pos;
@@ -6547,7 +6909,7 @@ cmdline_parse_token_string_t cmd_write_reg_bit_field_regfield =
regfield, "regfield");
cmdline_parse_token_num_t cmd_write_reg_bit_field_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, port_id,
- UINT8);
+ UINT16);
cmdline_parse_token_num_t cmd_write_reg_bit_field_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, reg_off,
UINT32);
@@ -6583,7 +6945,7 @@ cmdline_parse_inst_t cmd_write_reg_bit_field = {
struct cmd_write_reg_bit_result {
cmdline_fixed_string_t write;
cmdline_fixed_string_t regbit;
- uint8_t port_id;
+ portid_t port_id;
uint32_t reg_off;
uint8_t bit_pos;
uint8_t value;
@@ -6605,7 +6967,7 @@ cmdline_parse_token_string_t cmd_write_reg_bit_regbit =
TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result,
regbit, "regbit");
cmdline_parse_token_num_t cmd_write_reg_bit_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_write_reg_bit_reg_off =
TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, reg_off, UINT32);
cmdline_parse_token_num_t cmd_write_reg_bit_bit_pos =
@@ -6633,7 +6995,7 @@ cmdline_parse_inst_t cmd_write_reg_bit = {
struct cmd_read_rxd_txd_result {
cmdline_fixed_string_t read;
cmdline_fixed_string_t rxd_txd;
- uint8_t port_id;
+ portid_t port_id;
uint16_t queue_id;
uint16_t desc_id;
};
@@ -6657,7 +7019,7 @@ cmdline_parse_token_string_t cmd_read_rxd_txd_rxd_txd =
TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, rxd_txd,
"rxd#txd");
cmdline_parse_token_num_t cmd_read_rxd_txd_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT16);
cmdline_parse_token_num_t cmd_read_rxd_txd_queue_id =
TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, queue_id, UINT16);
cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id =
@@ -6707,7 +7069,7 @@ cmdline_parse_inst_t cmd_quit = {
struct cmd_mac_addr_result {
cmdline_fixed_string_t mac_addr_cmd;
cmdline_fixed_string_t what;
- uint8_t port_num;
+ uint16_t port_num;
struct ether_addr address;
};
@@ -6739,7 +7101,8 @@ cmdline_parse_token_string_t cmd_mac_addr_what =
TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what,
"add#remove#set");
cmdline_parse_token_num_t cmd_mac_addr_portnum =
- TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num,
+ UINT16);
cmdline_parse_token_etheraddr_t cmd_mac_addr_addr =
TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address);
@@ -6763,7 +7126,7 @@ struct cmd_set_qmap_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t qmap;
cmdline_fixed_string_t what;
- uint8_t port_id;
+ portid_t port_id;
uint16_t queue_id;
uint8_t map_value;
};
@@ -6790,7 +7153,7 @@ cmdline_parse_token_string_t cmd_setqmap_what =
what, "tx#rx");
cmdline_parse_token_num_t cmd_setqmap_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_setqmap_queueid =
TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result,
queue_id, UINT16);
@@ -6814,11 +7177,53 @@ cmdline_parse_inst_t cmd_set_qmap = {
},
};
+/* *** SET OPTION TO HIDE ZERO VALUES FOR XSTATS DISPLAY *** */
+struct cmd_set_xstats_hide_zero_result {
+ cmdline_fixed_string_t keyword;
+ cmdline_fixed_string_t name;
+ cmdline_fixed_string_t on_off;
+};
+
+static void
+cmd_set_xstats_hide_zero_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_xstats_hide_zero_result *res;
+ uint16_t on_off = 0;
+
+ res = parsed_result;
+ on_off = !strcmp(res->on_off, "on") ? 1 : 0;
+ set_xstats_hide_zero(on_off);
+}
+
+cmdline_parse_token_string_t cmd_set_xstats_hide_zero_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result,
+ keyword, "set");
+cmdline_parse_token_string_t cmd_set_xstats_hide_zero_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result,
+ name, "xstats-hide-zero");
+cmdline_parse_token_string_t cmd_set_xstats_hide_zero_on_off =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result,
+ on_off, "on#off");
+
+cmdline_parse_inst_t cmd_set_xstats_hide_zero = {
+ .f = cmd_set_xstats_hide_zero_parsed,
+ .data = NULL,
+ .help_str = "set xstats-hide-zero on|off",
+ .tokens = {
+ (void *)&cmd_set_xstats_hide_zero_keyword,
+ (void *)&cmd_set_xstats_hide_zero_name,
+ (void *)&cmd_set_xstats_hide_zero_on_off,
+ NULL,
+ },
+};
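
For context on how such a definition becomes usable: a testpmd command only becomes reachable from the prompt once its cmdline_parse_inst_t is listed in the parse context array. A minimal sketch, assuming main_ctx[] is the cmdline_parse_ctx_t table this file registers commands in:

	/* Sketch: hook the new command into the command table.
	 * main_ctx is assumed to be this file's cmdline_parse_ctx_t list. */
	cmdline_parse_ctx_t main_ctx[] = {
		/* ... existing commands ... */
		(cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero,
		NULL,
	};
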
+
/* *** CONFIGURE UNICAST HASH TABLE *** */
struct cmd_set_uc_hash_table {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t what;
struct ether_addr address;
cmdline_fixed_string_t mode;
@@ -6850,7 +7255,7 @@ cmdline_parse_token_string_t cmd_set_uc_hash_port =
port, "port");
cmdline_parse_token_num_t cmd_set_uc_hash_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_uc_hash_table,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_uc_hash_what =
TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table,
what, "uta");
@@ -6879,7 +7284,7 @@ cmdline_parse_inst_t cmd_set_uc_hash_filter = {
struct cmd_set_uc_all_hash_table {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t what;
cmdline_fixed_string_t value;
cmdline_fixed_string_t mode;
@@ -6911,7 +7316,7 @@ cmdline_parse_token_string_t cmd_set_uc_all_hash_port =
port, "port");
cmdline_parse_token_num_t cmd_set_uc_all_hash_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_uc_all_hash_table,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_uc_all_hash_what =
TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table,
what, "uta");
@@ -6941,7 +7346,7 @@ cmdline_parse_inst_t cmd_set_uc_all_hash_filter = {
struct cmd_set_vf_macvlan_filter {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t vf;
uint8_t vf_id;
struct ether_addr address;
@@ -6960,7 +7365,7 @@ cmd_set_vf_macvlan_parsed(void *parsed_result,
memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
- (void)rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
/* set VF MAC filter */
filter.is_vf = 1;
@@ -7003,7 +7408,7 @@ cmdline_parse_token_string_t cmd_set_vf_macvlan_port =
port, "port");
cmdline_parse_token_num_t cmd_set_vf_macvlan_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_vf_macvlan_filter,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_vf_macvlan_vf =
TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter,
vf, "vf");
@@ -7045,7 +7450,7 @@ cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
struct cmd_set_vf_traffic {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t vf;
uint8_t vf_id;
cmdline_fixed_string_t what;
@@ -7072,7 +7477,7 @@ cmdline_parse_token_string_t cmd_setvf_traffic_port =
port, "port");
cmdline_parse_token_num_t cmd_setvf_traffic_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_vf_traffic,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_setvf_traffic_vf =
TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic,
vf, "vf");
@@ -7106,7 +7511,7 @@ cmdline_parse_inst_t cmd_set_vf_traffic = {
struct cmd_set_vf_rxmode {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t vf;
uint8_t vf_id;
cmdline_fixed_string_t what;
@@ -7135,6 +7540,8 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
+ RTE_SET_USED(is_on);
+
#ifdef RTE_LIBRTE_IXGBE_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
@@ -7158,7 +7565,7 @@ cmdline_parse_token_string_t cmd_set_vf_rxmode_port =
port, "port");
cmdline_parse_token_num_t cmd_set_vf_rxmode_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_vf_rxmode,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_vf_rxmode_vf =
TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode,
vf, "vf");
@@ -7198,7 +7605,7 @@ struct cmd_vf_mac_addr_result {
cmdline_fixed_string_t mac_addr_cmd;
cmdline_fixed_string_t what;
cmdline_fixed_string_t port;
- uint8_t port_num;
+ uint16_t port_num;
cmdline_fixed_string_t vf;
uint8_t vf_num;
struct ether_addr address;
@@ -7209,11 +7616,22 @@ static void cmd_vf_mac_addr_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_vf_mac_addr_result *res = parsed_result;
- int ret = 0;
+ int ret = -ENOTSUP;
+
+ if (strcmp(res->what, "add") != 0)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_add_vf_mac_addr(res->port_num, res->vf_num,
+ &res->address);
+#endif
+#ifdef RTE_LIBRTE_BNXT_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_mac_addr_add(res->port_num, &res->address,
+ res->vf_num);
+#endif
- if (strcmp(res->what, "add") == 0)
- ret = rte_eth_dev_mac_addr_add(res->port_num,
- &res->address, res->vf_num);
if (ret < 0)
printf("vf_mac_addr_cmd error: (%s)\n", strerror(-ret));
@@ -7230,7 +7648,7 @@ cmdline_parse_token_string_t cmd_vf_mac_addr_port =
port,"port");
cmdline_parse_token_num_t cmd_vf_mac_addr_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_vf_mac_addr_result,
- port_num, UINT8);
+ port_num, UINT16);
cmdline_parse_token_string_t cmd_vf_mac_addr_vf =
TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result,
vf,"vf");
@@ -7264,7 +7682,7 @@ struct cmd_vf_rx_vlan_filter {
cmdline_fixed_string_t what;
uint16_t vlan_id;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t vf;
uint64_t vf_mask;
};
@@ -7327,7 +7745,7 @@ cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_port =
port, "port");
cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_portid =
TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_vf =
TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter,
vf, "vf");
@@ -7356,7 +7774,7 @@ cmdline_parse_inst_t cmd_vf_rxvlan_filter = {
struct cmd_queue_rate_limit_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_num;
+ uint16_t port_num;
cmdline_fixed_string_t queue;
uint8_t queue_num;
cmdline_fixed_string_t rate;
@@ -7388,7 +7806,7 @@ cmdline_parse_token_string_t cmd_queue_rate_limit_port =
port, "port");
cmdline_parse_token_num_t cmd_queue_rate_limit_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result,
- port_num, UINT8);
+ port_num, UINT16);
cmdline_parse_token_string_t cmd_queue_rate_limit_queue =
TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result,
queue, "queue");
@@ -7423,7 +7841,7 @@ cmdline_parse_inst_t cmd_queue_rate_limit = {
struct cmd_vf_rate_limit_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_num;
+ uint16_t port_num;
cmdline_fixed_string_t vf;
uint8_t vf_num;
cmdline_fixed_string_t rate;
@@ -7458,7 +7876,7 @@ cmdline_parse_token_string_t cmd_vf_rate_limit_port =
port, "port");
cmdline_parse_token_num_t cmd_vf_rate_limit_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result,
- port_num, UINT8);
+ port_num, UINT16);
cmdline_parse_token_string_t cmd_vf_rate_limit_vf =
TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result,
vf, "vf");
@@ -7502,7 +7920,7 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
struct cmd_tunnel_filter_result {
cmdline_fixed_string_t cmd;
cmdline_fixed_string_t what;
- uint8_t port_id;
+ portid_t port_id;
struct ether_addr outer_mac;
struct ether_addr inner_mac;
cmdline_ipaddr_t ip_value;
@@ -7596,7 +8014,7 @@ cmdline_parse_token_string_t cmd_tunnel_filter_what =
what, "add#rm");
cmdline_parse_token_num_t cmd_tunnel_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_etheraddr_t cmd_tunnel_filter_outer_mac =
TOKEN_ETHERADDR_INITIALIZER(struct cmd_tunnel_filter_result,
outer_mac);
@@ -7652,7 +8070,7 @@ struct cmd_tunnel_udp_config {
cmdline_fixed_string_t cmd;
cmdline_fixed_string_t what;
uint16_t udp_port;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -7691,7 +8109,7 @@ cmdline_parse_token_num_t cmd_tunnel_udp_config_udp_port =
udp_port, UINT16);
cmdline_parse_token_num_t cmd_tunnel_udp_config_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_tunnel_udp_config = {
.f = cmd_tunnel_udp_config_parsed,
@@ -7710,7 +8128,7 @@ cmdline_parse_inst_t cmd_tunnel_udp_config = {
/* *** GLOBAL CONFIG *** */
struct cmd_global_config_result {
cmdline_fixed_string_t cmd;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t cfg_type;
uint8_t len;
};
@@ -7737,7 +8155,8 @@ cmdline_parse_token_string_t cmd_global_config_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_global_config_result, cmd,
"global_config");
cmdline_parse_token_num_t cmd_global_config_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_global_config_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_global_config_result, port_id,
+ UINT16);
cmdline_parse_token_string_t cmd_global_config_type =
TOKEN_STRING_INITIALIZER(struct cmd_global_config_result,
cfg_type, "gre-key-len");
@@ -7762,7 +8181,7 @@ cmdline_parse_inst_t cmd_global_config = {
struct cmd_set_mirror_mask_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mirror;
uint8_t rule_id;
cmdline_fixed_string_t what;
@@ -7780,7 +8199,7 @@ cmdline_parse_token_string_t cmd_mirror_mask_port =
port, "port");
cmdline_parse_token_num_t cmd_mirror_mask_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_mirror_mask_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
mirror, "mirror-rule");
@@ -7878,7 +8297,7 @@ cmdline_parse_inst_t cmd_set_mirror_mask = {
struct cmd_set_mirror_link_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mirror;
uint8_t rule_id;
cmdline_fixed_string_t what;
@@ -7895,7 +8314,7 @@ cmdline_parse_token_string_t cmd_mirror_link_port =
port, "port");
cmdline_parse_token_num_t cmd_mirror_link_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_mirror_link_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
mirror, "mirror-rule");
@@ -7968,7 +8387,7 @@ cmdline_parse_inst_t cmd_set_mirror_link = {
struct cmd_rm_mirror_rule_result {
cmdline_fixed_string_t reset;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mirror;
uint8_t rule_id;
};
@@ -7981,7 +8400,7 @@ cmdline_parse_token_string_t cmd_rm_mirror_rule_port =
port, "port");
cmdline_parse_token_num_t cmd_rm_mirror_rule_portid =
TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_rm_mirror_rule_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
mirror, "mirror-rule");
@@ -8126,7 +8545,7 @@ cmdline_parse_inst_t cmd_dump_one = {
/* *** Add/Del syn filter *** */
struct cmd_syn_filter_result {
cmdline_fixed_string_t filter;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t ops;
cmdline_fixed_string_t priority;
cmdline_fixed_string_t high;
@@ -8180,7 +8599,7 @@ cmdline_parse_token_string_t cmd_syn_filter_filter =
filter, "syn_filter");
cmdline_parse_token_num_t cmd_syn_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_syn_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_syn_filter_ops =
TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result,
ops, "add#del");
@@ -8214,10 +8633,452 @@ cmdline_parse_inst_t cmd_syn_filter = {
},
};
+/* *** queue region set *** */
+struct cmd_queue_region_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t region;
+ uint8_t region_id;
+ cmdline_fixed_string_t queue_start_index;
+ uint8_t queue_id;
+ cmdline_fixed_string_t queue_num;
+ uint8_t queue_num_value;
+};
+
+static void
+cmd_queue_region_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_queue_region_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_queue_region_conf region_conf;
+ enum rte_pmd_i40e_queue_region_op op_type;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ memset(&region_conf, 0, sizeof(region_conf));
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_SET;
+ region_conf.region_id = res->region_id;
+ region_conf.queue_num = res->queue_num_value;
+ region_conf.queue_start_index = res->queue_id;
+
+ ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id,
+ op_type, &region_conf);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("queue region config error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_token_string_t cmd_queue_region_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_queue_region_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, port, "port");
+cmdline_parse_token_num_t cmd_queue_region_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_queue_region_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result,
+ cmd, "queue-region");
+cmdline_parse_token_string_t cmd_queue_region_id =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result,
+ region, "region_id");
+cmdline_parse_token_num_t cmd_queue_region_index =
+ TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result,
+ region_id, UINT8);
+cmdline_parse_token_string_t cmd_queue_region_queue_start_index =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result,
+ queue_start_index, "queue_start_index");
+cmdline_parse_token_num_t cmd_queue_region_queue_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result,
+ queue_id, UINT8);
+cmdline_parse_token_string_t cmd_queue_region_queue_num =
+ TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result,
+ queue_num, "queue_num");
+cmdline_parse_token_num_t cmd_queue_region_queue_num_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result,
+ queue_num_value, UINT8);
+
+cmdline_parse_inst_t cmd_queue_region = {
+ .f = cmd_queue_region_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> queue-region region_id <value> "
+ "queue_start_index <value> queue_num <value>: Set a queue region",
+ .tokens = {
+ (void *)&cmd_queue_region_set,
+ (void *)&cmd_queue_region_port,
+ (void *)&cmd_queue_region_port_id,
+ (void *)&cmd_queue_region_cmd,
+ (void *)&cmd_queue_region_id,
+ (void *)&cmd_queue_region_index,
+ (void *)&cmd_queue_region_queue_start_index,
+ (void *)&cmd_queue_region_queue_id,
+ (void *)&cmd_queue_region_queue_num,
+ (void *)&cmd_queue_region_queue_num_value,
+ NULL,
+ },
+};
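
The parsed callback above reduces to a single driver call. As a rough standalone equivalent of "set port 0 queue-region region_id 0 queue_start_index 0 queue_num 4" (a sketch assuming RTE_LIBRTE_I40E_PMD is enabled; the field names mirror the handler above):

	/* Sketch: configure queue region 0 as queues 0..3 on port 0. */
	struct rte_pmd_i40e_queue_region_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.queue_start_index = 0;
	conf.queue_num = 4;
	ret = rte_pmd_i40e_rss_queue_region_conf(0 /* port_id */,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret < 0)
		printf("queue region config error: (%s)\n", strerror(-ret));
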
+
+/* *** queue region and flowtype set *** */
+struct cmd_region_flowtype_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t region;
+ uint8_t region_id;
+ cmdline_fixed_string_t flowtype;
+ uint8_t flowtype_id;
+};
+
+static void
+cmd_region_flowtype_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_region_flowtype_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_queue_region_conf region_conf;
+ enum rte_pmd_i40e_queue_region_op op_type;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ memset(&region_conf, 0, sizeof(region_conf));
+
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET;
+ region_conf.region_id = res->region_id;
+ region_conf.hw_flowtype = res->flowtype_id;
+
+ ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id,
+ op_type, &region_conf);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("region flowtype config error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_token_string_t cmd_region_flowtype_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_region_flowtype_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_region_flowtype_port_index =
+ TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_region_flowtype_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result,
+ cmd, "queue-region");
+cmdline_parse_token_string_t cmd_region_flowtype_index =
+ TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result,
+ region, "region_id");
+cmdline_parse_token_num_t cmd_region_flowtype_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result,
+ region_id, UINT8);
+cmdline_parse_token_string_t cmd_region_flowtype_flow_index =
+ TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result,
+ flowtype, "flowtype");
+cmdline_parse_token_num_t cmd_region_flowtype_flow_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result,
+ flowtype_id, UINT8);
+cmdline_parse_inst_t cmd_region_flowtype = {
+ .f = cmd_region_flowtype_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> queue-region region_id <value> "
+ "flowtype <value>: Set a flowtype region index",
+ .tokens = {
+ (void *)&cmd_region_flowtype_set,
+ (void *)&cmd_region_flowtype_port,
+ (void *)&cmd_region_flowtype_port_index,
+ (void *)&cmd_region_flowtype_cmd,
+ (void *)&cmd_region_flowtype_index,
+ (void *)&cmd_region_flowtype_id,
+ (void *)&cmd_region_flowtype_flow_index,
+ (void *)&cmd_region_flowtype_flow_id,
+ NULL,
+ },
+};
+
+/* *** User Priority (UP) to queue region (region_id) set *** */
+struct cmd_user_priority_region_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t user_priority;
+ uint8_t user_priority_id;
+ cmdline_fixed_string_t region;
+ uint8_t region_id;
+};
+
+static void
+cmd_user_priority_region_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_user_priority_region_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_queue_region_conf region_conf;
+ enum rte_pmd_i40e_queue_region_op op_type;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ memset(&region_conf, 0, sizeof(region_conf));
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET;
+ region_conf.user_priority = res->user_priority_id;
+ region_conf.region_id = res->region_id;
+
+ ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id,
+ op_type, &region_conf);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("user_priority region config error: (%s)\n",
+ strerror(-ret));
+ }
+}
+
+cmdline_parse_token_string_t cmd_user_priority_region_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_user_priority_region_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_user_priority_region_port_index =
+ TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_user_priority_region_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result,
+ cmd, "queue-region");
+cmdline_parse_token_string_t cmd_user_priority_region_UP =
+ TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result,
+ user_priority, "UP");
+cmdline_parse_token_num_t cmd_user_priority_region_UP_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result,
+ user_priority_id, UINT8);
+cmdline_parse_token_string_t cmd_user_priority_region_region =
+ TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result,
+ region, "region_id");
+cmdline_parse_token_num_t cmd_user_priority_region_region_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result,
+ region_id, UINT8);
+
+cmdline_parse_inst_t cmd_user_priority_region = {
+ .f = cmd_user_priority_region_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> queue-region UP <value> "
+ "region_id <value>: Set the mapping of User Priority (UP) "
+ "to queue region (region_id) ",
+ .tokens = {
+ (void *)&cmd_user_priority_region_set,
+ (void *)&cmd_user_priority_region_port,
+ (void *)&cmd_user_priority_region_port_index,
+ (void *)&cmd_user_priority_region_cmd,
+ (void *)&cmd_user_priority_region_UP,
+ (void *)&cmd_user_priority_region_UP_id,
+ (void *)&cmd_user_priority_region_region,
+ (void *)&cmd_user_priority_region_region_id,
+ NULL,
+ },
+};
+
+/* *** flush all queue region related configuration *** */
+struct cmd_flush_queue_region_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t cmd;
+ cmdline_fixed_string_t flush;
+ cmdline_fixed_string_t what;
+};
+
+static void
+cmd_flush_queue_region_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flush_queue_region_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_queue_region_conf region_conf;
+ enum rte_pmd_i40e_queue_region_op op_type;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ memset(&region_conf, 0, sizeof(region_conf));
+
+ if (strcmp(res->what, "on") == 0)
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON;
+ else
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF;
+
+ ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id,
+ op_type, &region_conf);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("queue region config flush error: (%s)\n",
+ strerror(-ret));
+ }
+}
+
+cmdline_parse_token_string_t cmd_flush_queue_region_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_flush_queue_region_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_flush_queue_region_port_index =
+ TOKEN_NUM_INITIALIZER(struct cmd_flush_queue_region_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_flush_queue_region_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result,
+ cmd, "queue-region");
+cmdline_parse_token_string_t cmd_flush_queue_region_flush =
+ TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result,
+ flush, "flush");
+cmdline_parse_token_string_t cmd_flush_queue_region_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result,
+ what, "on#off");
+
+cmdline_parse_inst_t cmd_flush_queue_region = {
+ .f = cmd_flush_queue_region_parsed,
+ .data = NULL,
+ .help_str = "set port <port_id> queue-region flush on|off"
+ ": flush all queue region related configuration",
+ .tokens = {
+ (void *)&cmd_flush_queue_region_set,
+ (void *)&cmd_flush_queue_region_port,
+ (void *)&cmd_flush_queue_region_port_index,
+ (void *)&cmd_flush_queue_region_cmd,
+ (void *)&cmd_flush_queue_region_flush,
+ (void *)&cmd_flush_queue_region_what,
+ NULL,
+ },
+};
+
+/* *** get all queue region related configuration info *** */
+struct cmd_show_queue_region_info {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t cmd;
+};
+
+static void
+cmd_show_queue_region_info_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_queue_region_info *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_queue_regions rte_pmd_regions;
+ enum rte_pmd_i40e_queue_region_op op_type;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ memset(&rte_pmd_regions, 0, sizeof(rte_pmd_regions));
+
+ op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET;
+
+ ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id,
+ op_type, &rte_pmd_regions);
+
+ port_queue_region_info_display(res->port_id, &rte_pmd_regions);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("queue region config info show error: (%s)\n",
+ strerror(-ret));
+ }
+}
+
+cmdline_parse_token_string_t cmd_show_queue_region_info_get =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_queue_region_info,
+ show, "show");
+cmdline_parse_token_string_t cmd_show_queue_region_info_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_queue_region_info,
+ port, "port");
+cmdline_parse_token_num_t cmd_show_queue_region_info_port_index =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_queue_region_info,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_show_queue_region_info_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_queue_region_info,
+ cmd, "queue-region");
+
+cmdline_parse_inst_t cmd_show_queue_region_info_all = {
+ .f = cmd_show_queue_region_info_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> queue-region"
+ ": show all queue region related configuration info",
+ .tokens = {
+ (void *)&cmd_show_queue_region_info_get,
+ (void *)&cmd_show_queue_region_info_port,
+ (void *)&cmd_show_queue_region_info_port_index,
+ (void *)&cmd_show_queue_region_info_cmd,
+ NULL,
+ },
+};
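
Taken together, the queue-region commands above are intended to be issued as a sequence from the testpmd prompt, mirroring their help strings (the port, region, flow type and queue values below are illustrative):

	testpmd> set port 0 queue-region region_id 0 queue_start_index 0 queue_num 4
	testpmd> set port 0 queue-region region_id 0 flowtype 31
	testpmd> set port 0 queue-region UP 3 region_id 0
	testpmd> set port 0 queue-region flush on
	testpmd> show port 0 queue-region
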
+
/* *** ADD/REMOVE A 2tuple FILTER *** */
struct cmd_2tuple_filter_result {
cmdline_fixed_string_t filter;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t ops;
cmdline_fixed_string_t dst_port;
uint16_t dst_port_value;
@@ -8296,7 +9157,7 @@ cmdline_parse_token_string_t cmd_2tuple_filter_filter =
filter, "2tuple_filter");
cmdline_parse_token_num_t cmd_2tuple_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_2tuple_filter_ops =
TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result,
ops, "add#del");
@@ -8366,7 +9227,7 @@ cmdline_parse_inst_t cmd_2tuple_filter = {
/* *** ADD/REMOVE A 5tuple FILTER *** */
struct cmd_5tuple_filter_result {
cmdline_fixed_string_t filter;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t ops;
cmdline_fixed_string_t dst_ip;
cmdline_ipaddr_t dst_ip_value;
@@ -8475,7 +9336,7 @@ cmdline_parse_token_string_t cmd_5tuple_filter_filter =
filter, "5tuple_filter");
cmdline_parse_token_num_t cmd_5tuple_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_5tuple_filter_ops =
TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result,
ops, "add#del");
@@ -8571,7 +9432,7 @@ cmdline_parse_inst_t cmd_5tuple_filter = {
struct cmd_flex_filter_result {
cmdline_fixed_string_t filter;
cmdline_fixed_string_t ops;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t len;
uint8_t len_value;
cmdline_fixed_string_t bytes;
@@ -8698,7 +9559,7 @@ cmdline_parse_token_string_t cmd_flex_filter_filter =
filter, "flex_filter");
cmdline_parse_token_num_t cmd_flex_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_flex_filter_ops =
TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result,
ops, "add#del");
@@ -8761,7 +9622,7 @@ cmdline_parse_inst_t cmd_flex_filter = {
/* *** deal with ethertype filter *** */
struct cmd_ethertype_filter_result {
cmdline_fixed_string_t filter;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t ops;
cmdline_fixed_string_t mac;
struct ether_addr mac_addr;
@@ -8777,7 +9638,7 @@ cmdline_parse_token_string_t cmd_ethertype_filter_filter =
filter, "ethertype_filter");
cmdline_parse_token_num_t cmd_ethertype_filter_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_ethertype_filter_ops =
TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result,
ops, "add#del");
@@ -8823,7 +9684,7 @@ cmd_ethertype_filter_parsed(void *parsed_result,
memset(&filter, 0, sizeof(filter));
if (!strcmp(res->mac, "mac_addr")) {
filter.flags |= RTE_ETHTYPE_FLAGS_MAC;
- (void)rte_memcpy(&filter.mac_addr, &res->mac_addr,
+ rte_memcpy(&filter.mac_addr, &res->mac_addr,
sizeof(struct ether_addr));
}
if (!strcmp(res->drop, "drop"))
@@ -8870,7 +9731,7 @@ cmdline_parse_inst_t cmd_ethertype_filter = {
/* *** deal with flow director filter *** */
struct cmd_flow_director_result {
cmdline_fixed_string_t flow_director_filter;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mode;
cmdline_fixed_string_t mode_value;
cmdline_fixed_string_t ops;
@@ -8976,6 +9837,10 @@ str2flowtype(char *string)
if (!strcmp(flowtype_str[i].str, string))
return flowtype_str[i].type;
}
+
+ if (isdigit(string[0]) && atoi(string) > 0 && atoi(string) < 64)
+ return (uint16_t)atoi(string);
+
return RTE_ETH_FLOW_UNKNOWN;
}
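
With the added branch, str2flowtype() also accepts a raw numeric flow type in addition to the named ones. A quick illustration (the named lookup assumes the flowtype_str[] table used above):

	str2flowtype("ipv4-tcp"); /* named type, resolved via flowtype_str[] */
	str2flowtype("63");       /* digits in (0, 64): returned as (uint16_t)63 */
	str2flowtype("64");       /* out of range: RTE_ETH_FLOW_UNKNOWN */
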
@@ -9012,7 +9877,7 @@ do { \
#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \
do { \
if ((ip_addr).family == AF_INET6) \
- (void)rte_memcpy(&(ip), \
+ rte_memcpy(&(ip), \
&((ip_addr).addr.ipv6), \
sizeof(struct in6_addr)); \
else { \
@@ -9144,12 +10009,12 @@ cmd_flow_director_filter_parsed(void *parsed_result,
}
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
- (void)rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
+ rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
&res->mac_addr,
sizeof(struct ether_addr));
if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- (void)rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
+ rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
&res->mac_addr,
sizeof(struct ether_addr));
entry.input.flow.tunnel_flow.tunnel_type =
@@ -9158,7 +10023,7 @@ cmd_flow_director_filter_parsed(void *parsed_result,
rte_cpu_to_be_32(res->tunnel_id_value);
}
- (void)rte_memcpy(entry.input.flow_ext.flexbytes,
+ rte_memcpy(entry.input.flow_ext.flexbytes,
flexbytes,
RTE_ETH_FDIR_MAX_FLEXLEN);
@@ -9217,7 +10082,7 @@ cmdline_parse_token_string_t cmd_flow_director_filter =
flow_director_filter, "flow_director_filter");
cmdline_parse_token_num_t cmd_flow_director_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_flow_director_ops =
TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
ops, "add#del#update");
@@ -9346,7 +10211,7 @@ cmdline_parse_inst_t cmd_add_del_ip_flow_director = {
"ipv6-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|"
"l2_payload src <src_ip> dst <dst_ip> tos <tos_value> "
"proto <proto_value> ttl <ttl_value> vlan <vlan_value> "
- "flexbytes <flexbyte_vaues> drop|fw <pf_vf> queue <queue_id> "
+ "flexbytes <flexbyte_values> drop|fw <pf_vf> queue <queue_id> "
"fd_id <fd_id_value>: "
"Add or delete an ip flow director entry on NIC",
.tokens = {
@@ -9542,7 +10407,7 @@ cmdline_parse_inst_t cmd_add_del_tunnel_flow_director = {
struct cmd_flush_flow_director_result {
cmdline_fixed_string_t flush_flow_director;
- uint8_t port_id;
+ portid_t port_id;
};
cmdline_parse_token_string_t cmd_flush_flow_director_flush =
@@ -9550,7 +10415,7 @@ cmdline_parse_token_string_t cmd_flush_flow_director_flush =
flush_flow_director, "flush_flow_director");
cmdline_parse_token_num_t cmd_flush_flow_director_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flush_flow_director_result,
- port_id, UINT8);
+ port_id, UINT16);
static void
cmd_flush_flow_director_parsed(void *parsed_result,
@@ -9589,7 +10454,7 @@ cmdline_parse_inst_t cmd_flush_flow_director = {
/* *** deal with flow director mask *** */
struct cmd_flow_director_mask_result {
cmdline_fixed_string_t flow_director_mask;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t mode;
cmdline_fixed_string_t mode_value;
cmdline_fixed_string_t vlan;
@@ -9673,7 +10538,7 @@ cmdline_parse_token_string_t cmd_flow_director_mask =
flow_director_mask, "flow_director_mask");
cmdline_parse_token_num_t cmd_flow_director_mask_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_flow_director_mask_vlan =
TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result,
vlan, "vlan");
@@ -9801,7 +10666,7 @@ cmdline_parse_inst_t cmd_set_flow_director_tunnel_mask = {
/* *** deal with flow director mask on flexible payload *** */
struct cmd_flow_director_flex_mask_result {
cmdline_fixed_string_t flow_director_flexmask;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t flow;
cmdline_fixed_string_t flow_type;
cmdline_fixed_string_t mask;
@@ -9856,7 +10721,7 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
memset(&port->dev_conf.fdir_conf.flex_conf.flex_mask[i],
0, sizeof(struct rte_eth_fdir_flex_mask));
port->dev_conf.fdir_conf.flex_conf.nb_flexmasks = 1;
- (void)rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
+ rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0],
&flex_mask,
sizeof(struct rte_eth_fdir_flex_mask));
cmd_reconfig_device_queue(res->port_id, 1, 1);
@@ -9893,7 +10758,7 @@ cmdline_parse_token_string_t cmd_flow_director_flexmask =
"flow_director_flex_mask");
cmdline_parse_token_num_t cmd_flow_director_flexmask_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flex_mask_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_flow_director_flexmask_flow =
TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result,
flow, "flow");
@@ -9923,7 +10788,7 @@ cmdline_parse_inst_t cmd_set_flow_director_flex_mask = {
/* *** deal with flow director flexible payload configuration *** */
struct cmd_flow_director_flexpayload_result {
cmdline_fixed_string_t flow_director_flexpayload;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t payload_layer;
cmdline_fixed_string_t payload_cfg;
};
@@ -10016,7 +10881,7 @@ cmdline_parse_token_string_t cmd_flow_director_flexpayload =
"flow_director_flex_payload");
cmdline_parse_token_num_t cmd_flow_director_flexpayload_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flexpayload_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_layer =
TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result,
payload_layer, "raw#l2#l3#l4");
@@ -10045,7 +10910,7 @@ extern cmdline_parse_inst_t cmd_flow;
/* *** Get symmetric hash enable per port *** */
struct cmd_get_sym_hash_ena_per_port_result {
cmdline_fixed_string_t get_sym_hash_ena_per_port;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -10084,7 +10949,7 @@ cmdline_parse_token_string_t cmd_get_sym_hash_ena_per_port_all =
get_sym_hash_ena_per_port, "get_sym_hash_ena_per_port");
cmdline_parse_token_num_t cmd_get_sym_hash_ena_per_port_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_get_sym_hash_ena_per_port_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_get_sym_hash_ena_per_port = {
.f = cmd_get_sym_hash_per_port_parsed,
@@ -10101,7 +10966,7 @@ cmdline_parse_inst_t cmd_get_sym_hash_ena_per_port = {
struct cmd_set_sym_hash_ena_per_port_result {
cmdline_fixed_string_t set_sym_hash_ena_per_port;
cmdline_fixed_string_t enable;
- uint8_t port_id;
+ portid_t port_id;
};
static void
@@ -10140,7 +11005,7 @@ cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_all =
set_sym_hash_ena_per_port, "set_sym_hash_ena_per_port");
cmdline_parse_token_num_t cmd_set_sym_hash_ena_per_port_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_enable =
TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
enable, "enable#disable");
@@ -10160,7 +11025,7 @@ cmdline_parse_inst_t cmd_set_sym_hash_ena_per_port = {
/* Get global config of hash function */
struct cmd_get_hash_global_config_result {
cmdline_fixed_string_t get_hash_global_config;
- uint8_t port_id;
+ portid_t port_id;
};
static char *
@@ -10261,7 +11126,7 @@ cmdline_parse_token_string_t cmd_get_hash_global_config_all =
get_hash_global_config, "get_hash_global_config");
cmdline_parse_token_num_t cmd_get_hash_global_config_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_get_hash_global_config_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_inst_t cmd_get_hash_global_config = {
.f = cmd_get_hash_global_config_parsed,
@@ -10277,7 +11142,7 @@ cmdline_parse_inst_t cmd_get_hash_global_config = {
/* Set global config of hash function */
struct cmd_set_hash_global_config_result {
cmdline_fixed_string_t set_hash_global_config;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t hash_func;
cmdline_fixed_string_t flow_type;
cmdline_fixed_string_t enable;
@@ -10333,7 +11198,7 @@ cmdline_parse_token_string_t cmd_set_hash_global_config_all =
set_hash_global_config, "set_hash_global_config");
cmdline_parse_token_num_t cmd_set_hash_global_config_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_set_hash_global_config_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
hash_func, "toeplitz#simple_xor#default");
@@ -10367,7 +11232,7 @@ cmdline_parse_inst_t cmd_set_hash_global_config = {
/* Set hash input set */
struct cmd_set_hash_input_set_result {
cmdline_fixed_string_t set_hash_input_set;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t flow_type;
cmdline_fixed_string_t inset_field;
cmdline_fixed_string_t select;
@@ -10449,12 +11314,10 @@ cmdline_parse_token_string_t cmd_set_hash_input_set_cmd =
set_hash_input_set, "set_hash_input_set");
cmdline_parse_token_num_t cmd_set_hash_input_set_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_set_hash_input_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_hash_input_set_flow_type =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
- flow_type,
- "ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#"
- "ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
+ flow_type, NULL);
cmdline_parse_token_string_t cmd_set_hash_input_set_field =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
inset_field,
@@ -10473,7 +11336,7 @@ cmdline_parse_inst_t cmd_set_hash_input_set = {
.data = NULL,
.help_str = "set_hash_input_set <port_id> "
"ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
- "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload|<flowtype_id> "
"ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|"
"ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port|tcp-src-port|"
"tcp-dst-port|sctp-src-port|sctp-dst-port|sctp-veri-tag|udp-key|"
@@ -10492,7 +11355,7 @@ cmdline_parse_inst_t cmd_set_hash_input_set = {
/* Set flow director input set */
struct cmd_set_fdir_input_set_result {
cmdline_fixed_string_t set_fdir_input_set;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t flow_type;
cmdline_fixed_string_t inset_field;
cmdline_fixed_string_t select;
@@ -10524,7 +11387,7 @@ cmdline_parse_token_string_t cmd_set_fdir_input_set_cmd =
set_fdir_input_set, "set_fdir_input_set");
cmdline_parse_token_num_t cmd_set_fdir_input_set_port_id =
TOKEN_NUM_INITIALIZER(struct cmd_set_fdir_input_set_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_set_fdir_input_set_flow_type =
TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result,
flow_type,
@@ -10567,7 +11430,7 @@ cmdline_parse_inst_t cmd_set_fdir_input_set = {
struct cmd_mcast_addr_result {
cmdline_fixed_string_t mcast_addr_cmd;
cmdline_fixed_string_t what;
- uint8_t port_num;
+ uint16_t port_num;
struct ether_addr mc_addr;
};
@@ -10597,7 +11460,7 @@ cmdline_parse_token_string_t cmd_mcast_addr_what =
TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, what,
"add#remove");
cmdline_parse_token_num_t cmd_mcast_addr_portnum =
- TOKEN_NUM_INITIALIZER(struct cmd_mcast_addr_result, port_num, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_mcast_addr_result, port_num, UINT16);
cmdline_parse_token_etheraddr_t cmd_mcast_addr_addr =
TOKEN_ETHERADDR_INITIALIZER(struct cmd_mcast_addr_result, mc_addr);
@@ -10901,7 +11764,7 @@ struct cmd_config_e_tag_result {
cmdline_fixed_string_t dst_pool;
uint8_t dst_pool_val;
cmdline_fixed_string_t port;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t vf;
uint8_t vf_id;
};
@@ -10982,7 +11845,7 @@ cmdline_parse_token_string_t cmd_config_e_tag_port =
cmdline_parse_token_num_t cmd_config_e_tag_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_config_e_tag_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_config_e_tag_vf =
TOKEN_STRING_INITIALIZER
(struct cmd_config_e_tag_result,
@@ -11284,7 +12147,7 @@ struct cmd_vf_vlan_anti_spoof_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t vlan;
cmdline_fixed_string_t antispoof;
- uint8_t port_id;
+ portid_t port_id;
uint32_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -11309,7 +12172,7 @@ cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_antispoof =
cmdline_parse_token_num_t cmd_vf_vlan_anti_spoof_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_anti_spoof_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_vlan_anti_spoof_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_anti_spoof_result,
@@ -11390,7 +12253,7 @@ struct cmd_vf_mac_anti_spoof_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t mac;
cmdline_fixed_string_t antispoof;
- uint8_t port_id;
+ portid_t port_id;
uint32_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -11415,7 +12278,7 @@ cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_antispoof =
cmdline_parse_token_num_t cmd_vf_mac_anti_spoof_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_mac_anti_spoof_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_mac_anti_spoof_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_mac_anti_spoof_result,
@@ -11496,7 +12359,7 @@ struct cmd_vf_vlan_stripq_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t vlan;
cmdline_fixed_string_t stripq;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -11521,7 +12384,7 @@ cmdline_parse_token_string_t cmd_vf_vlan_stripq_stripq =
cmdline_parse_token_num_t cmd_vf_vlan_stripq_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_stripq_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_vlan_stripq_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_stripq_result,
@@ -11602,7 +12465,7 @@ struct cmd_vf_vlan_insert_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t vlan;
cmdline_fixed_string_t insert;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
uint16_t vlan_id;
};
@@ -11627,7 +12490,7 @@ cmdline_parse_token_string_t cmd_vf_vlan_insert_insert =
cmdline_parse_token_num_t cmd_vf_vlan_insert_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_insert_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_vlan_insert_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_vlan_insert_result,
@@ -11705,7 +12568,7 @@ struct cmd_tx_loopback_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t tx;
cmdline_fixed_string_t loopback;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t on_off;
};
@@ -11725,7 +12588,7 @@ cmdline_parse_token_string_t cmd_tx_loopback_loopback =
cmdline_parse_token_num_t cmd_tx_loopback_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_tx_loopback_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_tx_loopback_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_tx_loopback_result,
@@ -11797,7 +12660,7 @@ struct cmd_all_queues_drop_en_result {
cmdline_fixed_string_t all;
cmdline_fixed_string_t queues;
cmdline_fixed_string_t drop;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t on_off;
};
@@ -11821,7 +12684,7 @@ cmdline_parse_token_string_t cmd_all_queues_drop_en_drop =
cmdline_parse_token_num_t cmd_all_queues_drop_en_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_all_queues_drop_en_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_all_queues_drop_en_on_off =
TOKEN_STRING_INITIALIZER
(struct cmd_all_queues_drop_en_result,
@@ -11888,7 +12751,7 @@ struct cmd_vf_split_drop_en_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t split;
cmdline_fixed_string_t drop;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -11913,7 +12776,7 @@ cmdline_parse_token_string_t cmd_vf_split_drop_en_drop =
cmdline_parse_token_num_t cmd_vf_split_drop_en_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_split_drop_en_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_split_drop_en_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_split_drop_en_result,
@@ -11981,7 +12844,7 @@ struct cmd_set_vf_mac_addr_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t mac;
cmdline_fixed_string_t addr;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
struct ether_addr mac_addr;
@@ -12007,7 +12870,7 @@ cmdline_parse_token_string_t cmd_set_vf_mac_addr_addr =
cmdline_parse_token_num_t cmd_set_vf_mac_addr_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_mac_addr_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_set_vf_mac_addr_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_mac_addr_result,
@@ -12084,7 +12947,7 @@ struct cmd_macsec_offload_on_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t macsec;
cmdline_fixed_string_t offload;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t on;
cmdline_fixed_string_t encrypt;
cmdline_fixed_string_t en_on_off;
@@ -12108,7 +12971,7 @@ cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_macsec_offload_on_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_macsec_offload_on_on =
TOKEN_STRING_INITIALIZER
(struct cmd_macsec_offload_on_result,
@@ -12190,7 +13053,7 @@ struct cmd_macsec_offload_off_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t macsec;
cmdline_fixed_string_t offload;
- uint8_t port_id;
+ portid_t port_id;
cmdline_fixed_string_t off;
};
@@ -12210,7 +13073,7 @@ cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_macsec_offload_off_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_string_t cmd_macsec_offload_off_off =
TOKEN_STRING_INITIALIZER
(struct cmd_macsec_offload_off_result,
@@ -12268,7 +13131,7 @@ struct cmd_macsec_sc_result {
cmdline_fixed_string_t macsec;
cmdline_fixed_string_t sc;
cmdline_fixed_string_t tx_rx;
- uint8_t port_id;
+ portid_t port_id;
struct ether_addr mac;
uint16_t pi;
};
@@ -12293,7 +13156,7 @@ cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
cmdline_parse_token_num_t cmd_macsec_sc_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_macsec_sc_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
TOKEN_ETHERADDR_INITIALIZER
(struct cmd_macsec_sc_result,
@@ -12358,7 +13221,7 @@ struct cmd_macsec_sa_result {
cmdline_fixed_string_t macsec;
cmdline_fixed_string_t sa;
cmdline_fixed_string_t tx_rx;
- uint8_t port_id;
+ portid_t port_id;
uint8_t idx;
uint8_t an;
uint32_t pn;
@@ -12385,7 +13248,7 @@ cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
cmdline_parse_token_num_t cmd_macsec_sa_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_macsec_sa_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_macsec_sa_idx =
TOKEN_NUM_INITIALIZER
(struct cmd_macsec_sa_result,
@@ -12484,7 +13347,7 @@ struct cmd_vf_promisc_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t promisc;
- uint8_t port_id;
+ portid_t port_id;
uint32_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -12505,7 +13368,7 @@ cmdline_parse_token_string_t cmd_vf_promisc_promisc =
cmdline_parse_token_num_t cmd_vf_promisc_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_promisc_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_promisc_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_promisc_result,
@@ -12574,7 +13437,7 @@ struct cmd_vf_allmulti_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t allmulti;
- uint8_t port_id;
+ portid_t port_id;
uint32_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -12595,7 +13458,7 @@ cmdline_parse_token_string_t cmd_vf_allmulti_allmulti =
cmdline_parse_token_num_t cmd_vf_allmulti_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_allmulti_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_allmulti_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_allmulti_result,
@@ -12664,7 +13527,7 @@ struct cmd_set_vf_broadcast_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t broadcast;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -12685,7 +13548,7 @@ cmdline_parse_token_string_t cmd_set_vf_broadcast_broadcast =
cmdline_parse_token_num_t cmd_set_vf_broadcast_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_broadcast_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_set_vf_broadcast_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_broadcast_result,
@@ -12754,7 +13617,7 @@ struct cmd_set_vf_vlan_tag_result {
cmdline_fixed_string_t vf;
cmdline_fixed_string_t vlan;
cmdline_fixed_string_t tag;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
cmdline_fixed_string_t on_off;
};
@@ -12779,7 +13642,7 @@ cmdline_parse_token_string_t cmd_set_vf_vlan_tag_tag =
cmdline_parse_token_num_t cmd_set_vf_vlan_tag_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_vlan_tag_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_set_vf_vlan_tag_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_set_vf_vlan_tag_result,
@@ -12850,7 +13713,7 @@ struct cmd_vf_tc_bw_result {
cmdline_fixed_string_t min_bw;
cmdline_fixed_string_t max_bw;
cmdline_fixed_string_t strict_link_prio;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
uint8_t tc_no;
uint32_t bw;
@@ -12889,7 +13752,7 @@ cmdline_parse_token_string_t cmd_vf_tc_bw_max_bw =
cmdline_parse_token_num_t cmd_vf_tc_bw_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_tc_bw_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_vf_tc_bw_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_vf_tc_bw_result,
@@ -13181,6 +14044,86 @@ cmdline_parse_inst_t cmd_vf_tc_max_bw = {
},
};
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+
+/* *** Set Port default Traffic Management Hierarchy *** */
+struct cmd_set_port_tm_hierarchy_default_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t hierarchy;
+ cmdline_fixed_string_t def;
+ portid_t port_id;
+};
+
+cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result, tm, "tm");
+cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_hierarchy =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result,
+ hierarchy, "hierarchy");
+cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_default =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result,
+ def, "default");
+cmdline_parse_token_num_t cmd_set_port_tm_hierarchy_default_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_tm_hierarchy_default_result,
+ port_id, UINT16);
+
+static void cmd_set_port_tm_hierarchy_default_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_tm_hierarchy_default_result *res = parsed_result;
+ struct rte_port *p;
+ portid_t port_id = res->port_id;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ p = &ports[port_id];
+
+ /* Port tm flag */
+ if (p->softport.tm_flag == 0) {
+ printf(" tm not enabled on port %u (error)\n", port_id);
+ return;
+ }
+
+ /* Forward mode: tm */
+ if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "tm")) {
+ printf(" tm mode not enabled(error)\n");
+ return;
+ }
+
+ /* Set the default tm hierarchy */
+ p->softport.tm.default_hierarchy_enable = 1;
+}
+
+cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
+ .f = cmd_set_port_tm_hierarchy_default_parsed,
+ .data = NULL,
+ .help_str = "set port tm hierarchy default <port_id>",
+ .tokens = {
+ (void *)&cmd_set_port_tm_hierarchy_default_set,
+ (void *)&cmd_set_port_tm_hierarchy_default_port,
+ (void *)&cmd_set_port_tm_hierarchy_default_tm,
+ (void *)&cmd_set_port_tm_hierarchy_default_hierarchy,
+ (void *)&cmd_set_port_tm_hierarchy_default_default,
+ (void *)&cmd_set_port_tm_hierarchy_default_port_id,
+ NULL,
+ },
+};
+#endif
+
/* Strict link priority scheduling mode setting */
static void
cmd_strict_link_prio_parsed(
@@ -13233,7 +14176,7 @@ cmdline_parse_inst_t cmd_strict_link_prio = {
struct cmd_ddp_add_result {
cmdline_fixed_string_t ddp;
cmdline_fixed_string_t add;
- uint8_t port_id;
+ portid_t port_id;
char filepath[];
};
@@ -13242,7 +14185,7 @@ cmdline_parse_token_string_t cmd_ddp_add_ddp =
cmdline_parse_token_string_t cmd_ddp_add_add =
TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, add, "add");
cmdline_parse_token_num_t cmd_ddp_add_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_ddp_add_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_add_result, port_id, UINT16);
cmdline_parse_token_string_t cmd_ddp_add_filepath =
TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, filepath, NULL);
@@ -13318,7 +14261,7 @@ cmdline_parse_inst_t cmd_ddp_add = {
struct cmd_ddp_del_result {
cmdline_fixed_string_t ddp;
cmdline_fixed_string_t del;
- uint8_t port_id;
+ portid_t port_id;
char filepath[];
};
@@ -13327,7 +14270,7 @@ cmdline_parse_token_string_t cmd_ddp_del_ddp =
cmdline_parse_token_string_t cmd_ddp_del_del =
TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, del, "del");
cmdline_parse_token_num_t cmd_ddp_del_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_ddp_del_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_del_result, port_id, UINT16);
cmdline_parse_token_string_t cmd_ddp_del_filepath =
TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, filepath, NULL);
@@ -13412,12 +14355,20 @@ cmd_ddp_info_parsed(
uint32_t pkg_size;
int ret = -ENOTSUP;
#ifdef RTE_LIBRTE_I40E_PMD
- uint32_t i;
+ uint32_t i, j, n;
uint8_t *buff;
- uint32_t buff_size;
+ uint32_t buff_size = 0;
struct rte_pmd_i40e_profile_info info;
- uint32_t dev_num;
+ uint32_t dev_num = 0;
struct rte_pmd_i40e_ddp_device_id *devs;
+ uint32_t proto_num = 0;
+ struct rte_pmd_i40e_proto_info *proto = NULL;
+ uint32_t pctype_num = 0;
+ struct rte_pmd_i40e_ptype_info *pctype;
+ uint32_t ptype_num = 0;
+ struct rte_pmd_i40e_ptype_info *ptype;
+ uint8_t proto_id;
#endif
pkg = open_ddp_package_file(res->filepath, &pkg_size);
@@ -13470,12 +14421,11 @@ cmd_ddp_info_parsed(
(uint8_t *)&dev_num, sizeof(dev_num),
RTE_PMD_I40E_PKG_INFO_DEVID_NUM);
if (!ret && dev_num) {
- devs = (struct rte_pmd_i40e_ddp_device_id *)malloc(dev_num *
- sizeof(struct rte_pmd_i40e_ddp_device_id));
+ buff_size = dev_num * sizeof(struct rte_pmd_i40e_ddp_device_id);
+ devs = (struct rte_pmd_i40e_ddp_device_id *)malloc(buff_size);
if (devs) {
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
- (uint8_t *)devs, dev_num *
- sizeof(struct rte_pmd_i40e_ddp_device_id),
+ (uint8_t *)devs, buff_size,
RTE_PMD_I40E_PKG_INFO_DEVID_LIST);
if (!ret) {
printf("List of supported devices:\n");
@@ -13491,7 +14441,110 @@ cmd_ddp_info_parsed(
free(devs);
}
}
+
+ /* get information about protocols and packet types */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&proto_num, sizeof(proto_num),
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+ if (ret || !proto_num)
+ goto no_print_return;
+
+ buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+ proto = (struct rte_pmd_i40e_proto_info *)malloc(buff_size);
+ if (!proto)
+ goto no_print_return;
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)proto,
+ buff_size,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+ if (!ret) {
+ printf("List of used protocols:\n");
+ for (i = 0; i < proto_num; i++)
+ printf(" %2u: %s\n", proto[i].proto_id,
+ proto[i].name);
+ printf("\n");
+ }
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&pctype_num, sizeof(pctype_num),
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+ if (ret || !pctype_num)
+ goto no_print_pctypes;
+
+ buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ pctype = (struct rte_pmd_i40e_ptype_info *)malloc(buff_size);
+ if (!pctype)
+ goto no_print_pctypes;
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)pctype,
+ buff_size,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+ if (ret) {
+ free(pctype);
+ goto no_print_pctypes;
+ }
+
+ printf("List of defined packet classification types:\n");
+ for (i = 0; i < pctype_num; i++) {
+ printf(" %2u:", pctype[i].ptype_id);
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = pctype[i].protocols[j];
+ if (proto_id != RTE_PMD_I40E_PROTO_UNUSED) {
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id == proto_id) {
+ printf(" %s", proto[n].name);
+ break;
+ }
+ }
+ }
+ }
+ printf("\n");
+ }
+ printf("\n");
+ free(pctype);
+
+no_print_pctypes:
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&ptype_num,
+ sizeof(ptype_num),
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+ if (ret || !ptype_num)
+ goto no_print_return;
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ ptype = (struct rte_pmd_i40e_ptype_info *)malloc(buff_size);
+ if (!ptype)
+ goto no_print_return;
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)ptype,
+ buff_size,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+ if (ret) {
+ free(ptype);
+ goto no_print_return;
+ }
+ printf("List of defined packet types:\n");
+ for (i = 0; i < ptype_num; i++) {
+ printf(" %2u:", ptype[i].ptype_id);
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = ptype[i].protocols[j];
+ if (proto_id != RTE_PMD_I40E_PROTO_UNUSED) {
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id == proto_id) {
+ printf(" %s", proto[n].name);
+ break;
+ }
+ }
+ }
+ }
+ printf("\n");
+ }
+ free(ptype);
+ printf("\n");
+
ret = 0;
+no_print_return:
+ if (proto)
+ free(proto);
#endif
if (ret == -ENOTSUP)
printf("Function not supported in PMD driver\n");
@@ -13519,7 +14572,7 @@ struct cmd_ddp_get_list_result {
cmdline_fixed_string_t ddp;
cmdline_fixed_string_t get;
cmdline_fixed_string_t list;
- uint8_t port_id;
+ portid_t port_id;
};
cmdline_parse_token_string_t cmd_ddp_get_list_ddp =
@@ -13529,7 +14582,7 @@ cmdline_parse_token_string_t cmd_ddp_get_list_get =
cmdline_parse_token_string_t cmd_ddp_get_list_list =
TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, list, "list");
cmdline_parse_token_num_t cmd_ddp_get_list_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_ddp_get_list_result, port_id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_get_list_result, port_id, UINT16);
static void
cmd_ddp_get_list_parsed(
@@ -13606,7 +14659,7 @@ struct cmd_show_vf_stats_result {
cmdline_fixed_string_t show;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t stats;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
};
@@ -13626,7 +14679,7 @@ cmdline_parse_token_string_t cmd_show_vf_stats_stats =
cmdline_parse_token_num_t cmd_show_vf_stats_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_show_vf_stats_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_show_vf_stats_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_show_vf_stats_result,
@@ -13715,7 +14768,7 @@ struct cmd_clear_vf_stats_result {
cmdline_fixed_string_t clear;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t stats;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
};
@@ -13735,7 +14788,7 @@ cmdline_parse_token_string_t cmd_clear_vf_stats_stats =
cmdline_parse_token_num_t cmd_clear_vf_stats_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_clear_vf_stats_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_clear_vf_stats_vf_id =
TOKEN_NUM_INITIALIZER
(struct cmd_clear_vf_stats_result,
@@ -13795,6 +14848,303 @@ cmdline_parse_inst_t cmd_clear_vf_stats = {
},
};
+/* port config pctype mapping reset */
+
+/* Common result structure for port config pctype mapping reset */
+struct cmd_pctype_mapping_reset_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t pctype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t reset;
+};
+
+/* Common CLI fields for port config pctype mapping reset*/
+cmdline_parse_token_string_t cmd_pctype_mapping_reset_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_pctype_mapping_reset_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_pctype_mapping_reset_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_pctype_mapping_reset_pctype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ pctype, "pctype");
+cmdline_parse_token_string_t cmd_pctype_mapping_reset_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_pctype_mapping_reset_reset =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_reset_result,
+ reset, "reset");
+
+static void
+cmd_pctype_mapping_reset_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_pctype_mapping_reset_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_flow_type_mapping_reset(res->port_id);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_pctype_mapping_reset = {
+ .f = cmd_pctype_mapping_reset_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> pctype mapping reset",
+ .tokens = {
+ (void *)&cmd_pctype_mapping_reset_port,
+ (void *)&cmd_pctype_mapping_reset_config,
+ (void *)&cmd_pctype_mapping_reset_port_id,
+ (void *)&cmd_pctype_mapping_reset_pctype,
+ (void *)&cmd_pctype_mapping_reset_mapping,
+ (void *)&cmd_pctype_mapping_reset_reset,
+ NULL,
+ },
+};
+
+/* show port pctype mapping */
+
+/* Common result structure for show port pctype mapping */
+struct cmd_pctype_mapping_get_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t pctype;
+ cmdline_fixed_string_t mapping;
+};
+
+/* Common CLI fields for pctype mapping get */
+cmdline_parse_token_string_t cmd_pctype_mapping_get_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_get_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_pctype_mapping_get_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_get_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_pctype_mapping_get_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_pctype_mapping_get_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_pctype_mapping_get_pctype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_get_result,
+ pctype, "pctype");
+cmdline_parse_token_string_t cmd_pctype_mapping_get_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_get_result,
+ mapping, "mapping");
+
+static void
+cmd_pctype_mapping_get_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_pctype_mapping_get_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_flow_type_mapping
+ mapping[RTE_PMD_I40E_FLOW_TYPE_MAX];
+ int i, j, first_pctype;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_flow_type_mapping_get(res->port_id, mapping);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ return;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ return;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ return;
+ }
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ for (i = 0; i < RTE_PMD_I40E_FLOW_TYPE_MAX; i++) {
+ if (mapping[i].pctype != 0ULL) {
+ first_pctype = 1;
+
+ printf("pctype: ");
+ for (j = 0; j < RTE_PMD_I40E_PCTYPE_MAX; j++) {
+ if (mapping[i].pctype & (1ULL << j)) {
+ printf(first_pctype ?
+ "%02d" : ",%02d", j);
+ first_pctype = 0;
+ }
+ }
+ printf(" -> flowtype: %02d\n", mapping[i].flow_type);
+ }
+ }
+#endif
+}
+
+cmdline_parse_inst_t cmd_pctype_mapping_get = {
+ .f = cmd_pctype_mapping_get_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> pctype mapping",
+ .tokens = {
+ (void *)&cmd_pctype_mapping_get_show,
+ (void *)&cmd_pctype_mapping_get_port,
+ (void *)&cmd_pctype_mapping_get_port_id,
+ (void *)&cmd_pctype_mapping_get_pctype,
+ (void *)&cmd_pctype_mapping_get_mapping,
+ NULL,
+ },
+};
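
[cmd_pctype_mapping_get_parsed() above walks a 64-bit pctype bitmask to print a comma-separated index list, and the update command that follows builds the same mask from a parsed index list. Both directions in a standalone sketch (names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define PCTYPE_MAX 64	/* stand-in for RTE_PMD_I40E_PCTYPE_MAX */

/* Build a mask from a list of pctype indices (the update direction). */
static uint64_t
pctypes_to_mask(const unsigned int *list, unsigned int n)
{
	uint64_t mask = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		mask |= 1ULL << list[i];
	return mask;
}

/* Print "a,b,c" from a mask (the get direction). */
static void
print_pctype_mask(uint64_t mask)
{
	int j, first = 1;

	for (j = 0; j < PCTYPE_MAX; j++)
		if (mask & (1ULL << j)) {
			printf(first ? "%02d" : ",%02d", j);
			first = 0;
		}
	printf("\n");
}

int main(void)
{
	unsigned int list[] = {31, 33, 34};

	print_pctype_mask(pctypes_to_mask(list, 3));	/* 31,33,34 */
	return 0;
}]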
+
+/* port config pctype mapping update */
+
+/* Common result structure for port config pctype mapping update */
+struct cmd_pctype_mapping_update_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t pctype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t update;
+ cmdline_fixed_string_t pctype_list;
+ uint16_t flow_type;
+};
+
+/* Common CLI fields for pctype mapping update*/
+cmdline_parse_token_string_t cmd_pctype_mapping_update_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_pctype_mapping_update_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_pctype_mapping_update_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_pctype_mapping_update_pctype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ pctype, "pctype");
+cmdline_parse_token_string_t cmd_pctype_mapping_update_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_pctype_mapping_update_update =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ update, "update");
+cmdline_parse_token_string_t cmd_pctype_mapping_update_pc_type =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ pctype_list, NULL);
+cmdline_parse_token_num_t cmd_pctype_mapping_update_flow_type =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_pctype_mapping_update_result,
+ flow_type, UINT16);
+
+static void
+cmd_pctype_mapping_update_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_pctype_mapping_update_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_flow_type_mapping mapping;
+ unsigned int i;
+ unsigned int nb_item;
+ unsigned int pctype_list[RTE_PMD_I40E_PCTYPE_MAX];
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ nb_item = parse_item_list(res->pctype_list, "pctypes",
+ RTE_PMD_I40E_PCTYPE_MAX, pctype_list, 1);
+ mapping.flow_type = res->flow_type;
+ for (i = 0, mapping.pctype = 0ULL; i < nb_item; i++)
+ mapping.pctype |= (1ULL << pctype_list[i]);
+ ret = rte_pmd_i40e_flow_type_mapping_update(res->port_id,
+ &mapping,
+ 1,
+ 0);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid pctype or flow type\n");
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_pctype_mapping_update = {
+ .f = cmd_pctype_mapping_update_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> pctype mapping update"
+ " <pctype_id_0,[pctype_id_1]*> <flowtype_id>",
+ .tokens = {
+ (void *)&cmd_pctype_mapping_update_port,
+ (void *)&cmd_pctype_mapping_update_config,
+ (void *)&cmd_pctype_mapping_update_port_id,
+ (void *)&cmd_pctype_mapping_update_pctype,
+ (void *)&cmd_pctype_mapping_update_mapping,
+ (void *)&cmd_pctype_mapping_update_update,
+ (void *)&cmd_pctype_mapping_update_pc_type,
+ (void *)&cmd_pctype_mapping_update_flow_type,
+ NULL,
+ },
+};
+
/* ptype mapping get */
/* Common result structure for ptype mapping get */
@@ -13802,7 +15152,7 @@ struct cmd_ptype_mapping_get_result {
cmdline_fixed_string_t ptype;
cmdline_fixed_string_t mapping;
cmdline_fixed_string_t get;
- uint8_t port_id;
+ portid_t port_id;
uint8_t valid_only;
};
@@ -13822,7 +15172,7 @@ cmdline_parse_token_string_t cmd_ptype_mapping_get_get =
cmdline_parse_token_num_t cmd_ptype_mapping_get_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_get_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_ptype_mapping_get_valid_only =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_get_result,
@@ -13897,7 +15247,7 @@ struct cmd_ptype_mapping_replace_result {
cmdline_fixed_string_t ptype;
cmdline_fixed_string_t mapping;
cmdline_fixed_string_t replace;
- uint8_t port_id;
+ portid_t port_id;
uint32_t target;
uint8_t mask;
uint32_t pkt_type;
@@ -13919,7 +15269,7 @@ cmdline_parse_token_string_t cmd_ptype_mapping_replace_replace =
cmdline_parse_token_num_t cmd_ptype_mapping_replace_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_replace_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_ptype_mapping_replace_target =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_replace_result,
@@ -13994,7 +15344,7 @@ struct cmd_ptype_mapping_reset_result {
cmdline_fixed_string_t ptype;
cmdline_fixed_string_t mapping;
cmdline_fixed_string_t reset;
- uint8_t port_id;
+ portid_t port_id;
};
/* Common CLI fields for ptype mapping reset*/
@@ -14013,7 +15363,7 @@ cmdline_parse_token_string_t cmd_ptype_mapping_reset_reset =
cmdline_parse_token_num_t cmd_ptype_mapping_reset_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_reset_result,
- port_id, UINT8);
+ port_id, UINT16);
static void
cmd_ptype_mapping_reset_parsed(
@@ -14065,7 +15415,7 @@ struct cmd_ptype_mapping_update_result {
cmdline_fixed_string_t ptype;
cmdline_fixed_string_t mapping;
cmdline_fixed_string_t reset;
- uint8_t port_id;
+ portid_t port_id;
uint8_t hw_ptype;
uint32_t sw_ptype;
};
@@ -14086,7 +15436,7 @@ cmdline_parse_token_string_t cmd_ptype_mapping_update_update =
cmdline_parse_token_num_t cmd_ptype_mapping_update_port_id =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_update_result,
- port_id, UINT8);
+ port_id, UINT16);
cmdline_parse_token_num_t cmd_ptype_mapping_update_hw_ptype =
TOKEN_NUM_INITIALIZER
(struct cmd_ptype_mapping_update_result,
@@ -14249,8 +15599,12 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_show,
- (cmdline_parse_inst_t *)&cmd_enable_gro,
- (cmdline_parse_inst_t *)&cmd_gro_set,
+ (cmdline_parse_inst_t *)&cmd_gro_enable,
+ (cmdline_parse_inst_t *)&cmd_gro_flush,
+ (cmdline_parse_inst_t *)&cmd_gro_show,
+ (cmdline_parse_inst_t *)&cmd_gso_enable,
+ (cmdline_parse_inst_t *)&cmd_gso_size,
+ (cmdline_parse_inst_t *)&cmd_gso_show,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,
@@ -14272,6 +15626,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_stop,
(cmdline_parse_inst_t *)&cmd_mac_addr,
(cmdline_parse_inst_t *)&cmd_set_qmap,
+ (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero,
(cmdline_parse_inst_t *)&cmd_operate_port,
(cmdline_parse_inst_t *)&cmd_operate_specific_port,
(cmdline_parse_inst_t *)&cmd_operate_attach_port,
@@ -14330,6 +15685,15 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_hash_input_set,
(cmdline_parse_inst_t *)&cmd_set_fdir_input_set,
(cmdline_parse_inst_t *)&cmd_flow,
+ (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm,
+ (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm,
+ (cmdline_parse_inst_t *)&cmd_del_port_meter_profile,
+ (cmdline_parse_inst_t *)&cmd_set_port_meter,
+ (cmdline_parse_inst_t *)&cmd_del_port_meter,
+ (cmdline_parse_inst_t *)&cmd_set_port_meter_profile,
+ (cmdline_parse_inst_t *)&cmd_set_port_meter_policer_action,
+ (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask,
+ (cmdline_parse_inst_t *)&cmd_show_port_meter_stats,
(cmdline_parse_inst_t *)&cmd_mcast_addr,
(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_all,
(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_specific,
@@ -14366,6 +15730,9 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_vf_tc_max_bw,
(cmdline_parse_inst_t *)&cmd_strict_link_prio,
(cmdline_parse_inst_t *)&cmd_tc_min_bw,
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+ (cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default,
+#endif
(cmdline_parse_inst_t *)&cmd_ddp_add,
(cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
@@ -14376,6 +15743,32 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_ptype_mapping_replace,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_reset,
(cmdline_parse_inst_t *)&cmd_ptype_mapping_update,
+
+ (cmdline_parse_inst_t *)&cmd_pctype_mapping_get,
+ (cmdline_parse_inst_t *)&cmd_pctype_mapping_reset,
+ (cmdline_parse_inst_t *)&cmd_pctype_mapping_update,
+ (cmdline_parse_inst_t *)&cmd_queue_region,
+ (cmdline_parse_inst_t *)&cmd_region_flowtype,
+ (cmdline_parse_inst_t *)&cmd_user_priority_region,
+ (cmdline_parse_inst_t *)&cmd_flush_queue_region,
+ (cmdline_parse_inst_t *)&cmd_show_queue_region_info_all,
+ (cmdline_parse_inst_t *)&cmd_show_port_tm_cap,
+ (cmdline_parse_inst_t *)&cmd_show_port_tm_level_cap,
+ (cmdline_parse_inst_t *)&cmd_show_port_tm_node_cap,
+ (cmdline_parse_inst_t *)&cmd_show_port_tm_node_type,
+ (cmdline_parse_inst_t *)&cmd_show_port_tm_node_stats,
+ (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shaper_profile,
+ (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shaper_profile,
+ (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shared_shaper,
+ (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shared_shaper,
+ (cmdline_parse_inst_t *)&cmd_add_port_tm_node_wred_profile,
+ (cmdline_parse_inst_t *)&cmd_del_port_tm_node_wred_profile,
+ (cmdline_parse_inst_t *)&cmd_set_port_tm_node_shaper_profile,
+ (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node,
+ (cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node,
+ (cmdline_parse_inst_t *)&cmd_del_port_tm_node,
+ (cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent,
+ (cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit,
NULL,
};
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a17a0043..df16d2ab 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -171,6 +171,10 @@ enum index {
ITEM_GRE_PROTO,
ITEM_FUZZY,
ITEM_FUZZY_THRESH,
+ ITEM_GTP,
+ ITEM_GTP_TEID,
+ ITEM_GTPC,
+ ITEM_GTPU,
/* Validate/create actions. */
ACTIONS,
@@ -194,6 +198,8 @@ enum index {
ACTION_VF,
ACTION_VF_ORIGINAL,
ACTION_VF_ID,
+ ACTION_METER,
+ ACTION_METER_ID,
};
/** Size of pattern[] field in struct rte_flow_item_raw. */
@@ -226,7 +232,7 @@ struct context {
int args_num; /**< Number of entries in args[]. */
uint32_t eol:1; /**< EOL has been detected. */
uint32_t last:1; /**< No more arguments. */
- uint16_t port; /**< Current port ID (for completions). */
+ portid_t port; /**< Current port ID (for completions). */
uint32_t objdata; /**< Object-specific data. */
void *object; /**< Address of current object for relative offsets. */
void *objmask; /**< Object a full mask must be written to. */
@@ -346,7 +352,7 @@ struct token {
/** Parser output buffer layout expected by cmd_flow_parsed(). */
struct buffer {
enum index command; /**< Flow command. */
- uint16_t port; /**< Affected port ID. */
+ portid_t port; /**< Affected port ID. */
union {
struct {
struct rte_flow_attr attr;
@@ -451,6 +457,9 @@ static const enum index next_item[] = {
ITEM_MPLS,
ITEM_GRE,
ITEM_FUZZY,
+ ITEM_GTP,
+ ITEM_GTPC,
+ ITEM_GTPU,
ZERO,
};
@@ -588,6 +597,12 @@ static const enum index item_gre[] = {
ZERO,
};
+static const enum index item_gtp[] = {
+ ITEM_GTP_TEID,
+ ITEM_NEXT,
+ ZERO,
+};
+
static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
@@ -601,6 +616,7 @@ static const enum index next_action[] = {
ACTION_RSS,
ACTION_PF,
ACTION_VF,
+ ACTION_METER,
ZERO,
};
@@ -635,6 +651,12 @@ static const enum index action_vf[] = {
ZERO,
};
+static const enum index action_meter[] = {
+ ACTION_METER_ID,
+ ACTION_NEXT,
+ ZERO,
+};
+
static int parse_init(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -1421,6 +1443,33 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
thresh)),
},
+ [ITEM_GTP] = {
+ .name = "gtp",
+ .help = "match GTP header",
+ .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+ .next = NEXT(item_gtp),
+ .call = parse_vc,
+ },
+ [ITEM_GTP_TEID] = {
+ .name = "teid",
+ .help = "tunnel endpoint identifier",
+ .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
+ },
+ [ITEM_GTPC] = {
+ .name = "gtpc",
+ .help = "match GTP header",
+ .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+ .next = NEXT(item_gtp),
+ .call = parse_vc,
+ },
+ [ITEM_GTPU] = {
+ .name = "gtpu",
+ .help = "match GTP header",
+ .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
+ .next = NEXT(item_gtp),
+ .call = parse_vc,
+ },
/* Validate/create actions. */
[ACTIONS] = {
@@ -1566,6 +1615,21 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
.call = parse_vc_conf,
},
+ [ACTION_METER] = {
+ .name = "meter",
+ .help = "meter the directed packets at given id",
+ .priv = PRIV_ACTION(METER,
+ sizeof(struct rte_flow_action_meter)),
+ .next = NEXT(action_meter),
+ .call = parse_vc,
+ },
+ [ACTION_METER_ID] = {
+ .name = "mtr_id",
+ .help = "meter id to use",
+ .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
+ .call = parse_vc_conf,
+ },
};
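
[With these tokens in place, the new GTP items and the meter action compose in the usual testpmd flow syntax; for example (illustrative values, and the meter object must already exist):

   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 1234 / end actions meter mtr_id 1 / end

gtp, gtpc and gtpu all branch into the same item_gtp list, so teid is available after any of the three.]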
/** Remove and return last entry from argument stack. */
@@ -2578,7 +2642,7 @@ comp_rule_id(struct context *ctx, const struct token *token,
(void)token;
if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
- ctx->port == (uint16_t)RTE_PORT_ALL)
+ ctx->port == (portid_t)RTE_PORT_ALL)
return -1;
port = &ports[ctx->port];
for (pf = port->flow_list; pf != NULL; pf = pf->next) {
diff --git a/app/test-pmd/cmdline_mtr.c b/app/test-pmd/cmdline_mtr.c
new file mode 100644
index 00000000..d8d806d7
--- /dev/null
+++ b/app/test-pmd/cmdline_mtr.c
@@ -0,0 +1,1018 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_mtr.h>
+
+#include "testpmd.h"
+#include "cmdline_mtr.h"
+
+/** Display Meter Error Message */
+static void
+print_err_msg(struct rte_mtr_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_MTR_ERROR_TYPE_NONE] = "no error",
+ [RTE_MTR_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_MTR_ERROR_TYPE_METER_PROFILE_ID] = "meter profile id",
+ [RTE_MTR_ERROR_TYPE_METER_PROFILE] = "meter profile null",
+ [RTE_MTR_ERROR_TYPE_MTR_ID] = "meter id",
+ [RTE_MTR_ERROR_TYPE_MTR_PARAMS] = "meter params null",
+ [RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN]
+ = "policer action(green)",
+ [RTE_MTR_ERROR_TYPE_POLICER_ACTION_YELLOW]
+ = "policer action(yellow)",
+ [RTE_MTR_ERROR_TYPE_POLICER_ACTION_RED]
+ = "policer action(red)",
+ [RTE_MTR_ERROR_TYPE_STATS_MASK] = "stats mask",
+ [RTE_MTR_ERROR_TYPE_STATS] = "stats",
+ [RTE_MTR_ERROR_TYPE_SHARED]
+ = "shared meter",
+ };
+
+ const char *errstr;
+ char buf[64];
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+
+ if (error->cause)
+ snprintf(buf, sizeof(buf), "cause: %p, ", error->cause);
+
+ printf("%s: %s%s (error %d)\n", errstr, error->cause ? buf : "",
+ error->message ? error->message : "(no stated reason)",
+ error->type);
+}
+
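+/* [print_err_msg() above relies on a sparse, enum-indexed table of
+ * designated initializers with a bounds-and-NULL fallback -- any enum
+ * value without an entry silently maps to NULL, which the fallback
+ * catches. The same idiom in isolation, as a self-contained sketch
+ * with RTE_DIM() approximated by a local macro:
+ *
+ *	#include <stdio.h>
+ *
+ *	#define DIM(a) (sizeof(a) / sizeof((a)[0]))
+ *
+ *	enum err { ERR_NONE, ERR_ID, ERR_PARAMS, ERR_MAX };
+ *
+ *	static const char *const errstr[] = {
+ *		[ERR_NONE] = "no error",
+ *		[ERR_PARAMS] = "params null",	// ERR_ID left NULL
+ *	};
+ *
+ *	static const char *err_to_str(enum err e)
+ *	{
+ *		if ((unsigned int)e >= DIM(errstr) || !errstr[e])
+ *			return "unknown type";
+ *		return errstr[e];
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		printf("%s\n", err_to_str(ERR_ID));	// unknown type
+ *		printf("%s\n", err_to_str(ERR_PARAMS));	// params null
+ *		return 0;
+ *	}
+ */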
+static int
+string_to_policer_action(char *s)
+{
+ if (strcmp(s, "G") == 0)
+ return MTR_POLICER_ACTION_COLOR_GREEN;
+
+ if (strcmp(s, "Y") == 0)
+ return MTR_POLICER_ACTION_COLOR_YELLOW;
+
+ if (strcmp(s, "R") == 0)
+ return MTR_POLICER_ACTION_COLOR_RED;
+
+ if (strcmp(s, "D") == 0)
+ return MTR_POLICER_ACTION_DROP;
+
+ return -1;
+}
+
+/* *** Add Port Meter Profile srtcm_rfc2697 *** */
+struct cmd_add_port_meter_profile_srtcm_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t profile;
+ cmdline_fixed_string_t srtcm_rfc2697;
+ uint16_t port_id;
+ uint32_t profile_id;
+ uint64_t cir;
+ uint64_t cbs;
+ uint64_t ebs;
+ uint8_t color_aware;
+};
+
+cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ meter, "meter");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ profile, "profile");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_srtcm_rfc2697 =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ srtcm_rfc2697, "srtcm_rfc2697");
+cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_cir =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ cir, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_cbs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ cbs, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_ebs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_srtcm_result,
+ ebs, UINT64);
+
+static void cmd_add_port_meter_profile_srtcm_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_meter_profile_srtcm_result *res = parsed_result;
+ struct rte_mtr_meter_profile mp;
+ struct rte_mtr_error error;
+ uint32_t profile_id = res->profile_id;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Meter profile params */
+ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
+ mp.alg = RTE_MTR_SRTCM_RFC2697;
+ mp.srtcm_rfc2697.cir = res->cir;
+ mp.srtcm_rfc2697.cbs = res->cbs;
+ mp.srtcm_rfc2697.ebs = res->ebs;
+
+ ret = rte_mtr_meter_profile_add(port_id, profile_id, &mp, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = {
+ .f = cmd_add_port_meter_profile_srtcm_parsed,
+ .data = NULL,
+ .help_str = "Add port meter profile srtcm (rfc2697)",
+ .tokens = {
+ (void *)&cmd_add_port_meter_profile_srtcm_add,
+ (void *)&cmd_add_port_meter_profile_srtcm_port,
+ (void *)&cmd_add_port_meter_profile_srtcm_meter,
+ (void *)&cmd_add_port_meter_profile_srtcm_profile,
+ (void *)&cmd_add_port_meter_profile_srtcm_port_id,
+ (void *)&cmd_add_port_meter_profile_srtcm_profile_id,
+ (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697,
+ (void *)&cmd_add_port_meter_profile_srtcm_cir,
+ (void *)&cmd_add_port_meter_profile_srtcm_cbs,
+ (void *)&cmd_add_port_meter_profile_srtcm_ebs,
+ NULL,
+ },
+};
+
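+/* [One thing the srtcm command leaves implicit is units: rte_mtr
+ * expresses CIR in bytes per second and CBS/EBS in bytes, so values
+ * arriving as link rates need converting first. A trivial sketch of
+ * that conversion (the helper name is made up):
+ *
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *
+ *	// Mbit/s -> the bytes/s expected for rte_mtr CIR/PIR.
+ *	static uint64_t mbps_to_bytes_per_sec(uint64_t mbps)
+ *	{
+ *		return mbps * 1000000 / 8;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		// a 100 Mbit/s committed rate -> cir = 12500000
+ *		printf("%llu\n", (unsigned long long)
+ *			mbps_to_bytes_per_sec(100));
+ *		return 0;
+ *	}
+ */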
+/* *** Add Port Meter Profile trtcm_rfc2698 *** */
+struct cmd_add_port_meter_profile_trtcm_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t profile;
+ cmdline_fixed_string_t trtcm_rfc2698;
+ uint16_t port_id;
+ uint32_t profile_id;
+ uint64_t cir;
+ uint64_t pir;
+ uint64_t cbs;
+ uint64_t pbs;
+};
+
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ meter, "meter");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ profile, "profile");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_trtcm_rfc2698 =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ trtcm_rfc2698, "trtcm_rfc2698");
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_cir =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ cir, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_pir =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ pir, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_cbs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ cbs, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_pbs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_result,
+ pbs, UINT64);
+
+static void cmd_add_port_meter_profile_trtcm_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_meter_profile_trtcm_result *res = parsed_result;
+ struct rte_mtr_meter_profile mp;
+ struct rte_mtr_error error;
+ uint32_t profile_id = res->profile_id;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Meter profile params */
+ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
+ mp.alg = RTE_MTR_TRTCM_RFC2698;
+ mp.trtcm_rfc2698.cir = res->cir;
+ mp.trtcm_rfc2698.pir = res->pir;
+ mp.trtcm_rfc2698.cbs = res->cbs;
+ mp.trtcm_rfc2698.pbs = res->pbs;
+
+ ret = rte_mtr_meter_profile_add(port_id, profile_id, &mp, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = {
+ .f = cmd_add_port_meter_profile_trtcm_parsed,
+ .data = NULL,
+ .help_str = "Add port meter profile trtcm (rfc2698)",
+ .tokens = {
+ (void *)&cmd_add_port_meter_profile_trtcm_add,
+ (void *)&cmd_add_port_meter_profile_trtcm_port,
+ (void *)&cmd_add_port_meter_profile_trtcm_meter,
+ (void *)&cmd_add_port_meter_profile_trtcm_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_port_id,
+ (void *)&cmd_add_port_meter_profile_trtcm_profile_id,
+ (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698,
+ (void *)&cmd_add_port_meter_profile_trtcm_cir,
+ (void *)&cmd_add_port_meter_profile_trtcm_pir,
+ (void *)&cmd_add_port_meter_profile_trtcm_cbs,
+ (void *)&cmd_add_port_meter_profile_trtcm_pbs,
+ NULL,
+ },
+};
+
+/* *** Add Port Meter Profile trtcm_rfc4115 *** */
+struct cmd_add_port_meter_profile_trtcm_rfc4115_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t profile;
+ cmdline_fixed_string_t trtcm_rfc4115;
+ uint16_t port_id;
+ uint32_t profile_id;
+ uint64_t cir;
+ uint64_t eir;
+ uint64_t cbs;
+ uint64_t ebs;
+};
+
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result, add,
+ "add");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ meter, "meter");
+cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ profile, "profile");
+cmdline_parse_token_string_t
+ cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115 =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ trtcm_rfc4115, "trtcm_rfc4115");
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_cir =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ cir, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_eir =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ eir, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_cbs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ cbs, UINT64);
+cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_ebs =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result,
+ ebs, UINT64);
+
+static void cmd_add_port_meter_profile_trtcm_rfc4115_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_meter_profile_trtcm_rfc4115_result *res =
+ parsed_result;
+ struct rte_mtr_meter_profile mp;
+ struct rte_mtr_error error;
+ uint32_t profile_id = res->profile_id;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Meter profile params */
+ memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
+ mp.alg = RTE_MTR_TRTCM_RFC4115;
+ mp.trtcm_rfc4115.cir = res->cir;
+ mp.trtcm_rfc4115.eir = res->eir;
+ mp.trtcm_rfc4115.cbs = res->cbs;
+ mp.trtcm_rfc4115.ebs = res->ebs;
+
+ ret = rte_mtr_meter_profile_add(port_id, profile_id, &mp, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = {
+ .f = cmd_add_port_meter_profile_trtcm_rfc4115_parsed,
+ .data = NULL,
+ .help_str = "Add port meter profile trtcm (rfc4115)",
+ .tokens = {
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_add,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_meter,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port_id,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile_id,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cir,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_eir,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cbs,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_ebs,
+ NULL,
+ },
+};
+
+/* *** Delete Port Meter Profile *** */
+struct cmd_del_port_meter_profile_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t profile_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_meter_profile_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_profile_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_meter_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_profile_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_del_port_meter_profile_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_profile_result,
+ meter, "meter");
+cmdline_parse_token_string_t cmd_del_port_meter_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_del_port_meter_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_meter_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_meter_profile_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_meter_profile_result,
+ profile_id, UINT32);
+
+static void cmd_del_port_meter_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_meter_profile_result *res = parsed_result;
+ struct rte_mtr_error error;
+ uint32_t profile_id = res->profile_id;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Delete meter profile */
+ ret = rte_mtr_meter_profile_delete(port_id, profile_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_meter_profile = {
+ .f = cmd_del_port_meter_profile_parsed,
+ .data = NULL,
+ .help_str = "Delete port meter profile",
+ .tokens = {
+ (void *)&cmd_del_port_meter_profile_del,
+ (void *)&cmd_del_port_meter_profile_port,
+ (void *)&cmd_del_port_meter_profile_meter,
+ (void *)&cmd_del_port_meter_profile_profile,
+ (void *)&cmd_del_port_meter_profile_port_id,
+ (void *)&cmd_del_port_meter_profile_profile_id,
+ NULL,
+ },
+};
+
+/* *** Create Port Meter Object *** */
+struct cmd_set_port_meter_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ uint16_t port_id;
+ uint32_t mtr_id;
+ uint32_t profile_id;
+ cmdline_fixed_string_t g_action;
+ cmdline_fixed_string_t y_action;
+ cmdline_fixed_string_t r_action;
+ uint64_t statistics_mask;
+ uint32_t shared;
+};
+
+cmdline_parse_token_string_t cmd_set_port_meter_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_meter_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_meter_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_result, meter, "meter");
+cmdline_parse_token_num_t cmd_set_port_meter_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_set_port_meter_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_result, mtr_id, UINT32);
+cmdline_parse_token_num_t cmd_set_port_meter_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_result, profile_id, UINT32);
+cmdline_parse_token_string_t cmd_set_port_meter_g_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result,
+ g_action, "R#Y#G#D");
+cmdline_parse_token_string_t cmd_set_port_meter_y_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result,
+ y_action, "R#Y#G#D");
+cmdline_parse_token_string_t cmd_set_port_meter_r_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_result,
+ r_action, "R#Y#G#D");
+cmdline_parse_token_num_t cmd_set_port_meter_statistics_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_meter_result,
+ statistics_mask, UINT64);
+cmdline_parse_token_num_t cmd_set_port_meter_shared =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_meter_result,
+ shared, UINT32);
+
+static void cmd_set_port_meter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_meter_result *res = parsed_result;
+ struct rte_mtr_error error;
+ struct rte_mtr_params params;
+ uint32_t mtr_id = res->mtr_id;
+ uint32_t shared = res->shared;
+ uint16_t port_id = res->port_id;
+
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Meter params */
+ memset(&params, 0, sizeof(struct rte_mtr_params));
+ params.meter_profile_id = res->profile_id;
+ params.use_prev_mtr_color = 1;
+ params.dscp_table = NULL;
+ params.meter_enable = 1;
+ params.action[RTE_MTR_GREEN] =
+ string_to_policer_action(res->g_action);
+ params.action[RTE_MTR_YELLOW] =
+ string_to_policer_action(res->y_action);
+ params.action[RTE_MTR_RED] =
+ string_to_policer_action(res->r_action);
+ params.stats_mask = res->statistics_mask;
+
+ ret = rte_mtr_create(port_id, mtr_id, &params, shared, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_meter = {
+ .f = cmd_set_port_meter_parsed,
+ .data = NULL,
+ .help_str = "Set port meter",
+ .tokens = {
+ (void *)&cmd_set_port_meter_set,
+ (void *)&cmd_set_port_meter_port,
+ (void *)&cmd_set_port_meter_meter,
+ (void *)&cmd_set_port_meter_port_id,
+ (void *)&cmd_set_port_meter_mtr_id,
+ (void *)&cmd_set_port_meter_profile_id,
+ (void *)&cmd_set_port_meter_g_action,
+ (void *)&cmd_set_port_meter_y_action,
+ (void *)&cmd_set_port_meter_r_action,
+ (void *)&cmd_set_port_meter_statistics_mask,
+ (void *)&cmd_set_port_meter_shared,
+ NULL,
+ },
+};
+
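+/* [Putting the profile and meter commands together, a plausible
+ * session (illustrative IDs: profile 0 is created first, then meter 1
+ * is bound to it with green and yellow kept and red dropped):
+ *
+ *	testpmd> add port meter profile 0 0 srtcm_rfc2697 12500000 2048 2048
+ *	testpmd> set port meter 0 1 0 G Y D 0xffff 0
+ */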
+/* *** Delete Port Meter Object *** */
+struct cmd_del_port_meter_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ uint16_t port_id;
+ uint32_t mtr_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_meter_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_meter_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_result, port, "port");
+cmdline_parse_token_string_t cmd_del_port_meter_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_meter_result, meter, "meter");
+cmdline_parse_token_num_t cmd_del_port_meter_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_meter_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_meter_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_meter_result, mtr_id, UINT32);
+
+static void cmd_del_port_meter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_meter_result *res = parsed_result;
+ struct rte_mtr_error error;
+ uint32_t mtr_id = res->mtr_id;
+ uint16_t port_id = res->port_id;
+	int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Destroy Meter */
+ ret = rte_mtr_destroy(port_id, mtr_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_meter = {
+ .f = cmd_del_port_meter_parsed,
+ .data = NULL,
+ .help_str = "Delete port meter",
+ .tokens = {
+ (void *)&cmd_del_port_meter_del,
+ (void *)&cmd_del_port_meter_port,
+ (void *)&cmd_del_port_meter_meter,
+ (void *)&cmd_del_port_meter_port_id,
+ (void *)&cmd_del_port_meter_mtr_id,
+ NULL,
+ },
+};
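+
+/*
+ * Usage sketch (illustrative ids): del port meter 0 0
+ * Destroys meter object <mtr_id> on port <port_id> via rte_mtr_destroy().
+ */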
+
+/* *** Set Port Meter Profile *** */
+struct cmd_set_port_meter_profile_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t mtr_id;
+ uint32_t profile_id;
+};
+
+cmdline_parse_token_string_t cmd_set_port_meter_profile_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_meter_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_meter_profile_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, meter, "meter");
+cmdline_parse_token_string_t cmd_set_port_meter_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, profile, "profile");
+cmdline_parse_token_num_t cmd_set_port_meter_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_set_port_meter_profile_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, mtr_id, UINT32);
+cmdline_parse_token_num_t cmd_set_port_meter_profile_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_profile_result, profile_id, UINT32);
+
+static void cmd_set_port_meter_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_meter_profile_result *res = parsed_result;
+ struct rte_mtr_error error;
+ uint32_t mtr_id = res->mtr_id;
+ uint32_t profile_id = res->profile_id;
+ uint16_t port_id = res->port_id;
+	int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Set meter profile */
+ ret = rte_mtr_meter_profile_update(port_id, mtr_id,
+ profile_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_meter_profile = {
+ .f = cmd_set_port_meter_profile_parsed,
+ .data = NULL,
+ .help_str = "Set port meter profile",
+ .tokens = {
+ (void *)&cmd_set_port_meter_profile_set,
+ (void *)&cmd_set_port_meter_profile_port,
+ (void *)&cmd_set_port_meter_profile_meter,
+ (void *)&cmd_set_port_meter_profile_profile,
+ (void *)&cmd_set_port_meter_profile_port_id,
+ (void *)&cmd_set_port_meter_profile_mtr_id,
+ (void *)&cmd_set_port_meter_profile_profile_id,
+ NULL,
+ },
+};
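+
+/*
+ * Usage sketch (illustrative ids): set port meter profile 0 0 1
+ * Switches meter <mtr_id> on port <port_id> to meter profile <profile_id>.
+ */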
+
+/* *** Set Port Meter Policer Action *** */
+struct cmd_set_port_meter_policer_action_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t policer;
+ cmdline_fixed_string_t action;
+ uint16_t port_id;
+ uint32_t mtr_id;
+ cmdline_fixed_string_t color;
+ cmdline_fixed_string_t policer_action;
+};
+
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, meter,
+ "meter");
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_policer =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, policer,
+ "policer");
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_action =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, action,
+ "action");
+cmdline_parse_token_num_t cmd_set_port_meter_policer_action_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, port_id,
+ UINT16);
+cmdline_parse_token_num_t cmd_set_port_meter_policer_action_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, mtr_id,
+ UINT32);
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_color =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result, color,
+ "G#Y#R");
+cmdline_parse_token_string_t cmd_set_port_meter_policer_action_policer_action =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_policer_action_result,
+ policer_action, "G#Y#R#D");
+
+static void cmd_set_port_meter_policer_action_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_meter_policer_action_result *res = parsed_result;
+ enum rte_mtr_color color;
+ enum rte_mtr_policer_action action[RTE_MTR_COLORS];
+ struct rte_mtr_error error;
+ uint32_t mtr_id = res->mtr_id;
+ uint16_t port_id = res->port_id;
+ char *c = res->color;
+ char *a = res->policer_action;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Color */
+ if (strcmp(c, "G") == 0)
+ color = RTE_MTR_GREEN;
+ else if (strcmp(c, "Y") == 0)
+ color = RTE_MTR_YELLOW;
+ else
+ color = RTE_MTR_RED;
+
+ /* Action */
+ if (strcmp(a, "G") == 0)
+ action[color] = MTR_POLICER_ACTION_COLOR_GREEN;
+ else if (strcmp(a, "Y") == 0)
+ action[color] = MTR_POLICER_ACTION_COLOR_YELLOW;
+ else if (strcmp(a, "R") == 0)
+ action[color] = MTR_POLICER_ACTION_COLOR_RED;
+ else
+ action[color] = MTR_POLICER_ACTION_DROP;
+
+ ret = rte_mtr_policer_actions_update(port_id, mtr_id,
+ 1 << color, action, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_meter_policer_action = {
+ .f = cmd_set_port_meter_policer_action_parsed,
+ .data = NULL,
+ .help_str = "Set port meter policer action",
+ .tokens = {
+ (void *)&cmd_set_port_meter_policer_action_set,
+ (void *)&cmd_set_port_meter_policer_action_port,
+ (void *)&cmd_set_port_meter_policer_action_meter,
+ (void *)&cmd_set_port_meter_policer_action_policer,
+ (void *)&cmd_set_port_meter_policer_action_action,
+ (void *)&cmd_set_port_meter_policer_action_port_id,
+ (void *)&cmd_set_port_meter_policer_action_mtr_id,
+ (void *)&cmd_set_port_meter_policer_action_color,
+ (void *)&cmd_set_port_meter_policer_action_policer_action,
+ NULL,
+ },
+};
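+
+/*
+ * Usage sketch (illustrative): set port meter policer action 0 0 R D
+ * i.e. for packets metered Red on meter 0 of port 0, apply the Drop
+ * policer action; colors come from "G#Y#R", actions from "G#Y#R#D".
+ */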
+
+/* *** Set Port Meter Stats Mask *** */
+struct cmd_set_port_meter_stats_mask_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t stats;
+ cmdline_fixed_string_t mask;
+ uint16_t port_id;
+ uint32_t mtr_id;
+ uint64_t stats_mask;
+};
+
+cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, meter, "meter");
+cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_stats =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, stats, "stats");
+cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_mask =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, mask, "mask");
+cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, mtr_id, UINT32);
+cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_stats_mask =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_meter_stats_mask_result, stats_mask,
+ UINT64);
+
+static void cmd_set_port_meter_stats_mask_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_meter_stats_mask_result *res = parsed_result;
+ struct rte_mtr_error error;
+ uint64_t stats_mask = res->stats_mask;
+ uint32_t mtr_id = res->mtr_id;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_mtr_stats_update(port_id, mtr_id, stats_mask, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_meter_stats_mask = {
+ .f = cmd_set_port_meter_stats_mask_parsed,
+ .data = NULL,
+ .help_str = "Set port meter stats mask",
+ .tokens = {
+ (void *)&cmd_set_port_meter_stats_mask_set,
+ (void *)&cmd_set_port_meter_stats_mask_port,
+ (void *)&cmd_set_port_meter_stats_mask_meter,
+ (void *)&cmd_set_port_meter_stats_mask_stats,
+ (void *)&cmd_set_port_meter_stats_mask_mask,
+ (void *)&cmd_set_port_meter_stats_mask_port_id,
+ (void *)&cmd_set_port_meter_stats_mask_mtr_id,
+ (void *)&cmd_set_port_meter_stats_mask_stats_mask,
+ NULL,
+ },
+};
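+
+/*
+ * Usage sketch (illustrative mask): set port meter stats mask 0 0 0xfff
+ * The mask selects which RTE_MTR_STATS_* counters the meter maintains.
+ */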
+
+/* *** Show Port Meter Stats *** */
+struct cmd_show_port_meter_stats_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t meter;
+ cmdline_fixed_string_t stats;
+ uint16_t port_id;
+ uint32_t mtr_id;
+ uint32_t clear;
+};
+
+cmdline_parse_token_string_t cmd_show_port_meter_stats_show =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, show, "show");
+cmdline_parse_token_string_t cmd_show_port_meter_stats_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, port, "port");
+cmdline_parse_token_string_t cmd_show_port_meter_stats_meter =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, meter, "meter");
+cmdline_parse_token_string_t cmd_show_port_meter_stats_stats =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, stats, "stats");
+cmdline_parse_token_num_t cmd_show_port_meter_stats_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_show_port_meter_stats_mtr_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, mtr_id, UINT32);
+cmdline_parse_token_num_t cmd_show_port_meter_stats_clear =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_meter_stats_result, clear, UINT32);
+
+static void cmd_show_port_meter_stats_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_meter_stats_result *res = parsed_result;
+ struct rte_mtr_stats stats;
+ uint64_t stats_mask = 0;
+ struct rte_mtr_error error;
+ uint32_t mtr_id = res->mtr_id;
+ uint32_t clear = res->clear;
+ uint16_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&stats, 0, sizeof(struct rte_mtr_stats));
+ ret = rte_mtr_stats_read(port_id, mtr_id, &stats,
+ &stats_mask, clear, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+
+ /* Display stats */
+ if (stats_mask & RTE_MTR_STATS_N_PKTS_GREEN)
+ printf("\tPkts G: %" PRIu64 "\n",
+ stats.n_pkts[RTE_MTR_GREEN]);
+ if (stats_mask & RTE_MTR_STATS_N_BYTES_GREEN)
+ printf("\tBytes G: %" PRIu64 "\n",
+ stats.n_bytes[RTE_MTR_GREEN]);
+ if (stats_mask & RTE_MTR_STATS_N_PKTS_YELLOW)
+ printf("\tPkts Y: %" PRIu64 "\n",
+ stats.n_pkts[RTE_MTR_YELLOW]);
+ if (stats_mask & RTE_MTR_STATS_N_BYTES_YELLOW)
+ printf("\tBytes Y: %" PRIu64 "\n",
+ stats.n_bytes[RTE_MTR_YELLOW]);
+ if (stats_mask & RTE_MTR_STATS_N_PKTS_RED)
+ printf("\tPkts R: %" PRIu64 "\n",
+ stats.n_pkts[RTE_MTR_RED]);
+ if (stats_mask & RTE_MTR_STATS_N_BYTES_RED)
+ printf("\tBytes Y: %" PRIu64 "\n",
+ stats.n_bytes[RTE_MTR_RED]);
+ if (stats_mask & RTE_MTR_STATS_N_PKTS_DROPPED)
+ printf("\tPkts DROPPED: %" PRIu64 "\n",
+ stats.n_pkts_dropped);
+ if (stats_mask & RTE_MTR_STATS_N_BYTES_DROPPED)
+ printf("\tBytes DROPPED: %" PRIu64 "\n",
+ stats.n_bytes_dropped);
+}
+
+cmdline_parse_inst_t cmd_show_port_meter_stats = {
+ .f = cmd_show_port_meter_stats_parsed,
+ .data = NULL,
+ .help_str = "Show port meter stats",
+ .tokens = {
+ (void *)&cmd_show_port_meter_stats_show,
+ (void *)&cmd_show_port_meter_stats_port,
+ (void *)&cmd_show_port_meter_stats_meter,
+ (void *)&cmd_show_port_meter_stats_stats,
+ (void *)&cmd_show_port_meter_stats_port_id,
+ (void *)&cmd_show_port_meter_stats_mtr_id,
+ (void *)&cmd_show_port_meter_stats_clear,
+ NULL,
+ },
+};
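+
+/*
+ * Usage sketch (illustrative): show port meter stats 0 0 0
+ * The trailing argument is the clear flag passed to rte_mtr_stats_read();
+ * a non-zero value resets the counters after they are read.
+ */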
diff --git a/app/test-pmd/cmdline_mtr.h b/app/test-pmd/cmdline_mtr.h
new file mode 100644
index 00000000..5d599efc
--- /dev/null
+++ b/app/test-pmd/cmdline_mtr.h
@@ -0,0 +1,49 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_MTR_H_
+#define _CMDLINE_MTR_H_
+
+/* Traffic Metering and Policing */
+extern cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm;
+extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm;
+extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115;
+extern cmdline_parse_inst_t cmd_del_port_meter_profile;
+extern cmdline_parse_inst_t cmd_set_port_meter;
+extern cmdline_parse_inst_t cmd_del_port_meter;
+extern cmdline_parse_inst_t cmd_set_port_meter_profile;
+extern cmdline_parse_inst_t cmd_set_port_meter_policer_action;
+extern cmdline_parse_inst_t cmd_set_port_meter_stats_mask;
+extern cmdline_parse_inst_t cmd_show_port_meter_stats;
+
+#endif /* _CMDLINE_MTR_H_ */
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
new file mode 100644
index 00000000..4acef98f
--- /dev/null
+++ b/app/test-pmd/cmdline_tm.c
@@ -0,0 +1,2071 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_tm.h>
+
+#include "testpmd.h"
+#include "cmdline_tm.h"
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+#define MAX_NUM_SHARED_SHAPERS 256
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+/** Display TM Error Message */
+static void
+print_err_msg(struct rte_tm_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_TM_ERROR_TYPE_NONE] = "no error",
+ [RTE_TM_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_TM_ERROR_TYPE_CAPABILITIES]
+ = "capability parameter null",
+ [RTE_TM_ERROR_TYPE_LEVEL_ID] = "level id",
+ [RTE_TM_ERROR_TYPE_WRED_PROFILE]
+ = "wred profile null",
+ [RTE_TM_ERROR_TYPE_WRED_PROFILE_GREEN] = "wred profile(green)",
+ [RTE_TM_ERROR_TYPE_WRED_PROFILE_YELLOW]
+ = "wred profile(yellow)",
+ [RTE_TM_ERROR_TYPE_WRED_PROFILE_RED] = "wred profile(red)",
+ [RTE_TM_ERROR_TYPE_WRED_PROFILE_ID] = "wred profile id",
+ [RTE_TM_ERROR_TYPE_SHARED_WRED_CONTEXT_ID]
+ = "shared wred context id",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE] = "shaper profile null",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE]
+ = "committed rate field (shaper profile)",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE]
+ = "committed size field (shaper profile)",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE]
+ = "peak rate field (shaper profile)",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE]
+ = "peak size field (shaper profile)",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN]
+ = "packet adjust length field (shaper profile)",
+ [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID] = "shaper profile id",
+ [RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID] = "shared shaper id",
+ [RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID] = "parent node id",
+ [RTE_TM_ERROR_TYPE_NODE_PRIORITY] = "node priority",
+ [RTE_TM_ERROR_TYPE_NODE_WEIGHT] = "node weight",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS] = "node parameter null",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID]
+ = "shaper profile id field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID]
+ = "shared shaper id field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]
+ = "num shared shapers field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]
+ = "wfq weght mode field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]
+ = "num strict priorities field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]
+ = "congestion management mode field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID] =
+ "wred profile id field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID]
+ = "shared wred context id field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS]
+ = "num shared wred contexts field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS]
+ = "stats field (node params)",
+ [RTE_TM_ERROR_TYPE_NODE_ID] = "node id",
+ };
+
+ const char *errstr;
+ char buf[64];
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+
+ if (error->cause)
+ snprintf(buf, sizeof(buf), "cause: %p, ", error->cause);
+
+ printf("%s: %s%s (error %d)\n", errstr, error->cause ? buf : "",
+ error->message ? error->message : "(no stated reason)",
+ error->type);
+}
+
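+/*
+ * Parse a decimal value with an optional K/k, M, G or T suffix, each
+ * suffix step multiplying by 1024 (editor's note: e.g. "10K" -> 10240,
+ * "1M" -> 1048576). Leading and trailing whitespace is skipped.
+ */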
+static int
+read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+	val = strtoull(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+static int
+read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
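+/*
+ * Parse "<n> <id_0> ... <id_n-1>" into n_ssp and shaper_id[] (editor's
+ * note: e.g. "2 4 7" yields *n_ssp = 2 and shaper_id[] = {4, 7}).
+ */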
+static int
+parse_multi_ss_id_str(char *s_str, uint32_t *n_ssp, uint32_t shaper_id[])
+{
+ uint32_t n_shared_shapers = 0, i = 0;
+ char *token;
+
+ /* First token: num of shared shapers */
+ token = strtok_r(s_str, PARSE_DELIMITER, &s_str);
+ if (token == NULL)
+ return -1;
+
+ if (read_uint32(&n_shared_shapers, token))
+ return -1;
+
+ /* Check: num of shared shaper */
+ if (n_shared_shapers >= MAX_NUM_SHARED_SHAPERS) {
+ printf(" Number of shared shapers exceed the max (error)\n");
+ return -1;
+ }
+
+ /* Parse shared shaper ids */
+ while (1) {
+ token = strtok_r(s_str, PARSE_DELIMITER, &s_str);
+ if ((token != NULL && n_shared_shapers == 0) ||
+ (token == NULL && i < n_shared_shapers))
+ return -1;
+
+ if (token == NULL)
+ break;
+
+ if (read_uint32(&shaper_id[i], token))
+ return -1;
+ i++;
+ }
+ *n_ssp = n_shared_shapers;
+
+ return 0;
+}
+
+/* *** Port TM Capability *** */
+struct cmd_show_port_tm_cap_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t cap;
+ uint16_t port_id;
+};
+
+cmdline_parse_token_string_t cmd_show_port_tm_cap_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_show_port_tm_cap_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_show_port_tm_cap_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result,
+ tm, "tm");
+cmdline_parse_token_string_t cmd_show_port_tm_cap_cap =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result,
+ cap, "cap");
+cmdline_parse_token_num_t cmd_show_port_tm_cap_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_cap_result,
+ port_id, UINT16);
+
+static void cmd_show_port_tm_cap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_tm_cap_result *res = parsed_result;
+ struct rte_tm_capabilities cap;
+ struct rte_tm_error error;
+ portid_t port_id = res->port_id;
+ uint32_t i;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&cap, 0, sizeof(struct rte_tm_capabilities));
+ ret = rte_tm_capabilities_get(port_id, &cap, &error);
+ if (ret) {
+ print_err_msg(&error);
+ return;
+ }
+
+ printf("\n**** Port TM Capabilities ****\n\n");
+ printf("cap.n_nodes_max %" PRIu32 "\n", cap.n_nodes_max);
+ printf("cap.n_levels_max %" PRIu32 "\n", cap.n_levels_max);
+ printf("cap.non_leaf_nodes_identical %" PRId32 "\n",
+ cap.non_leaf_nodes_identical);
+ printf("cap.leaf_nodes_identical %" PRId32 "\n",
+ cap.leaf_nodes_identical);
+ printf("cap.shaper_n_max %u\n", cap.shaper_n_max);
+ printf("cap.shaper_private_n_max %" PRIu32 "\n",
+ cap.shaper_private_n_max);
+ printf("cap.shaper_private_dual_rate_n_max %" PRId32 "\n",
+ cap.shaper_private_dual_rate_n_max);
+ printf("cap.shaper_private_rate_min %" PRIu64 "\n",
+ cap.shaper_private_rate_min);
+ printf("cap.shaper_private_rate_max %" PRIu64 "\n",
+ cap.shaper_private_rate_max);
+ printf("cap.shaper_shared_n_max %" PRIu32 "\n",
+ cap.shaper_shared_n_max);
+ printf("cap.shaper_shared_n_nodes_per_shaper_max %" PRIu32 "\n",
+ cap.shaper_shared_n_nodes_per_shaper_max);
+ printf("cap.shaper_shared_n_shapers_per_node_max %" PRIu32 "\n",
+ cap.shaper_shared_n_shapers_per_node_max);
+ printf("cap.shaper_shared_dual_rate_n_max %" PRIu32 "\n",
+ cap.shaper_shared_dual_rate_n_max);
+ printf("cap.shaper_shared_rate_min %" PRIu64 "\n",
+ cap.shaper_shared_rate_min);
+ printf("cap.shaper_shared_rate_max %" PRIu64 "\n",
+ cap.shaper_shared_rate_max);
+ printf("cap.shaper_pkt_length_adjust_min %" PRId32 "\n",
+ cap.shaper_pkt_length_adjust_min);
+ printf("cap.shaper_pkt_length_adjust_max %" PRId32 "\n",
+ cap.shaper_pkt_length_adjust_max);
+ printf("cap.sched_n_children_max %" PRIu32 "\n",
+ cap.sched_n_children_max);
+ printf("cap.sched_sp_n_priorities_max %" PRIu32 "\n",
+ cap.sched_sp_n_priorities_max);
+ printf("cap.sched_wfq_n_children_per_group_max %" PRIu32 "\n",
+ cap.sched_wfq_n_children_per_group_max);
+ printf("cap.sched_wfq_n_groups_max %" PRIu32 "\n",
+ cap.sched_wfq_n_groups_max);
+ printf("cap.sched_wfq_weight_max %" PRIu32 "\n",
+ cap.sched_wfq_weight_max);
+ printf("cap.cman_head_drop_supported %" PRId32 "\n",
+ cap.cman_head_drop_supported);
+ printf("cap.cman_wred_context_n_max %" PRIu32 "\n",
+ cap.cman_wred_context_n_max);
+ printf("cap.cman_wred_context_private_n_max %" PRIu32 "\n",
+ cap.cman_wred_context_private_n_max);
+ printf("cap.cman_wred_context_shared_n_max %" PRIu32 "\n",
+ cap.cman_wred_context_shared_n_max);
+ printf("cap.cman_wred_context_shared_n_nodes_per_context_max %" PRIu32
+ "\n", cap.cman_wred_context_shared_n_nodes_per_context_max);
+ printf("cap.cman_wred_context_shared_n_contexts_per_node_max %" PRIu32
+ "\n", cap.cman_wred_context_shared_n_contexts_per_node_max);
+
+ for (i = 0; i < RTE_TM_COLORS; i++) {
+ printf("cap.mark_vlan_dei_supported %" PRId32 "\n",
+ cap.mark_vlan_dei_supported[i]);
+ printf("cap.mark_ip_ecn_tcp_supported %" PRId32 "\n",
+ cap.mark_ip_ecn_tcp_supported[i]);
+ printf("cap.mark_ip_ecn_sctp_supported %" PRId32 "\n",
+ cap.mark_ip_ecn_sctp_supported[i]);
+ printf("cap.mark_ip_dscp_supported %" PRId32 "\n",
+ cap.mark_ip_dscp_supported[i]);
+ }
+
+ printf("cap.dynamic_update_mask %" PRIx64 "\n",
+ cap.dynamic_update_mask);
+ printf("cap.stats_mask %" PRIx64 "\n", cap.stats_mask);
+}
+
+cmdline_parse_inst_t cmd_show_port_tm_cap = {
+ .f = cmd_show_port_tm_cap_parsed,
+ .data = NULL,
+ .help_str = "Show Port TM Capabilities",
+ .tokens = {
+ (void *)&cmd_show_port_tm_cap_show,
+ (void *)&cmd_show_port_tm_cap_port,
+ (void *)&cmd_show_port_tm_cap_tm,
+ (void *)&cmd_show_port_tm_cap_cap,
+ (void *)&cmd_show_port_tm_cap_port_id,
+ NULL,
+ },
+};
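+
+/* Usage sketch (illustrative port): show port tm cap 0 */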
+
+/* *** Port TM Hierarchical Level Capability *** */
+struct cmd_show_port_tm_level_cap_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t level;
+ cmdline_fixed_string_t cap;
+ uint16_t port_id;
+ uint32_t level_id;
+};
+
+cmdline_parse_token_string_t cmd_show_port_tm_level_cap_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_show_port_tm_level_cap_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_show_port_tm_level_cap_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ tm, "tm");
+cmdline_parse_token_string_t cmd_show_port_tm_level_cap_level =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ level, "level");
+cmdline_parse_token_string_t cmd_show_port_tm_level_cap_cap =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ cap, "cap");
+cmdline_parse_token_num_t cmd_show_port_tm_level_cap_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_show_port_tm_level_cap_level_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_level_cap_result,
+ level_id, UINT32);
+
+static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_tm_level_cap_result *res = parsed_result;
+ struct rte_tm_level_capabilities lcap;
+ struct rte_tm_error error;
+ portid_t port_id = res->port_id;
+ uint32_t level_id = res->level_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&lcap, 0, sizeof(struct rte_tm_level_capabilities));
+ ret = rte_tm_level_capabilities_get(port_id, level_id, &lcap, &error);
+ if (ret) {
+ print_err_msg(&error);
+ return;
+ }
+ printf("\n** Port TM Hierarchy level %" PRIu32 " Capability **\n\n",
+ level_id);
+
+ printf("cap.n_nodes_max %" PRIu32 "\n", lcap.n_nodes_max);
+ printf("cap.n_nodes_nonleaf_max %" PRIu32 "\n",
+ lcap.n_nodes_nonleaf_max);
+ printf("cap.n_nodes_leaf_max %" PRIu32 "\n", lcap.n_nodes_leaf_max);
+ printf("cap.non_leaf_nodes_identical %" PRId32 "\n",
+ lcap.non_leaf_nodes_identical);
+ printf("cap.leaf_nodes_identical %" PRId32 "\n",
+ lcap.leaf_nodes_identical);
+ if (level_id <= 3) {
+ printf("cap.nonleaf.shaper_private_supported %" PRId32 "\n",
+ lcap.nonleaf.shaper_private_supported);
+ printf("cap.nonleaf.shaper_private_dual_rate_supported %" PRId32
+ "\n", lcap.nonleaf.shaper_private_dual_rate_supported);
+ printf("cap.nonleaf.shaper_private_rate_min %" PRIu64 "\n",
+ lcap.nonleaf.shaper_private_rate_min);
+ printf("cap.nonleaf.shaper_private_rate_max %" PRIu64 "\n",
+ lcap.nonleaf.shaper_private_rate_max);
+ printf("cap.nonleaf.shaper_shared_n_max %" PRIu32 "\n",
+ lcap.nonleaf.shaper_shared_n_max);
+ printf("cap.nonleaf.sched_n_children_max %" PRIu32 "\n",
+ lcap.nonleaf.sched_n_children_max);
+ printf("cap.nonleaf.sched_sp_n_priorities_max %" PRIu32 "\n",
+ lcap.nonleaf.sched_sp_n_priorities_max);
+ printf("cap.nonleaf.sched_wfq_n_children_per_group_max %" PRIu32
+ "\n", lcap.nonleaf.sched_wfq_n_children_per_group_max);
+ printf("cap.nonleaf.sched_wfq_n_groups_max %" PRIu32 "\n",
+ lcap.nonleaf.sched_wfq_n_groups_max);
+ printf("cap.nonleaf.sched_wfq_weight_max %" PRIu32 "\n",
+ lcap.nonleaf.sched_wfq_weight_max);
+ printf("cap.nonleaf.stats_mask %" PRIx64 "\n",
+ lcap.nonleaf.stats_mask);
+ } else {
+ printf("cap.leaf.shaper_private_supported %" PRId32 "\n",
+ lcap.leaf.shaper_private_supported);
+ printf("cap.leaf.shaper_private_dual_rate_supported %" PRId32
+ "\n", lcap.leaf.shaper_private_dual_rate_supported);
+ printf("cap.leaf.shaper_private_rate_min %" PRIu64 "\n",
+ lcap.leaf.shaper_private_rate_min);
+ printf("cap.leaf.shaper_private_rate_max %" PRIu64 "\n",
+ lcap.leaf.shaper_private_rate_max);
+ printf("cap.leaf.shaper_shared_n_max %" PRIu32 "\n",
+ lcap.leaf.shaper_shared_n_max);
+ printf("cap.leaf.cman_head_drop_supported %" PRId32 "\n",
+ lcap.leaf.cman_head_drop_supported);
+ printf("cap.leaf.cman_wred_context_private_supported %" PRId32
+ "\n", lcap.leaf.cman_wred_context_private_supported);
+ printf("cap.leaf.cman_wred_context_shared_n_max %" PRIu32 "\n",
+ lcap.leaf.cman_wred_context_shared_n_max);
+ printf("cap.leaf.stats_mask %" PRIx64 "\n",
+ lcap.leaf.stats_mask);
+ }
+}
+
+cmdline_parse_inst_t cmd_show_port_tm_level_cap = {
+ .f = cmd_show_port_tm_level_cap_parsed,
+ .data = NULL,
+ .help_str = "Show Port TM Hierarhical level Capabilities",
+ .tokens = {
+ (void *)&cmd_show_port_tm_level_cap_show,
+ (void *)&cmd_show_port_tm_level_cap_port,
+ (void *)&cmd_show_port_tm_level_cap_tm,
+ (void *)&cmd_show_port_tm_level_cap_level,
+ (void *)&cmd_show_port_tm_level_cap_cap,
+ (void *)&cmd_show_port_tm_level_cap_port_id,
+ (void *)&cmd_show_port_tm_level_cap_level_id,
+ NULL,
+ },
+};
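+
+/*
+ * Usage sketch (illustrative ids): show port tm level cap 0 0
+ * Level 0 is the hierarchy root; leaf-level fields are printed for
+ * level ids above 3, matching the check in the handler above.
+ */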
+
+/* *** Port TM Hierarchy Node Capability *** */
+struct cmd_show_port_tm_node_cap_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t cap;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_show_port_tm_node_cap_show =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_show_port_tm_node_cap_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_show_port_tm_node_cap_tm =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ tm, "tm");
+cmdline_parse_token_string_t cmd_show_port_tm_node_cap_node =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ node, "node");
+cmdline_parse_token_string_t cmd_show_port_tm_node_cap_cap =
+ TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ cap, "cap");
+cmdline_parse_token_num_t cmd_show_port_tm_node_cap_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_show_port_tm_node_cap_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_cap_result,
+ node_id, UINT32);
+
+static void cmd_show_port_tm_node_cap_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_tm_node_cap_result *res = parsed_result;
+ struct rte_tm_node_capabilities ncap;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret, is_leaf = 0;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Node id must be valid */
+ ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+
+ memset(&ncap, 0, sizeof(struct rte_tm_node_capabilities));
+ ret = rte_tm_node_capabilities_get(port_id, node_id, &ncap, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+ printf("\n** Port TM Hierarchy node %" PRIu32 " Capability **\n\n",
+ node_id);
+ printf("cap.shaper_private_supported %" PRId32 "\n",
+ ncap.shaper_private_supported);
+ printf("cap.shaper_private_dual_rate_supported %" PRId32 "\n",
+ ncap.shaper_private_dual_rate_supported);
+ printf("cap.shaper_private_rate_min %" PRIu64 "\n",
+ ncap.shaper_private_rate_min);
+ printf("cap.shaper_private_rate_max %" PRIu64 "\n",
+ ncap.shaper_private_rate_max);
+ printf("cap.shaper_shared_n_max %" PRIu32 "\n",
+ ncap.shaper_shared_n_max);
+ if (!is_leaf) {
+ printf("cap.nonleaf.sched_n_children_max %" PRIu32 "\n",
+ ncap.nonleaf.sched_n_children_max);
+ printf("cap.nonleaf.sched_sp_n_priorities_max %" PRIu32 "\n",
+ ncap.nonleaf.sched_sp_n_priorities_max);
+ printf("cap.nonleaf.sched_wfq_n_children_per_group_max %" PRIu32
+ "\n", ncap.nonleaf.sched_wfq_n_children_per_group_max);
+ printf("cap.nonleaf.sched_wfq_n_groups_max %" PRIu32 "\n",
+ ncap.nonleaf.sched_wfq_n_groups_max);
+ printf("cap.nonleaf.sched_wfq_weight_max %" PRIu32 "\n",
+ ncap.nonleaf.sched_wfq_weight_max);
+ } else {
+ printf("cap.leaf.cman_head_drop_supported %" PRId32 "\n",
+ ncap.leaf.cman_head_drop_supported);
+ printf("cap.leaf.cman_wred_context_private_supported %" PRId32
+ "\n", ncap.leaf.cman_wred_context_private_supported);
+ printf("cap.leaf.cman_wred_context_shared_n_max %" PRIu32 "\n",
+ ncap.leaf.cman_wred_context_shared_n_max);
+ }
+ printf("cap.stats_mask %" PRIx64 "\n", ncap.stats_mask);
+}
+
+cmdline_parse_inst_t cmd_show_port_tm_node_cap = {
+ .f = cmd_show_port_tm_node_cap_parsed,
+ .data = NULL,
+ .help_str = "Show Port TM Hierarchy node capabilities",
+ .tokens = {
+ (void *)&cmd_show_port_tm_node_cap_show,
+ (void *)&cmd_show_port_tm_node_cap_port,
+ (void *)&cmd_show_port_tm_node_cap_tm,
+ (void *)&cmd_show_port_tm_node_cap_node,
+ (void *)&cmd_show_port_tm_node_cap_cap,
+ (void *)&cmd_show_port_tm_node_cap_port_id,
+ (void *)&cmd_show_port_tm_node_cap_node_id,
+ NULL,
+ },
+};
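+
+/* Usage sketch (illustrative ids): show port tm node cap 0 1 */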
+
+/* *** Show Port TM Node Statistics *** */
+struct cmd_show_port_tm_node_stats_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t stats;
+ uint16_t port_id;
+ uint32_t node_id;
+ uint32_t clear;
+};
+
+cmdline_parse_token_string_t cmd_show_port_tm_node_stats_show =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, show, "show");
+cmdline_parse_token_string_t cmd_show_port_tm_node_stats_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, port, "port");
+cmdline_parse_token_string_t cmd_show_port_tm_node_stats_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, tm, "tm");
+cmdline_parse_token_string_t cmd_show_port_tm_node_stats_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, node, "node");
+cmdline_parse_token_string_t cmd_show_port_tm_node_stats_stats =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, stats, "stats");
+cmdline_parse_token_num_t cmd_show_port_tm_node_stats_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_stats_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_show_port_tm_node_stats_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result,
+ node_id, UINT32);
+cmdline_parse_token_num_t cmd_show_port_tm_node_stats_clear =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_tm_node_stats_result, clear, UINT32);
+
+static void cmd_show_port_tm_node_stats_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_tm_node_stats_result *res = parsed_result;
+ struct rte_tm_node_stats stats;
+ struct rte_tm_error error;
+ uint64_t stats_mask = 0;
+ uint32_t node_id = res->node_id;
+ uint32_t clear = res->clear;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (!port_is_started(port_id)) {
+ printf(" Port %u not started (error)\n", port_id);
+ return;
+ }
+
+ memset(&stats, 0, sizeof(struct rte_tm_node_stats));
+ ret = rte_tm_node_stats_read(port_id, node_id, &stats,
+ &stats_mask, clear, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+
+ /* Display stats */
+ if (stats_mask & RTE_TM_STATS_N_PKTS)
+ printf("\tPkts scheduled from node: %" PRIu64 "\n",
+ stats.n_pkts);
+ if (stats_mask & RTE_TM_STATS_N_BYTES)
+ printf("\tBytes scheduled from node: %" PRIu64 "\n",
+ stats.n_bytes);
+ if (stats_mask & RTE_TM_STATS_N_PKTS_GREEN_DROPPED)
+ printf("\tPkts dropped (green): %" PRIu64 "\n",
+ stats.leaf.n_pkts_dropped[RTE_TM_GREEN]);
+ if (stats_mask & RTE_TM_STATS_N_PKTS_YELLOW_DROPPED)
+ printf("\tPkts dropped (yellow): %" PRIu64 "\n",
+ stats.leaf.n_pkts_dropped[RTE_TM_YELLOW]);
+ if (stats_mask & RTE_TM_STATS_N_PKTS_RED_DROPPED)
+ printf("\tPkts dropped (red): %" PRIu64 "\n",
+ stats.leaf.n_pkts_dropped[RTE_TM_RED]);
+ if (stats_mask & RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+ printf("\tBytes dropped (green): %" PRIu64 "\n",
+ stats.leaf.n_bytes_dropped[RTE_TM_GREEN]);
+ if (stats_mask & RTE_TM_STATS_N_BYTES_YELLOW_DROPPED)
+ printf("\tBytes dropped (yellow): %" PRIu64 "\n",
+ stats.leaf.n_bytes_dropped[RTE_TM_YELLOW]);
+ if (stats_mask & RTE_TM_STATS_N_BYTES_RED_DROPPED)
+ printf("\tBytes dropped (red): %" PRIu64 "\n",
+ stats.leaf.n_bytes_dropped[RTE_TM_RED]);
+ if (stats_mask & RTE_TM_STATS_N_PKTS_QUEUED)
+ printf("\tPkts queued: %" PRIu64 "\n",
+ stats.leaf.n_pkts_queued);
+ if (stats_mask & RTE_TM_STATS_N_BYTES_QUEUED)
+ printf("\tBytes queued: %" PRIu64 "\n",
+ stats.leaf.n_bytes_queued);
+}
+
+cmdline_parse_inst_t cmd_show_port_tm_node_stats = {
+ .f = cmd_show_port_tm_node_stats_parsed,
+ .data = NULL,
+ .help_str = "Show port tm node stats",
+ .tokens = {
+ (void *)&cmd_show_port_tm_node_stats_show,
+ (void *)&cmd_show_port_tm_node_stats_port,
+ (void *)&cmd_show_port_tm_node_stats_tm,
+ (void *)&cmd_show_port_tm_node_stats_node,
+ (void *)&cmd_show_port_tm_node_stats_stats,
+ (void *)&cmd_show_port_tm_node_stats_port_id,
+ (void *)&cmd_show_port_tm_node_stats_node_id,
+ (void *)&cmd_show_port_tm_node_stats_clear,
+ NULL,
+ },
+};
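+
+/*
+ * Usage sketch (illustrative): show port tm node stats 0 1 0
+ * The port must be started; the trailing clear flag resets the node
+ * counters after the read when non-zero.
+ */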
+
+/* *** Show Port TM Node Type *** */
+struct cmd_show_port_tm_node_type_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t type;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_show_port_tm_node_type_show =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result, show, "show");
+cmdline_parse_token_string_t cmd_show_port_tm_node_type_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result, port, "port");
+cmdline_parse_token_string_t cmd_show_port_tm_node_type_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result, tm, "tm");
+cmdline_parse_token_string_t cmd_show_port_tm_node_type_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result, node, "node");
+cmdline_parse_token_string_t cmd_show_port_tm_node_type_type =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result, type, "type");
+cmdline_parse_token_num_t cmd_show_port_tm_node_type_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_show_port_tm_node_type_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_show_port_tm_node_type_result,
+ node_id, UINT32);
+
+static void cmd_show_port_tm_node_type_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_port_tm_node_type_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret, is_leaf = 0;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+
+ if (is_leaf == 1)
+ printf("leaf node\n");
+ else
+ printf("nonleaf node\n");
+}
+
+cmdline_parse_inst_t cmd_show_port_tm_node_type = {
+ .f = cmd_show_port_tm_node_type_parsed,
+ .data = NULL,
+ .help_str = "Show port tm node type",
+ .tokens = {
+ (void *)&cmd_show_port_tm_node_type_show,
+ (void *)&cmd_show_port_tm_node_type_port,
+ (void *)&cmd_show_port_tm_node_type_tm,
+ (void *)&cmd_show_port_tm_node_type_node,
+ (void *)&cmd_show_port_tm_node_type_type,
+ (void *)&cmd_show_port_tm_node_type_port_id,
+ (void *)&cmd_show_port_tm_node_type_node_id,
+ NULL,
+ },
+};
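+
+/* Usage sketch (illustrative ids): show port tm node type 0 1 */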
+
+/* *** Add Port TM Private Shaper Profile *** */
+struct cmd_add_port_tm_node_shaper_profile_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t shaper;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t shaper_id;
+ uint64_t tb_rate;
+ uint64_t tb_size;
+ uint32_t pktlen_adjust;
+};
+
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ tm, "tm");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ node, "node");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_shaper =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ shaper, "shaper");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_shaper_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ shaper_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_tb_rate =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ tb_rate, UINT64);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_tb_size =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ tb_size, UINT64);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_pktlen_adjust =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ pktlen_adjust, UINT32);
+
+static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_tm_node_shaper_profile_result *res = parsed_result;
+ struct rte_tm_shaper_params sp;
+ struct rte_tm_error error;
+ uint32_t shaper_id = res->shaper_id;
+ uint32_t pkt_len_adjust = res->pktlen_adjust;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Private shaper profile params */
+ memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+ sp.peak.rate = res->tb_rate;
+ sp.peak.size = res->tb_size;
+ sp.pkt_length_adjust = pkt_len_adjust;
+
+ ret = rte_tm_shaper_profile_add(port_id, shaper_id, &sp, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_tm_node_shaper_profile = {
+ .f = cmd_add_port_tm_node_shaper_profile_parsed,
+ .data = NULL,
+ .help_str = "Add port tm node private shaper profile",
+ .tokens = {
+ (void *)&cmd_add_port_tm_node_shaper_profile_add,
+ (void *)&cmd_add_port_tm_node_shaper_profile_port,
+ (void *)&cmd_add_port_tm_node_shaper_profile_tm,
+ (void *)&cmd_add_port_tm_node_shaper_profile_node,
+ (void *)&cmd_add_port_tm_node_shaper_profile_shaper,
+ (void *)&cmd_add_port_tm_node_shaper_profile_profile,
+ (void *)&cmd_add_port_tm_node_shaper_profile_port_id,
+ (void *)&cmd_add_port_tm_node_shaper_profile_shaper_id,
+ (void *)&cmd_add_port_tm_node_shaper_profile_tb_rate,
+ (void *)&cmd_add_port_tm_node_shaper_profile_tb_size,
+ (void *)&cmd_add_port_tm_node_shaper_profile_pktlen_adjust,
+ NULL,
+ },
+};
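+
+/*
+ * Usage sketch (editor's addition; values illustrative):
+ *   testpmd> add port tm node shaper profile 0 1 12500000 1000 24
+ * i.e. shaper profile 1 on port 0 with the given token-bucket rate and
+ * size, and a 24-byte packet length adjustment (e.g. framing overhead).
+ */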
+
+/* *** Delete Port TM Private Shaper Profile *** */
+struct cmd_del_port_tm_node_shaper_profile_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t shaper;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t shaper_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result, tm, "tm");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ node, "node");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_shaper =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ shaper, "shaper");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_del_port_tm_node_shaper_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_tm_node_shaper_profile_shaper_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_shaper_profile_result,
+ shaper_id, UINT32);
+
+static void cmd_del_port_tm_node_shaper_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_tm_node_shaper_profile_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t shaper_id = res->shaper_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_shaper_profile_delete(port_id, shaper_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_tm_node_shaper_profile = {
+ .f = cmd_del_port_tm_node_shaper_profile_parsed,
+ .data = NULL,
+ .help_str = "Delete port tm node private shaper profile",
+ .tokens = {
+ (void *)&cmd_del_port_tm_node_shaper_profile_del,
+ (void *)&cmd_del_port_tm_node_shaper_profile_port,
+ (void *)&cmd_del_port_tm_node_shaper_profile_tm,
+ (void *)&cmd_del_port_tm_node_shaper_profile_node,
+ (void *)&cmd_del_port_tm_node_shaper_profile_shaper,
+ (void *)&cmd_del_port_tm_node_shaper_profile_profile,
+ (void *)&cmd_del_port_tm_node_shaper_profile_port_id,
+ (void *)&cmd_del_port_tm_node_shaper_profile_shaper_id,
+ NULL,
+ },
+};
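+
+/* Usage sketch (illustrative ids): del port tm node shaper profile 0 1 */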
+
+/* *** Add/Update Port TM shared Shaper *** */
+struct cmd_add_port_tm_node_shared_shaper_result {
+ cmdline_fixed_string_t cmd_type;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t shared;
+ cmdline_fixed_string_t shaper;
+ uint16_t port_id;
+ uint32_t shared_shaper_id;
+ uint32_t shaper_profile_id;
+};
+
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_cmd_type =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ cmd_type, "add#set");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result, port, "port");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result, tm, "tm");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result, node, "node");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_shared =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ shared, "shared");
+cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_shaper =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ shaper, "shaper");
+cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_shared_shaper_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ shared_shaper_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_shaper_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shared_shaper_result,
+ shaper_profile_id, UINT32);
+
+static void cmd_add_port_tm_node_shared_shaper_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_tm_node_shared_shaper_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t shared_shaper_id = res->shared_shaper_id;
+ uint32_t shaper_profile_id = res->shaper_profile_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Command type: add */
+ if ((strcmp(res->cmd_type, "add") == 0) &&
+ (port_is_started(port_id))) {
+ printf(" Port %u not stopped (error)\n", port_id);
+ return;
+ }
+
+ /* Command type: set (update) */
+ if ((strcmp(res->cmd_type, "set") == 0) &&
+ (!port_is_started(port_id))) {
+ printf(" Port %u not started (error)\n", port_id);
+ return;
+ }
+
+ ret = rte_tm_shared_shaper_add_update(port_id, shared_shaper_id,
+ shaper_profile_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_tm_node_shared_shaper = {
+ .f = cmd_add_port_tm_node_shared_shaper_parsed,
+ .data = NULL,
+ .help_str = "add/update port tm node shared shaper",
+ .tokens = {
+ (void *)&cmd_add_port_tm_node_shared_shaper_cmd_type,
+ (void *)&cmd_add_port_tm_node_shared_shaper_port,
+ (void *)&cmd_add_port_tm_node_shared_shaper_tm,
+ (void *)&cmd_add_port_tm_node_shared_shaper_node,
+ (void *)&cmd_add_port_tm_node_shared_shaper_shared,
+ (void *)&cmd_add_port_tm_node_shared_shaper_shaper,
+ (void *)&cmd_add_port_tm_node_shared_shaper_port_id,
+ (void *)&cmd_add_port_tm_node_shared_shaper_shared_shaper_id,
+ (void *)&cmd_add_port_tm_node_shared_shaper_shaper_profile_id,
+ NULL,
+ },
+};
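+
+/*
+ * Usage sketch (illustrative ids):
+ *   add port tm node shared shaper 0 2 1   (port must be stopped)
+ *   set port tm node shared shaper 0 2 1   (port must be started)
+ */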
+
+/* *** Delete Port TM shared Shaper *** */
+struct cmd_del_port_tm_node_shared_shaper_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t shared;
+ cmdline_fixed_string_t shaper;
+ uint16_t port_id;
+ uint32_t shared_shaper_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result, port, "port");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result, tm, "tm");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result, node, "node");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_shared =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result,
+ shared, "shared");
+cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_shaper =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result,
+ shaper, "shaper");
+cmdline_parse_token_num_t cmd_del_port_tm_node_shared_shaper_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_tm_node_shared_shaper_shared_shaper_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_shared_shaper_result,
+ shared_shaper_id, UINT32);
+
+static void cmd_del_port_tm_node_shared_shaper_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_tm_node_shared_shaper_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t shared_shaper_id = res->shared_shaper_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_shared_shaper_delete(port_id, shared_shaper_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_tm_node_shared_shaper = {
+ .f = cmd_del_port_tm_node_shared_shaper_parsed,
+ .data = NULL,
+ .help_str = "delete port tm node shared shaper",
+ .tokens = {
+ (void *)&cmd_del_port_tm_node_shared_shaper_del,
+ (void *)&cmd_del_port_tm_node_shared_shaper_port,
+ (void *)&cmd_del_port_tm_node_shared_shaper_tm,
+ (void *)&cmd_del_port_tm_node_shared_shaper_node,
+ (void *)&cmd_del_port_tm_node_shared_shaper_shared,
+ (void *)&cmd_del_port_tm_node_shared_shaper_shaper,
+ (void *)&cmd_del_port_tm_node_shared_shaper_port_id,
+ (void *)&cmd_del_port_tm_node_shared_shaper_shared_shaper_id,
+ NULL,
+ },
+};
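+/*
+ * Editor's note, illustrative: per the token list,
+ *   testpmd> del port tm node shared shaper 0 5
+ * would delete (hypothetical) shared shaper 5 on port 0.
+ */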
+
+/* *** Add Port TM Node WRED Profile *** */
+struct cmd_add_port_tm_node_wred_profile_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t wred;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t wred_profile_id;
+ cmdline_fixed_string_t color_g;
+ uint16_t min_th_g;
+ uint16_t max_th_g;
+ uint16_t maxp_inv_g;
+ uint16_t wq_log2_g;
+ cmdline_fixed_string_t color_y;
+ uint16_t min_th_y;
+ uint16_t max_th_y;
+ uint16_t maxp_inv_y;
+ uint16_t wq_log2_y;
+ cmdline_fixed_string_t color_r;
+ uint16_t min_th_r;
+ uint16_t max_th_r;
+ uint16_t maxp_inv_r;
+ uint16_t wq_log2_r;
+};
+
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result, port, "port");
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result, tm, "tm");
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result, node, "node");
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_wred =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result, wred, "wred");
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wred_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ wred_profile_id, UINT32);
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_g =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ color_g, "G#g");
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_g =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ min_th_g, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_g =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ max_th_g, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_g =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ maxp_inv_g, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_g =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ wq_log2_g, UINT16);
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_y =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ color_y, "Y#y");
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_y =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ min_th_y, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_y =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ max_th_y, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_y =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ maxp_inv_y, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_y =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ wq_log2_y, UINT16);
+cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_r =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ color_r, "R#r");
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_r =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ min_th_r, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_r =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ max_th_r, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_r =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ maxp_inv_r, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_r =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_wred_profile_result,
+ wq_log2_r, UINT16);
+
+static void cmd_add_port_tm_node_wred_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_tm_node_wred_profile_result *res = parsed_result;
+ struct rte_tm_wred_params wp;
+ enum rte_tm_color color;
+ struct rte_tm_error error;
+ uint32_t wred_profile_id = res->wred_profile_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&wp, 0, sizeof(struct rte_tm_wred_params));
+
+ /* WRED Params (Green Color) */
+ color = RTE_TM_GREEN;
+ wp.red_params[color].min_th = res->min_th_g;
+ wp.red_params[color].max_th = res->max_th_g;
+ wp.red_params[color].maxp_inv = res->maxp_inv_g;
+ wp.red_params[color].wq_log2 = res->wq_log2_g;
+
+ /* WRED Params (Yellow Color) */
+ color = RTE_TM_YELLOW;
+ wp.red_params[color].min_th = res->min_th_y;
+ wp.red_params[color].max_th = res->max_th_y;
+ wp.red_params[color].maxp_inv = res->maxp_inv_y;
+ wp.red_params[color].wq_log2 = res->wq_log2_y;
+
+ /* WRED Params (Red Color) */
+ color = RTE_TM_RED;
+ wp.red_params[color].min_th = res->min_th_r;
+ wp.red_params[color].max_th = res->max_th_r;
+ wp.red_params[color].maxp_inv = res->maxp_inv_r;
+ wp.red_params[color].wq_log2 = res->wq_log2_r;
+
+ ret = rte_tm_wred_profile_add(port_id, wred_profile_id, &wp, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_add_port_tm_node_wred_profile = {
+ .f = cmd_add_port_tm_node_wred_profile_parsed,
+ .data = NULL,
+ .help_str = "Add port tm node wred profile",
+ .tokens = {
+ (void *)&cmd_add_port_tm_node_wred_profile_add,
+ (void *)&cmd_add_port_tm_node_wred_profile_port,
+ (void *)&cmd_add_port_tm_node_wred_profile_tm,
+ (void *)&cmd_add_port_tm_node_wred_profile_node,
+ (void *)&cmd_add_port_tm_node_wred_profile_wred,
+ (void *)&cmd_add_port_tm_node_wred_profile_profile,
+ (void *)&cmd_add_port_tm_node_wred_profile_port_id,
+ (void *)&cmd_add_port_tm_node_wred_profile_wred_profile_id,
+ (void *)&cmd_add_port_tm_node_wred_profile_color_g,
+ (void *)&cmd_add_port_tm_node_wred_profile_min_th_g,
+ (void *)&cmd_add_port_tm_node_wred_profile_max_th_g,
+ (void *)&cmd_add_port_tm_node_wred_profile_maxp_inv_g,
+ (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_g,
+ (void *)&cmd_add_port_tm_node_wred_profile_color_y,
+ (void *)&cmd_add_port_tm_node_wred_profile_min_th_y,
+ (void *)&cmd_add_port_tm_node_wred_profile_max_th_y,
+ (void *)&cmd_add_port_tm_node_wred_profile_maxp_inv_y,
+ (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_y,
+ (void *)&cmd_add_port_tm_node_wred_profile_color_r,
+ (void *)&cmd_add_port_tm_node_wred_profile_min_th_r,
+ (void *)&cmd_add_port_tm_node_wred_profile_max_th_r,
+ (void *)&cmd_add_port_tm_node_wred_profile_maxp_inv_r,
+ (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_r,
+ NULL,
+ },
+};
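+/*
+ * Editor's note, illustrative: one WRED profile carries a
+ * (min_th, max_th, maxp_inv, wq_log2) tuple per color, so per the token
+ * list a profile could be added with hypothetical values as
+ *   testpmd> add port tm node wred profile 0 1 G 10 20 10 9
+ *            Y 10 20 10 9 R 10 20 10 9
+ * (a single command line, wrapped here for readability).
+ */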
+
+/* *** Delete Port TM node WRED Profile *** */
+struct cmd_del_port_tm_node_wred_profile_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t wred;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t wred_profile_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result, port, "port");
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result, tm, "tm");
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result, node, "node");
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_wred =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result, wred, "wred");
+cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_del_port_tm_node_wred_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_tm_node_wred_profile_wred_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_del_port_tm_node_wred_profile_result,
+ wred_profile_id, UINT32);
+
+static void cmd_del_port_tm_node_wred_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_tm_node_wred_profile_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t wred_profile_id = res->wred_profile_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ret = rte_tm_wred_profile_delete(port_id, wred_profile_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_tm_node_wred_profile = {
+ .f = cmd_del_port_tm_node_wred_profile_parsed,
+ .data = NULL,
+ .help_str = "Delete port tm node wred profile",
+ .tokens = {
+ (void *)&cmd_del_port_tm_node_wred_profile_del,
+ (void *)&cmd_del_port_tm_node_wred_profile_port,
+ (void *)&cmd_del_port_tm_node_wred_profile_tm,
+ (void *)&cmd_del_port_tm_node_wred_profile_node,
+ (void *)&cmd_del_port_tm_node_wred_profile_wred,
+ (void *)&cmd_del_port_tm_node_wred_profile_profile,
+ (void *)&cmd_del_port_tm_node_wred_profile_port_id,
+ (void *)&cmd_del_port_tm_node_wred_profile_wred_profile_id,
+ NULL,
+ },
+};
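+/*
+ * Editor's note, illustrative:
+ *   testpmd> del port tm node wred profile 0 1
+ * would delete (hypothetical) WRED profile 1 on port 0.
+ */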
+
+/* *** Update Port TM Node Shaper profile *** */
+struct cmd_set_port_tm_node_shaper_profile_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t shaper;
+ cmdline_fixed_string_t profile;
+ uint16_t port_id;
+ uint32_t node_id;
+ uint32_t shaper_profile_id;
+};
+
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result, tm, "tm");
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ node, "node");
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_shaper =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ shaper, "shaper");
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_profile =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ profile, "profile");
+cmdline_parse_token_num_t cmd_set_port_tm_node_shaper_profile_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_set_port_tm_node_shaper_profile_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_shaper_profile_result,
+ node_id, UINT32);
+cmdline_parse_token_num_t
+ cmd_set_port_tm_node_shaper_shaper_profile_profile_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_tm_node_shaper_profile_result,
+ shaper_profile_id, UINT32);
+
+static void cmd_set_port_tm_node_shaper_profile_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_tm_node_shaper_profile_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ uint32_t shaper_profile_id = res->shaper_profile_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (!port_is_started(port_id)) {
+ printf(" Port %u not started (error)\n", port_id);
+ return;
+ }
+
+ ret = rte_tm_node_shaper_update(port_id, node_id,
+ shaper_profile_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_tm_node_shaper_profile = {
+ .f = cmd_set_port_tm_node_shaper_profile_parsed,
+ .data = NULL,
+ .help_str = "Set port tm node shaper profile",
+ .tokens = {
+ (void *)&cmd_set_port_tm_node_shaper_profile_set,
+ (void *)&cmd_set_port_tm_node_shaper_profile_port,
+ (void *)&cmd_set_port_tm_node_shaper_profile_tm,
+ (void *)&cmd_set_port_tm_node_shaper_profile_node,
+ (void *)&cmd_set_port_tm_node_shaper_profile_shaper,
+ (void *)&cmd_set_port_tm_node_shaper_profile_profile,
+ (void *)&cmd_set_port_tm_node_shaper_profile_port_id,
+ (void *)&cmd_set_port_tm_node_shaper_profile_node_id,
+ (void *)&cmd_set_port_tm_node_shaper_shaper_profile_profile_id,
+ NULL,
+ },
+};
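+/*
+ * Editor's note, illustrative: per the token list,
+ *   testpmd> set port tm node shaper profile 0 100 2
+ * would attach shaper profile 2 to node 100 on port 0 (all IDs
+ * hypothetical); the handler above requires the port to be started.
+ */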
+
+/* *** Add Port TM nonleaf node *** */
+struct cmd_add_port_tm_nonleaf_node_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t nonleaf;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+ int32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t level_id;
+ uint32_t shaper_profile_id;
+ uint32_t n_sp_priorities;
+ uint64_t stats_mask;
+ cmdline_multi_string_t multi_shared_shaper_id;
+};
+
+cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result, port, "port");
+cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_nonleaf =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result, nonleaf, "nonleaf");
+cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result, node, "node");
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_nonleaf_node_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ node_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_parent_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ parent_node_id, INT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_priority =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ priority, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_weight =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ weight, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_level_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ level_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_shaper_profile_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ shaper_profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_n_sp_priorities =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ n_sp_priorities, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_stats_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ stats_mask, UINT64);
+cmdline_parse_token_string_t
+ cmd_add_port_tm_nonleaf_node_multi_shared_shaper_id =
+ TOKEN_STRING_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
+ multi_shared_shaper_id, TOKEN_STRING_MULTI);
+
+static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_tm_nonleaf_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ struct rte_tm_node_params np;
+ uint32_t *shared_shaper_id;
+ uint32_t parent_node_id, n_shared_shapers = 0;
+ char *s_str = res->multi_shared_shaper_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (port_is_started(port_id)) {
+ printf(" Port %u not stopped (error)\n", port_id);
+ return;
+ }
+
+ memset(&np, 0, sizeof(struct rte_tm_node_params));
+
+ /* Node parameters */
+ if (res->parent_node_id < 0)
+ parent_node_id = UINT32_MAX;
+ else
+ parent_node_id = res->parent_node_id;
+
+ shared_shaper_id = (uint32_t *)malloc(MAX_NUM_SHARED_SHAPERS *
+ sizeof(uint32_t));
+ if (shared_shaper_id == NULL) {
+ printf(" Memory not allocated for shared shapers (error)\n");
+ return;
+ }
+
+ /* Parse multi shared shaper id string */
+ ret = parse_multi_ss_id_str(s_str, &n_shared_shapers, shared_shaper_id);
+ if (ret) {
+ printf(" Shared shapers params string parse error\n");
+ free(shared_shaper_id);
+ return;
+ }
+
+ np.shaper_profile_id = res->shaper_profile_id;
+ np.n_shared_shapers = n_shared_shapers;
+ if (np.n_shared_shapers)
+ np.shared_shaper_id = &shared_shaper_id[0];
+ else
+ np.shared_shaper_id = NULL;
+
+ np.nonleaf.n_sp_priorities = res->n_sp_priorities;
+ np.stats_mask = res->stats_mask;
+ np.nonleaf.wfq_weight_mode = NULL;
+
+ ret = rte_tm_node_add(port_id, res->node_id, parent_node_id,
+ res->priority, res->weight, res->level_id,
+ &np, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ free(shared_shaper_id);
+ return;
+ }
+ /* also release the shared shaper ID array on the success path */
+ free(shared_shaper_id);
+}
+
+cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node = {
+ .f = cmd_add_port_tm_nonleaf_node_parsed,
+ .data = NULL,
+ .help_str = "Add port tm nonleaf node",
+ .tokens = {
+ (void *)&cmd_add_port_tm_nonleaf_node_add,
+ (void *)&cmd_add_port_tm_nonleaf_node_port,
+ (void *)&cmd_add_port_tm_nonleaf_node_tm,
+ (void *)&cmd_add_port_tm_nonleaf_node_nonleaf,
+ (void *)&cmd_add_port_tm_nonleaf_node_node,
+ (void *)&cmd_add_port_tm_nonleaf_node_port_id,
+ (void *)&cmd_add_port_tm_nonleaf_node_node_id,
+ (void *)&cmd_add_port_tm_nonleaf_node_parent_node_id,
+ (void *)&cmd_add_port_tm_nonleaf_node_priority,
+ (void *)&cmd_add_port_tm_nonleaf_node_weight,
+ (void *)&cmd_add_port_tm_nonleaf_node_level_id,
+ (void *)&cmd_add_port_tm_nonleaf_node_shaper_profile_id,
+ (void *)&cmd_add_port_tm_nonleaf_node_n_sp_priorities,
+ (void *)&cmd_add_port_tm_nonleaf_node_stats_mask,
+ (void *)&cmd_add_port_tm_nonleaf_node_multi_shared_shaper_id,
+ NULL,
+ },
+};
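+/*
+ * Editor's note, illustrative sketch with hypothetical values:
+ *   testpmd> add port tm nonleaf node 0 100 -1 0 1 0 1 1 0 none
+ * would add node 100 as a root (parent_node_id -1 is mapped to
+ * UINT32_MAX in the handler) with priority 0, weight 1, level 0,
+ * shaper profile 1, one SP priority and an empty stats mask. The exact
+ * spelling of the trailing "no shared shapers" token is defined by
+ * parse_multi_ss_id_str(), which lives outside this hunk.
+ */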
+
+/* *** Add Port TM leaf node *** */
+struct cmd_add_port_tm_leaf_node_result {
+ cmdline_fixed_string_t add;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t leaf;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+ int32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t level_id;
+ uint32_t shaper_profile_id;
+ uint32_t cman_mode;
+ uint32_t wred_profile_id;
+ uint64_t stats_mask;
+ cmdline_multi_string_t multi_shared_shaper_id;
+};
+
+cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_add =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_leaf_node_result, add, "add");
+cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_leaf_node_result, port, "port");
+cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_leaf_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_leaf =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_leaf_node_result, leaf, "leaf");
+cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_add_port_tm_leaf_node_result, node, "node");
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ node_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_parent_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ parent_node_id, INT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_priority =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ priority, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_weight =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ weight, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_level_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ level_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_shaper_profile_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ shaper_profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_cman_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ cman_mode, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_wred_profile_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ wred_profile_id, UINT32);
+cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_stats_mask =
+ TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ stats_mask, UINT64);
+cmdline_parse_token_string_t
+ cmd_add_port_tm_leaf_node_multi_shared_shaper_id =
+ TOKEN_STRING_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
+ multi_shared_shaper_id, TOKEN_STRING_MULTI);
+
+static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_add_port_tm_leaf_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ struct rte_tm_node_params np;
+ uint32_t *shared_shaper_id;
+ uint32_t parent_node_id, n_shared_shapers = 0;
+ portid_t port_id = res->port_id;
+ char *s_str = res->multi_shared_shaper_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (port_is_started(port_id)) {
+ printf(" Port %u not stopped (error)\n", port_id);
+ return;
+ }
+
+ memset(&np, 0, sizeof(struct rte_tm_node_params));
+
+ /* Node parameters */
+ if (res->parent_node_id < 0)
+ parent_node_id = UINT32_MAX;
+ else
+ parent_node_id = res->parent_node_id;
+
+ shared_shaper_id = (uint32_t *)malloc(MAX_NUM_SHARED_SHAPERS *
+ sizeof(uint32_t));
+ if (shared_shaper_id == NULL) {
+ printf(" Memory not allocated for shared shapers (error)\n");
+ return;
+ }
+
+ /* Parse multi shared shaper id string */
+ ret = parse_multi_ss_id_str(s_str, &n_shared_shapers, shared_shaper_id);
+ if (ret) {
+ printf(" Shared shapers params string parse error\n");
+ free(shared_shaper_id);
+ return;
+ }
+
+ np.shaper_profile_id = res->shaper_profile_id;
+ np.n_shared_shapers = n_shared_shapers;
+
+ if (np.n_shared_shapers)
+ np.shared_shaper_id = &shared_shaper_id[0];
+ else
+ np.shared_shaper_id = NULL;
+
+ np.leaf.cman = res->cman_mode;
+ np.leaf.wred.wred_profile_id = res->wred_profile_id;
+ np.stats_mask = res->stats_mask;
+
+ ret = rte_tm_node_add(port_id, res->node_id, parent_node_id,
+ res->priority, res->weight, res->level_id,
+ &np, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ free(shared_shaper_id);
+ return;
+ }
+ /* also release the shared shaper ID array on the success path */
+ free(shared_shaper_id);
+}
+
+cmdline_parse_inst_t cmd_add_port_tm_leaf_node = {
+ .f = cmd_add_port_tm_leaf_node_parsed,
+ .data = NULL,
+ .help_str = "Add port tm leaf node",
+ .tokens = {
+ (void *)&cmd_add_port_tm_leaf_node_add,
+ (void *)&cmd_add_port_tm_leaf_node_port,
+ (void *)&cmd_add_port_tm_leaf_node_tm,
+ (void *)&cmd_add_port_tm_leaf_node_nonleaf,
+ (void *)&cmd_add_port_tm_leaf_node_node,
+ (void *)&cmd_add_port_tm_leaf_node_port_id,
+ (void *)&cmd_add_port_tm_leaf_node_node_id,
+ (void *)&cmd_add_port_tm_leaf_node_parent_node_id,
+ (void *)&cmd_add_port_tm_leaf_node_priority,
+ (void *)&cmd_add_port_tm_leaf_node_weight,
+ (void *)&cmd_add_port_tm_leaf_node_level_id,
+ (void *)&cmd_add_port_tm_leaf_node_shaper_profile_id,
+ (void *)&cmd_add_port_tm_leaf_node_cman_mode,
+ (void *)&cmd_add_port_tm_leaf_node_wred_profile_id,
+ (void *)&cmd_add_port_tm_leaf_node_stats_mask,
+ (void *)&cmd_add_port_tm_leaf_node_multi_shared_shaper_id,
+ NULL,
+ },
+};
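+/*
+ * Editor's note, illustrative sketch with hypothetical values: the leaf
+ * variant appends cman_mode and wred_profile_id, e.g.
+ *   testpmd> add port tm leaf node 0 200 100 0 1 1 1 0 1 0 none
+ * would add node 200 under parent 100 with cman_mode 0 and WRED
+ * profile 1; as above, the trailing shared-shaper token format comes
+ * from parse_multi_ss_id_str().
+ */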
+
+/* *** Delete Port TM Node *** */
+struct cmd_del_port_tm_node_result {
+ cmdline_fixed_string_t del;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_del_port_tm_node_del =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_result, del, "del");
+cmdline_parse_token_string_t cmd_del_port_tm_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_result, port, "port");
+cmdline_parse_token_string_t cmd_del_port_tm_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_del_port_tm_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_del_port_tm_node_result, node, "node");
+cmdline_parse_token_num_t cmd_del_port_tm_node_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_del_port_tm_node_result,
+ port_id, UINT16);
+cmdline_parse_token_num_t cmd_del_port_tm_node_node_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_del_port_tm_node_result,
+ node_id, UINT32);
+
+static void cmd_del_port_tm_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_del_port_tm_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (port_is_started(port_id)) {
+ printf(" Port %u not stopped (error)\n", port_id);
+ return;
+ }
+
+ ret = rte_tm_node_delete(port_id, node_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_del_port_tm_node = {
+ .f = cmd_del_port_tm_node_parsed,
+ .data = NULL,
+ .help_str = "Delete port tm node",
+ .tokens = {
+ (void *)&cmd_del_port_tm_node_del,
+ (void *)&cmd_del_port_tm_node_port,
+ (void *)&cmd_del_port_tm_node_tm,
+ (void *)&cmd_del_port_tm_node_node,
+ (void *)&cmd_del_port_tm_node_port_id,
+ (void *)&cmd_del_port_tm_node_node_id,
+ NULL,
+ },
+};
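+/*
+ * Editor's note, illustrative:
+ *   testpmd> del port tm node 0 200
+ * would remove (hypothetical) node 200 from port 0; the handler
+ * requires the port to be stopped first.
+ */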
+
+/* *** Update Port TM Node Parent *** */
+struct cmd_set_port_tm_node_parent_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ cmdline_fixed_string_t parent;
+ uint16_t port_id;
+ uint32_t node_id;
+ uint32_t parent_id;
+ uint32_t priority;
+ uint32_t weight;
+};
+
+cmdline_parse_token_string_t cmd_set_port_tm_node_parent_set =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, set, "set");
+cmdline_parse_token_string_t cmd_set_port_tm_node_parent_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, port, "port");
+cmdline_parse_token_string_t cmd_set_port_tm_node_parent_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, tm, "tm");
+cmdline_parse_token_string_t cmd_set_port_tm_node_parent_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, node, "node");
+cmdline_parse_token_string_t cmd_set_port_tm_node_parent_parent =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, parent, "parent");
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_set_port_tm_node_parent_result, node_id, UINT32);
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_parent_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result,
+ parent_id, UINT32);
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_priority =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result,
+ priority, UINT32);
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_weight =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result,
+ weight, UINT32);
+
+static void cmd_set_port_tm_node_parent_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_port_tm_node_parent_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ uint32_t parent_id = res->parent_id;
+ uint32_t priority = res->priority;
+ uint32_t weight = res->weight;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ /* Port status */
+ if (!port_is_started(port_id)) {
+ printf(" Port %u not started (error)\n", port_id);
+ return;
+ }
+
+ ret = rte_tm_node_parent_update(port_id, node_id,
+ parent_id, priority, weight, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_set_port_tm_node_parent = {
+ .f = cmd_set_port_tm_node_parent_parsed,
+ .data = NULL,
+ .help_str = "Set port tm node parent",
+ .tokens = {
+ (void *)&cmd_set_port_tm_node_parent_set,
+ (void *)&cmd_set_port_tm_node_parent_port,
+ (void *)&cmd_set_port_tm_node_parent_tm,
+ (void *)&cmd_set_port_tm_node_parent_node,
+ (void *)&cmd_set_port_tm_node_parent_parent,
+ (void *)&cmd_set_port_tm_node_parent_port_id,
+ (void *)&cmd_set_port_tm_node_parent_node_id,
+ (void *)&cmd_set_port_tm_node_parent_parent_id,
+ (void *)&cmd_set_port_tm_node_parent_priority,
+ (void *)&cmd_set_port_tm_node_parent_weight,
+ NULL,
+ },
+};
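+/*
+ * Editor's note, illustrative: per the token list,
+ *   testpmd> set port tm node parent 0 200 101 0 1
+ * would re-parent node 200 under node 101 with priority 0 and weight 1
+ * (all IDs hypothetical); unlike node add/delete, this update requires
+ * the port to be started.
+ */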
+
+/* *** Port TM Hierarchy Commit *** */
+struct cmd_port_tm_hierarchy_commit_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t hierarchy;
+ cmdline_fixed_string_t commit;
+ uint16_t port_id;
+ cmdline_fixed_string_t clean_on_fail;
+};
+
+cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_port_tm_hierarchy_commit_result, port, "port");
+cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_port_tm_hierarchy_commit_result, tm, "tm");
+cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_hierarchy =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_port_tm_hierarchy_commit_result,
+ hierarchy, "hierarchy");
+cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_commit =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_port_tm_hierarchy_commit_result, commit, "commit");
+cmdline_parse_token_num_t cmd_port_tm_hierarchy_commit_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_port_tm_hierarchy_commit_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_clean_on_fail =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_tm_hierarchy_commit_result,
+ clean_on_fail, "yes#no");
+
+static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_port_tm_hierarchy_commit_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t clean_on_fail;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ if (strcmp(res->clean_on_fail, "yes") == 0)
+ clean_on_fail = 1;
+ else
+ clean_on_fail = 0;
+
+ ret = rte_tm_hierarchy_commit(port_id, clean_on_fail, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = {
+ .f = cmd_port_tm_hierarchy_commit_parsed,
+ .data = NULL,
+ .help_str = "Set port tm node shaper profile",
+ .tokens = {
+ (void *)&cmd_port_tm_hierarchy_commit_port,
+ (void *)&cmd_port_tm_hierarchy_commit_tm,
+ (void *)&cmd_port_tm_hierarchy_commit_hierarchy,
+ (void *)&cmd_port_tm_hierarchy_commit_commit,
+ (void *)&cmd_port_tm_hierarchy_commit_port_id,
+ (void *)&cmd_port_tm_hierarchy_commit_clean_on_fail,
+ NULL,
+ },
+};
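+/*
+ * Editor's note, illustrative:
+ *   testpmd> port tm hierarchy commit 0 yes
+ * would commit the hierarchy on (hypothetical) port 0, with "yes"
+ * translated to clean_on_fail=1 in the handler above.
+ */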
diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h
new file mode 100644
index 00000000..9d5fdf0a
--- /dev/null
+++ b/app/test-pmd/cmdline_tm.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_TM_H_
+#define _CMDLINE_TM_H_
+
+/* Traffic Management CLI */
+extern cmdline_parse_inst_t cmd_show_port_tm_cap;
+extern cmdline_parse_inst_t cmd_show_port_tm_level_cap;
+extern cmdline_parse_inst_t cmd_show_port_tm_node_cap;
+extern cmdline_parse_inst_t cmd_show_port_tm_node_type;
+extern cmdline_parse_inst_t cmd_show_port_tm_node_stats;
+extern cmdline_parse_inst_t cmd_add_port_tm_node_shaper_profile;
+extern cmdline_parse_inst_t cmd_del_port_tm_node_shaper_profile;
+extern cmdline_parse_inst_t cmd_add_port_tm_node_shared_shaper;
+extern cmdline_parse_inst_t cmd_del_port_tm_node_shared_shaper;
+extern cmdline_parse_inst_t cmd_add_port_tm_node_wred_profile;
+extern cmdline_parse_inst_t cmd_del_port_tm_node_wred_profile;
+extern cmdline_parse_inst_t cmd_set_port_tm_node_shaper_profile;
+extern cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node;
+extern cmdline_parse_inst_t cmd_add_port_tm_leaf_node;
+extern cmdline_parse_inst_t cmd_del_port_tm_node;
+extern cmdline_parse_inst_t cmd_set_port_tm_node_parent;
+extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit;
+
+#endif /* _CMDLINE_TM_H_ */
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 3ae3e1cd..cd2ac116 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -40,6 +40,10 @@
#include <inttypes.h>
#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
#include <rte_common.h>
#include <rte_byteorder.h>
@@ -67,6 +71,9 @@
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+#include <rte_pmd_i40e.h>
+#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
@@ -203,8 +210,10 @@ nic_stats_display(portid_t port_id)
if (diff_cycles > 0)
diff_cycles = prev_cycles[port_id] - diff_cycles;
- diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
- diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
+ diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
+ (stats.ipackets - prev_pkts_rx[port_id]) : 0;
+ diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
+ (stats.opackets - prev_pkts_tx[port_id]) : 0;
prev_pkts_rx[port_id] = stats.ipackets;
prev_pkts_tx[port_id] = stats.opackets;
mpps_rx = diff_cycles > 0 ?
@@ -283,10 +292,13 @@ nic_xstats_display(portid_t port_id)
}
/* Display xstats */
- for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
+ for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
+ if (xstats_hide_zero && !xstats[idx_xstat].value)
+ continue;
printf("%s: %"PRIu64"\n",
xstats_names[idx_xstat].name,
xstats[idx_xstat].value);
+ }
free(xstats_names);
free(xstats);
}
@@ -358,7 +370,7 @@ rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
if (rc != 0) {
- printf("Failed to retrieve information for port: %hhu, "
+ printf("Failed to retrieve information for port: %u, "
"RX queue: %hu\nerror desc: %s(%d)\n",
port_id, queue_id, strerror(-rc), rc);
return;
@@ -391,7 +403,7 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
if (rc != 0) {
- printf("Failed to retrieve information for port: %hhu, "
+ printf("Failed to retrieve information for port: %u, "
"TX queue: %hu\nerror desc: %s(%d)\n",
port_id, queue_id, strerror(-rc), rc);
return;
@@ -498,12 +510,15 @@ port_infos_display(portid_t port_id)
char *p;
printf("Supported flow types:\n");
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
- i++) {
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1;
+ i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
continue;
p = flowtype_to_str(i);
- printf(" %s\n", (p ? p : "unknown"));
+ if (p)
+ printf(" %s\n", p);
+ else
+ printf(" user defined %d\n", i);
}
}
@@ -598,6 +613,14 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
+ printf("HW timestamp: ");
+ if (dev->data->dev_conf.rxmode.hw_timestamp)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
printf("Double VLANs insert: ");
if (ports[port_id].tx_ol_flags &
@@ -947,6 +970,9 @@ static const struct {
MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
+ MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
+ MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
+ MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
};
/** Compute storage space needed by item specification. */
@@ -1474,7 +1500,7 @@ tx_desc_id_is_invalid(uint16_t txdesc_id)
}
static const struct rte_memzone *
-ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
+ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
@@ -1524,9 +1550,9 @@ ring_rxd_display_dword(union igb_ring_dword dword)
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
- uint8_t port_id,
+ portid_t port_id,
#else
- __rte_unused uint8_t port_id,
+ __rte_unused portid_t port_id,
#endif
uint16_t desc_id)
{
@@ -1879,7 +1905,7 @@ simple_fwd_config_setup(void)
fwd_streams[i]->rx_queue = 0;
fwd_streams[i]->tx_port = fwd_ports_ids[j];
fwd_streams[i]->tx_queue = 0;
- fwd_streams[i]->peer_addr = j;
+ fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
fwd_streams[i]->retry_enabled = retry_enabled;
if (port_topology == PORT_TOPOLOGY_PAIRED) {
@@ -1887,7 +1913,7 @@ simple_fwd_config_setup(void)
fwd_streams[j]->rx_queue = 0;
fwd_streams[j]->tx_port = fwd_ports_ids[i];
fwd_streams[j]->tx_queue = 0;
- fwd_streams[j]->peer_addr = i;
+ fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
fwd_streams[j]->retry_enabled = retry_enabled;
}
}
@@ -2420,7 +2446,7 @@ set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
}
void
-setup_gro(const char *mode, uint8_t port_id)
+setup_gro(const char *onoff, portid_t port_id)
{
if (!rte_eth_dev_is_valid_port(port_id)) {
printf("invalid port id %u\n", port_id);
@@ -2431,29 +2457,101 @@ setup_gro(const char *mode, uint8_t port_id)
" please stop forwarding first\n");
return;
}
- if (strcmp(mode, "on") == 0) {
- if (gro_ports[port_id].enable) {
- printf("port %u has enabled GRO\n", port_id);
+ if (strcmp(onoff, "on") == 0) {
+ if (gro_ports[port_id].enable != 0) {
+ printf("Port %u has enabled GRO. Please"
+ " disable GRO first\n", port_id);
return;
}
- gro_ports[port_id].enable = 1;
- gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
-
- if (gro_ports[port_id].param.max_flow_num == 0)
+ if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+ gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
gro_ports[port_id].param.max_flow_num =
GRO_DEFAULT_FLOW_NUM;
- if (gro_ports[port_id].param.max_item_per_flow == 0)
gro_ports[port_id].param.max_item_per_flow =
GRO_DEFAULT_ITEM_NUM_PER_FLOW;
+ }
+ gro_ports[port_id].enable = 1;
} else {
if (gro_ports[port_id].enable == 0) {
- printf("port %u has disabled GRO\n", port_id);
+ printf("Port %u has disabled GRO\n", port_id);
return;
}
gro_ports[port_id].enable = 0;
}
}
+void
+setup_gro_flush_cycles(uint8_t cycles)
+{
+ if (test_done == 0) {
+ printf("Before change flush interval for GRO,"
+ " please stop forwarding first.\n");
+ return;
+ }
+
+ if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
+ GRO_DEFAULT_FLUSH_CYCLES) {
+ printf("The flushing cycle be in the range"
+ " of 1 to %u. Revert to the default"
+ " value %u.\n",
+ GRO_MAX_FLUSH_CYCLES,
+ GRO_DEFAULT_FLUSH_CYCLES);
+ cycles = GRO_DEFAULT_FLUSH_CYCLES;
+ }
+
+ gro_flush_cycles = cycles;
+}
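+/*
+ * Editor's note, illustrative: the flush interval is counted in
+ * forwarding rounds, so a cycle value of 2 makes the checksum engine
+ * call rte_gro_timeout_flush() every second received burst (see the
+ * pkt_burst_checksum_forward() changes below). The CLI command that
+ * invokes this function is registered in cmdline.c, outside this hunk.
+ */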
+
+void
+show_gro(portid_t port_id)
+{
+ struct rte_gro_param *param;
+ uint32_t max_pkts_num;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ printf("Invalid port id %u.\n", port_id);
+ return;
+ }
+
+ /* read the per-port GRO state only after the port ID is validated */
+ param = &gro_ports[port_id].param;
+ if (gro_ports[port_id].enable) {
+ printf("GRO type: TCP/IPv4\n");
+ if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+ max_pkts_num = param->max_flow_num *
+ param->max_item_per_flow;
+ } else
+ max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
+ printf("Max number of packets to perform GRO: %u\n",
+ max_pkts_num);
+ printf("Flushing cycles: %u\n", gro_flush_cycles);
+ } else
+ printf("Port %u doesn't enable GRO.\n", port_id);
+}
+
+void
+setup_gso(const char *mode, portid_t port_id)
+{
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ printf("invalid port id %u\n", port_id);
+ return;
+ }
+ if (strcmp(mode, "on") == 0) {
+ if (test_done == 0) {
+ printf("before enabling GSO,"
+ " please stop forwarding first\n");
+ return;
+ }
+ gso_ports[port_id].enable = 1;
+ } else if (strcmp(mode, "off") == 0) {
+ if (test_done == 0) {
+ printf("before disabling GSO,"
+ " please stop forwarding first\n");
+ return;
+ }
+ gso_ports[port_id].enable = 0;
+ }
+}
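+/*
+ * Editor's note, illustrative: GSO is toggled per TX port and only
+ * while forwarding is stopped; the segment size used by the GSO
+ * context is gso_max_segment_size (see the csumonly.c changes below).
+ * The CLI command that calls setup_gso() is registered in cmdline.c,
+ * outside this hunk.
+ */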
+
char*
list_pkt_forwarding_modes(void)
{
@@ -2771,6 +2869,12 @@ set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
}
}
+void
+set_xstats_hide_zero(uint8_t on_off)
+{
+ xstats_hide_zero = on_off;
+}
+
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
@@ -3004,7 +3108,7 @@ fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
return;
}
}
- (void)rte_memcpy(&flex_conf->flex_mask[idx],
+ rte_memcpy(&flex_conf->flex_mask[idx],
cfg,
sizeof(struct rte_eth_fdir_flex_mask));
}
@@ -3034,7 +3138,7 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
return;
}
}
- (void)rte_memcpy(&flex_conf->flex_set[idx],
+ rte_memcpy(&flex_conf->flex_set[idx],
cfg,
sizeof(struct rte_eth_flex_payload_cfg));
@@ -3090,6 +3194,10 @@ set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
int diag = -ENOTSUP;
+ RTE_SET_USED(vf);
+ RTE_SET_USED(rate);
+ RTE_SET_USED(q_msk);
+
#ifdef RTE_LIBRTE_IXGBE_PMD
if (diag == -ENOTSUP)
diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
@@ -3180,7 +3288,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
}
static void
-eth_port_multicast_addr_list_set(uint8_t port_id)
+eth_port_multicast_addr_list_set(portid_t port_id)
{
struct rte_port *port;
int diag;
@@ -3195,7 +3303,7 @@ eth_port_multicast_addr_list_set(uint8_t port_id)
}
void
-mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
+mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
{
struct rte_port *port;
uint32_t i;
@@ -3223,7 +3331,7 @@ mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
}
void
-mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
+mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
{
struct rte_port *port;
uint32_t i;
@@ -3250,7 +3358,7 @@ mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
}
void
-port_dcb_info_display(uint8_t port_id)
+port_dcb_info_display(portid_t port_id)
{
struct rte_eth_dcb_info dcb_info;
uint16_t i;
@@ -3295,46 +3403,43 @@ port_dcb_info_display(uint8_t port_id)
uint8_t *
open_ddp_package_file(const char *file_path, uint32_t *size)
{
- FILE *fh = fopen(file_path, "rb");
- uint32_t pkg_size;
+ int fd = open(file_path, O_RDONLY);
+ off_t pkg_size;
uint8_t *buf = NULL;
int ret = 0;
+ struct stat st_buf;
if (size)
*size = 0;
- if (fh == NULL) {
+ if (fd == -1) {
printf("%s: Failed to open %s\n", __func__, file_path);
return buf;
}
- ret = fseek(fh, 0, SEEK_END);
- if (ret < 0) {
- fclose(fh);
+ if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
+ close(fd);
printf("%s: File operations failed\n", __func__);
return buf;
}
- pkg_size = ftell(fh);
+ pkg_size = st_buf.st_size;
+ if (pkg_size < 0) {
+ close(fd);
+ printf("%s: File operations failed\n", __func__);
+ return buf;
+ }
buf = (uint8_t *)malloc(pkg_size);
if (!buf) {
- fclose(fh);
+ close(fd);
printf("%s: Failed to malloc memory\n", __func__);
return buf;
}
- ret = fseek(fh, 0, SEEK_SET);
- if (ret < 0) {
- fclose(fh);
- printf("%s: File seek operation failed\n", __func__);
- close_ddp_package_file(buf);
- return NULL;
- }
-
- ret = fread(buf, 1, pkg_size, fh);
+ ret = read(fd, buf, pkg_size);
if (ret < 0) {
- fclose(fh);
+ close(fd);
printf("%s: File read operation failed\n", __func__);
close_ddp_package_file(buf);
return NULL;
@@ -3343,7 +3448,7 @@ open_ddp_package_file(const char *file_path, uint32_t *size)
if (size)
*size = pkg_size;
- fclose(fh);
+ close(fd);
return buf;
}
@@ -3379,3 +3484,46 @@ close_ddp_package_file(uint8_t *buf)
return -1;
}
+
+void
+port_queue_region_info_display(portid_t port_id, void *buf)
+{
+#ifdef RTE_LIBRTE_I40E_PMD
+ uint16_t i, j;
+ struct rte_pmd_i40e_queue_regions *info =
+ (struct rte_pmd_i40e_queue_regions *)buf;
+ static const char *queue_region_info_stats_border = "-------";
+
+ if (!info->queue_region_number)
+ printf("there is no region has been set before");
+
+ printf("\n %s All queue region info for port=%2d %s",
+ queue_region_info_stats_border, port_id,
+ queue_region_info_stats_border);
+ printf("\n queue_region_number: %-14u \n",
+ info->queue_region_number);
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ printf("\n region_id: %-14u queue_number: %-14u "
+ "queue_start_index: %-14u \n",
+ info->region[i].region_id,
+ info->region[i].queue_num,
+ info->region[i].queue_start_index);
+
+ printf(" user_priority_num is %-14u :",
+ info->region[i].user_priority_num);
+ for (j = 0; j < info->region[i].user_priority_num; j++)
+ printf(" %-14u ", info->region[i].user_priority[j]);
+
+ printf("\n flowtype_num is %-14u :",
+ info->region[i].flowtype_num);
+ for (j = 0; j < info->region[i].flowtype_num; j++)
+ printf(" %-14u ", info->region[i].hw_flowtype[j]);
+ }
+#else
+ RTE_SET_USED(port_id);
+ RTE_SET_USED(buf);
+#endif
+
+ printf("\n\n");
+}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 90c81198..aa29f5fc 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -70,6 +69,8 @@
#include <rte_string_fns.h>
#include <rte_flow.h>
#include <rte_gro.h>
+#include <rte_gso.h>
+
#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
@@ -91,6 +92,7 @@
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
uint16_t ethertype;
+ uint8_t gso_enable;
uint16_t l2_len;
uint16_t l3_len;
uint16_t l4_len;
@@ -381,6 +383,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
get_udptcp_checksum(l3_hdr, tcp_hdr,
info->ethertype);
}
+ if (info->gso_enable)
+ ol_flags |= PKT_TX_TCP_SEG;
} else if (info->l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
sctp_hdr->cksum = 0;
@@ -627,10 +631,16 @@ static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
+ struct rte_gso_ctx *gso_ctx;
+ struct rte_mbuf **tx_pkts_burst;
struct rte_port *txp;
struct rte_mbuf *m, *p;
struct ether_hdr *eth_hdr;
void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
+ void **gro_ctx;
+ uint16_t gro_pkts_num;
+ uint8_t gro_enable;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t nb_prep;
@@ -641,6 +651,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
struct testpmd_offload_info info;
+ uint16_t nb_segments = 0;
+ int ret;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -657,23 +669,21 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
- if (unlikely(gro_ports[fs->rx_port].enable))
- nb_rx = rte_gro_reassemble_burst(pkts_burst,
- nb_rx,
- &(gro_ports[fs->rx_port].param));
-
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
rx_bad_l4_csum = 0;
+ gro_enable = gro_ports[fs->rx_port].enable;
txp = &ports[fs->tx_port];
testpmd_ol_flags = txp->tx_ol_flags;
memset(&info, 0, sizeof(info));
info.tso_segsz = txp->tso_segsz;
info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
+ if (gso_ports[fs->tx_port].enable)
+ info.gso_enable = 1;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
@@ -851,13 +861,57 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
+ if (unlikely(gro_enable)) {
+ if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+ nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
+ &(gro_ports[fs->rx_port].param));
+ } else {
+ gro_ctx = current_fwd_lcore()->gro_ctx;
+ nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);
+
+ if (++fs->gro_times >= gro_flush_cycles) {
+ gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
+ if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
+ gro_pkts_num = MAX_PKT_BURST - nb_rx;
+
+ nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
+ RTE_GRO_TCP_IPV4,
+ &pkts_burst[nb_rx],
+ gro_pkts_num);
+ fs->gro_times = 0;
+ }
+ }
+ }
+
+ if (gso_ports[fs->tx_port].enable == 0)
+ tx_pkts_burst = pkts_burst;
+ else {
+ gso_ctx = &(current_fwd_lcore()->gso_ctx);
+ gso_ctx->gso_size = gso_max_segment_size;
+ for (i = 0; i < nb_rx; i++) {
+ ret = rte_gso_segment(pkts_burst[i], gso_ctx,
+ &gso_segments[nb_segments],
+ GSO_MAX_PKT_BURST - nb_segments);
+ if (ret >= 0)
+ nb_segments += ret;
+ else {
+ RTE_LOG(DEBUG, USER1,
+ "Unable to segment packet");
+ rte_pktmbuf_free(pkts_burst[i]);
+ }
+ }
+
+ tx_pkts_burst = gso_segments;
+ nb_rx = nb_segments;
+ }
+
nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
- pkts_burst, nb_rx);
+ tx_pkts_burst, nb_rx);
if (nb_prep != nb_rx)
printf("Preparing packet burst to transmit failed: %s\n",
rte_strerror(rte_errno));
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
nb_prep);
/*
@@ -868,7 +922,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
rte_delay_us(burst_tx_delay_time);
nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
+ &tx_pkts_burst[nb_tx], nb_rx - nb_tx);
}
}
fs->tx_packets += nb_tx;
@@ -881,9 +935,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (unlikely(nb_tx < nb_rx)) {
fs->fwd_dropped += (nb_rx - nb_tx);
do {
- rte_pktmbuf_free(pkts_burst[nb_tx]);
+ rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
+
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
end_tsc = rte_rdtsc();
core_cycles = (end_tsc - start_tsc);
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 54e56f60..acf9af94 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -50,7 +50,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index 51170ee3..91ee7864 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -86,12 +86,11 @@ port_ieee1588_rx_timestamp_check(portid_t pi, uint32_t index)
struct timespec timestamp = {0, 0};
if (rte_eth_timesync_read_rx_timestamp(pi, &timestamp, index) < 0) {
- printf("Port %u RX timestamp registers not valid\n",
- (unsigned) pi);
+ printf("Port %u RX timestamp registers not valid\n", pi);
return;
}
printf("Port %u RX timestamp value %lu s %lu ns\n",
- (unsigned) pi, timestamp.tv_sec, timestamp.tv_nsec);
+ pi, timestamp.tv_sec, timestamp.tv_nsec);
}
#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */
@@ -110,12 +109,12 @@ port_ieee1588_tx_timestamp_check(portid_t pi)
if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) {
printf("Port %u TX timestamp registers not valid after "
"%u micro-seconds\n",
- (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS);
+ pi, MAX_TX_TMST_WAIT_MICROSECS);
return;
}
printf("Port %u TX timestamp value %lu s %lu ns validated after "
"%u micro-second%s\n",
- (unsigned) pi, timestamp.tv_sec, timestamp.tv_nsec, wait_us,
+ pi, timestamp.tv_sec, timestamp.tv_nsec, wait_us,
(wait_us == 1) ? "" : "s");
}
@@ -148,11 +147,11 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
if (eth_type == ETHER_TYPE_1588) {
printf("Port %u Received PTP packet not filtered"
" by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
} else {
printf("Port %u Received non PTP packet type=0x%4x "
"len=%u\n",
- (unsigned) fs->rx_port, eth_type,
+ fs->rx_port, eth_type,
(unsigned) mb->pkt_len);
}
rte_pktmbuf_free(mb);
@@ -161,7 +160,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
if (eth_type != ETHER_TYPE_1588) {
printf("Port %u Received NON PTP packet incorrectly"
" detected by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
rte_pktmbuf_free(mb);
return;
}
@@ -175,19 +174,19 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
if (ptp_hdr->version != 0x02) {
printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
" protocol version 0x%x (should be 0x02)\n",
- (unsigned) fs->rx_port, ptp_hdr->version);
+ fs->rx_port, ptp_hdr->version);
rte_pktmbuf_free(mb);
return;
}
if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) {
printf("Port %u Received PTP V2 Ethernet frame with unexpected"
" message ID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n",
- (unsigned) fs->rx_port, ptp_hdr->msg_id);
+ fs->rx_port, ptp_hdr->msg_id);
rte_pktmbuf_free(mb);
return;
}
printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
/*
* Check that the received PTP packet has been timestamped by the
@@ -196,7 +195,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
printf("Port %u Received PTP packet not timestamped"
" by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
rte_pktmbuf_free(mb);
return;
}
@@ -216,8 +215,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
mb->ol_flags |= PKT_TX_IEEE1588_TMST;
fs->tx_packets += 1;
if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
- printf("Port %u sent PTP packet dropped\n",
- (unsigned) fs->rx_port);
+ printf("Port %u sent PTP packet dropped\n", fs->rx_port);
fs->fwd_dropped += 1;
rte_pktmbuf_free(mb);
return;
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 9b54f654..ff6de45c 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -48,7 +48,6 @@
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 06dbc73a..f4a4bf29 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 19cda0ea..721865c9 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 2f7f70fd..84e7a63e 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -55,7 +55,6 @@
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -162,6 +161,7 @@ usage(char* progname)
printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
printf(" --enable-lro: enable large receive offload.\n");
printf(" --enable-rx-cksum: enable rx hardware checksum offload.\n");
+ printf(" --enable-rx-timestamp: enable rx hardware timestamp offload.\n");
printf(" --disable-hw-vlan: disable hardware vlan.\n");
printf(" --disable-hw-vlan-filter: disable hardware vlan filter.\n");
printf(" --disable-hw-vlan-strip: disable hardware vlan strip.\n");
@@ -393,7 +393,8 @@ parse_portnuma_config(const char *q_arg)
char s[256];
const char *p, *p0 = q_arg;
char *end;
- uint8_t i,port_id,socket_id;
+ uint8_t i, socket_id;
+ portid_t port_id;
unsigned size;
enum fieldnames {
FLD_PORT = 0,
@@ -423,8 +424,9 @@ parse_portnuma_config(const char *q_arg)
if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
return -1;
}
- port_id = (uint8_t)int_fld[FLD_PORT];
- if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ port_id = (portid_t)int_fld[FLD_PORT];
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL) {
printf("Valid port range is [0");
RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
@@ -448,7 +450,8 @@ parse_ringnuma_config(const char *q_arg)
char s[256];
const char *p, *p0 = q_arg;
char *end;
- uint8_t i,port_id,ring_flag,socket_id;
+ uint8_t i, ring_flag, socket_id;
+ portid_t port_id;
unsigned size;
enum fieldnames {
FLD_PORT = 0,
@@ -482,8 +485,9 @@ parse_ringnuma_config(const char *q_arg)
if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
return -1;
}
- port_id = (uint8_t)int_fld[FLD_PORT];
- if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ port_id = (portid_t)int_fld[FLD_PORT];
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL) {
printf("Valid port range is [0");
RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
@@ -601,6 +605,7 @@ launch_args_parse(int argc, char** argv)
{ "disable-crc-strip", 0, 0, 0 },
{ "enable-lro", 0, 0, 0 },
{ "enable-rx-cksum", 0, 0, 0 },
+ { "enable-rx-timestamp", 0, 0, 0 },
{ "enable-scatter", 0, 0, 0 },
{ "disable-hw-vlan", 0, 0, 0 },
{ "disable-hw-vlan-filter", 0, 0, 0 },
@@ -734,7 +739,7 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "nb-ports")) {
n = atoi(optarg);
if (n > 0 && n <= nb_ports)
- nb_fwd_ports = (uint8_t) n;
+ nb_fwd_ports = n;
else
rte_exit(EXIT_FAILURE,
"Invalid port %d\n", n);
@@ -899,6 +904,9 @@ launch_args_parse(int argc, char** argv)
rx_mode.enable_scatter = 1;
if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
rx_mode.hw_ip_checksum = 1;
+ if (!strcmp(lgopts[opt_idx].name,
+ "enable-rx-timestamp"))
+ rx_mode.hw_timestamp = 1;
if (!strcmp(lgopts[opt_idx].name, "disable-hw-vlan")) {
rx_mode.hw_vlan_filter = 0;
@@ -932,7 +940,7 @@ launch_args_parse(int argc, char** argv)
port_topology = PORT_TOPOLOGY_LOOP;
else
rte_exit(EXIT_FAILURE, "port-topology %s invalid -"
- " must be: paired or chained \n",
+ " must be: paired, chained or loop\n",
optarg);
}
if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 5ef02190..fb6e8e33 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -122,7 +121,7 @@ pkt_burst_receive(struct fwd_stream *fs)
*/
if (verbose_level > 0)
printf("port %u/queue %u: received %u packets\n",
- (unsigned) fs->rx_port,
+ fs->rx_port,
(unsigned) fs->rx_queue,
(unsigned) nb_rx);
for (i = 0; i < nb_rx; i++) {
@@ -158,6 +157,8 @@ pkt_burst_receive(struct fwd_stream *fs)
printf("hash=0x%x ID=0x%x ",
mb->hash.fdir.hash, mb->hash.fdir.id);
}
+ if (ol_flags & PKT_RX_TIMESTAMP)
+ printf(" - timestamp %"PRIu64" ", mb->timestamp);
if (ol_flags & PKT_RX_VLAN_STRIPPED)
printf(" - VLAN tci=0x%x", mb->vlan_tci);
if (ol_flags & PKT_RX_QINQ_STRIPPED)
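
The hunk above prints the new mb->timestamp field when PKT_RX_TIMESTAMP is set. For reference, a minimal sketch of consuming the same offload outside testpmd, assuming the 17.11 mbuf/ethdev API (print_rx_timestamps is a hypothetical helper; port and queue setup are elided):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Drain one burst and print HW RX timestamps; assumes the port was
     * configured with rxmode.hw_timestamp = 1, as testpmd now does when
     * --enable-rx-timestamp is given. */
    static void
    print_rx_timestamps(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_mbuf *bufs[32];
        uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
        uint16_t i;

        for (i = 0; i < nb_rx; i++) {
            if (bufs[i]->ol_flags & PKT_RX_TIMESTAMP)
                printf("port %u: rx timestamp %" PRIu64 "\n",
                        port_id, bufs[i]->timestamp);
            rte_pktmbuf_free(bufs[i]);
        }
    }
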
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 7d401394..c3ab4484 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -38,6 +38,7 @@
#include <string.h>
#include <time.h>
#include <fcntl.h>
+#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
@@ -56,7 +57,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
@@ -76,9 +76,6 @@
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
-#ifdef RTE_LIBRTE_PMD_XENVIRT
-#include <rte_eth_xenvirt.h>
-#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
@@ -90,7 +87,6 @@
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
-#include <rte_gro.h>
#include "testpmd.h"
@@ -167,6 +163,10 @@ struct fwd_engine * fwd_engines[] = {
&tx_only_engine,
&csum_fwd_engine,
&icmp_echo_engine,
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+ &softnic_tm_engine,
+ &softnic_tm_bypass_engine,
+#endif
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
#endif
@@ -183,6 +183,13 @@ uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
+
+/*
+ * When running in a container, the process started with the 'stats-period'
+ * option cannot be terminated interactively. Set a flag so the stats-period
+ * loop exits once SIGINT/SIGTERM is received.
+ */
+uint8_t f_quit;
+
/*
* Configuration of packet segments used by the "txonly" processing engine.
*/
@@ -339,6 +346,7 @@ struct rte_eth_rxmode rx_mode = {
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
.hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
+ .hw_timestamp = 0, /**< HW timestamp disabled. */
};
struct rte_fdir_conf fdir_conf = {
@@ -375,6 +383,11 @@ struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_a
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
+/*
+ * Display zero values by default for xstats
+ */
+uint8_t xstats_hide_zero;
+
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
@@ -386,11 +399,13 @@ uint8_t bitrate_enabled;
#endif
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/* Forward function declarations */
-static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
+static void map_port_queue_stats_mapping_registers(portid_t pi,
+ struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
-static int eth_event_callback(uint8_t port_id,
+static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
@@ -400,6 +415,9 @@ static int eth_event_callback(uint8_t port_id,
*/
static int all_ports_started(void);
+struct gso_status gso_ports[RTE_MAX_ETHPORTS];
+uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
+
/*
* Helper function to check if socket is already discovered.
* If yes, return positive value. If not, return zero.
@@ -463,9 +481,10 @@ static void
set_default_fwd_ports_config(void)
{
portid_t pt_id;
+ int i = 0;
- for (pt_id = 0; pt_id < nb_ports; pt_id++)
- fwd_ports_ids[pt_id] = pt_id;
+ RTE_ETH_FOREACH_DEV(pt_id)
+ fwd_ports_ids[i++] = pt_id;
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
@@ -497,37 +516,25 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
pool_name, nb_mbuf, mbuf_seg_size, socket_id);
-#ifdef RTE_LIBRTE_PMD_XENVIRT
- rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
- (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- socket_id, 0);
-#endif
-
- /* if the former XEN allocation failed fall back to normal allocation */
- if (rte_mp == NULL) {
- if (mp_anon != 0) {
- rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
- mb_size, (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- socket_id, 0);
- if (rte_mp == NULL)
- goto err;
-
- if (rte_mempool_populate_anon(rte_mp) == 0) {
- rte_mempool_free(rte_mp);
- rte_mp = NULL;
- goto err;
- }
- rte_pktmbuf_pool_init(rte_mp, NULL);
- rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
- } else {
- /* wrapper to rte_mempool_create() */
- rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
- mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ if (mp_anon != 0) {
+ rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
+ mb_size, (unsigned) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ socket_id, 0);
+ if (rte_mp == NULL)
+ goto err;
+
+ if (rte_mempool_populate_anon(rte_mp) == 0) {
+ rte_mempool_free(rte_mp);
+ rte_mp = NULL;
+ goto err;
}
+ rte_pktmbuf_pool_init(rte_mp, NULL);
+ rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ } else {
+ /* wrapper to rte_mempool_create() */
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id);
}
err:
@@ -570,6 +577,8 @@ init_config(void)
unsigned int nb_mbuf_per_pool;
lcoreid_t lc_id;
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
+ struct rte_gro_param gro_param;
+ uint32_t gso_types;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
@@ -654,6 +663,8 @@ init_config(void)
init_port_config();
+ gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@@ -664,6 +675,13 @@ init_config(void)
if (mbp == NULL)
mbp = mbuf_pool_find(0);
fwd_lcores[lc_id]->mbp = mbp;
+ /* initialize GSO context */
+ fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
+ fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
+ fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
+ fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
+ ETHER_CRC_LEN;
+ fwd_lcores[lc_id]->gso_ctx.flag = 0;
}
/* Configuration of packet forwarding streams. */
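
The gso_ctx initialized above is consumed by rte_gso_segment() in the checksum-forwarding path. A hedged sketch of a single call, assuming the 17.11 librte_gso API (GSO_MAX_SEGS and segment_one are illustrative names):

    #include <rte_gso.h>
    #include <rte_mbuf.h>

    #define GSO_MAX_SEGS 64 /* illustrative per-packet output budget */

    /* Segment one oversized, TSO-flagged mbuf using a gso_ctx set up as
     * in init_config() above. Per the 17.11 GSO library, the return value
     * is the number of packets written to pkts_out (the input mbuf itself
     * when no segmentation is needed) or a negative errno on failure. */
    static int
    segment_one(const struct rte_gso_ctx *gso_ctx, struct rte_mbuf *pkt,
            struct rte_mbuf **pkts_out)
    {
        return rte_gso_segment(pkt, gso_ctx, pkts_out, GSO_MAX_SEGS);
    }

The output array is then handed to rte_eth_tx_burst() like any other burst.
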
@@ -671,6 +689,20 @@ init_config(void)
rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
fwd_config_setup();
+
+ /* create a gro context for each lcore */
+ gro_param.gro_types = RTE_GRO_TCP_IPV4;
+ gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
+ gro_param.max_item_per_flow = MAX_PKT_BURST;
+ for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
+ gro_param.socket_id = rte_lcore_to_socket_id(
+ fwd_lcores_cpuids[lc_id]);
+ fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
+ if (fwd_lcores[lc_id]->gro_ctx == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "rte_gro_ctx_create() failed\n");
+ }
+ }
}
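
The per-lcore gro_ctx created here drives the heavyweight GRO mode (gro_flush_cycles > 1). A minimal sketch of the reassemble-then-flush cycle the checksum engine performs with it, assuming the 17.11 librte_gro API; testpmd only flushes every gro_flush_cycles iterations, while this sketch (gro_burst is an illustrative helper) flushes every burst for brevity:

    #include <rte_gro.h>
    #include <rte_mbuf.h>

    /* Run heavyweight TCP/IPv4 GRO over one burst. Packets kept by the
     * library for reassembly are removed from pkts[]; a zero timeout then
     * flushes everything merged so far into the tail of the same array. */
    static uint16_t
    gro_burst(void *gro_ctx, struct rte_mbuf **pkts, uint16_t nb_pkts,
            uint16_t pkts_sz)
    {
        uint16_t nb_rx;

        nb_rx = rte_gro_reassemble(pkts, nb_pkts, gro_ctx);
        nb_rx += rte_gro_timeout_flush(gro_ctx, 0, RTE_GRO_TCP_IPV4,
                &pkts[nb_rx], pkts_sz - nb_rx);
        return nb_rx;
    }
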
@@ -1217,6 +1249,7 @@ stop_packet_forwarding(void)
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t fwd_cycles;
#endif
+
static const char *acc_stats_border = "+++++++++++++++";
if (test_done) {
@@ -1307,6 +1340,7 @@ stop_packet_forwarding(void)
fwd_port_stats_display(pt_id, &stats);
}
+
printf("\n %s Accumulated forward statistics for all ports"
"%s\n",
acc_stats_border, acc_stats_border);
@@ -1335,14 +1369,14 @@ stop_packet_forwarding(void)
void
dev_set_link_up(portid_t pid)
{
- if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
+ if (rte_eth_dev_set_link_up(pid) < 0)
printf("\nSet link up fail.\n");
}
void
dev_set_link_down(portid_t pid)
{
- if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
+ if (rte_eth_dev_set_link_down(pid) < 0)
printf("\nSet link down fail.\n");
}
@@ -1683,6 +1717,47 @@ close_port(portid_t pid)
}
void
+reset_port(portid_t pid)
+{
+ int diag;
+ portid_t pi;
+ struct rte_port *port;
+
+ if (port_id_is_invalid(pid, ENABLED_WARN))
+ return;
+
+ printf("Resetting ports...\n");
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
+ continue;
+
+ if (port_is_forwarding(pi) != 0 && test_done == 0) {
+ printf("Please remove port %d from forwarding "
+ "configuration.\n", pi);
+ continue;
+ }
+
+ if (port_is_bonding_slave(pi)) {
+ printf("Please remove port %d from bonded device.\n",
+ pi);
+ continue;
+ }
+
+ diag = rte_eth_dev_reset(pi);
+ if (diag == 0) {
+ port = &ports[pi];
+ port->need_reconfig = 1;
+ port->need_reconfig_queues = 1;
+ } else {
+ printf("Failed to reset port %d. diag=%d\n", pi, diag);
+ }
+ }
+
+ printf("Done\n");
+}
+
+void
attach_port(char *identifier)
{
portid_t pi = 0;
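
reset_port() above only flags the port for reconfiguration because testpmd redoes its own queue setup in start_port(). In a standalone application, the flow around the new rte_eth_dev_reset() API would be stop, reset, reconfigure, restart; a hedged sketch (recover_port and its saved-configuration parameters are illustrative):

    #include <rte_ethdev.h>

    /* Sketch: recover a port after e.g. an RTE_ETH_EVENT_INTR_RESET event.
     * nb_rxq/nb_txq/conf stand in for the application's saved setup; the
     * per-queue rte_eth_rx/tx_queue_setup() calls are elided. */
    static int
    recover_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
            const struct rte_eth_conf *conf)
    {
        rte_eth_dev_stop(port_id);
        if (rte_eth_dev_reset(port_id) != 0)
            return -1;
        if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf) != 0)
            return -1;
        return rte_eth_dev_start(port_id);
    }
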
@@ -1714,7 +1789,7 @@ attach_port(char *identifier)
}
void
-detach_port(uint8_t port_id)
+detach_port(portid_t port_id)
{
char name[RTE_ETH_NAME_MAX_LEN];
@@ -1775,7 +1850,8 @@ check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ portid_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("Checking link statuses...\n");
@@ -1790,14 +1866,13 @@ check_all_ports_link_status(uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port %d Link Up - speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -1829,7 +1904,7 @@ static void
rmv_event_callback(void *arg)
{
struct rte_eth_dev *dev;
- uint8_t port_id = (intptr_t)arg;
+ portid_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
@@ -1844,7 +1919,7 @@ rmv_event_callback(void *arg)
/* This function is used by the interrupt thread */
static int
-eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
static const char * const event_desc[] = {
@@ -1884,7 +1959,7 @@ eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
}
static int
-set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
uint16_t i;
int diag;
@@ -1907,7 +1982,7 @@ set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
}
static int
-set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
uint16_t i;
int diag;
@@ -1930,7 +2005,7 @@ set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
}
static void
-map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
+map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
int diag = 0;
@@ -2044,6 +2119,17 @@ init_port_config(void)
(rte_eth_devices[pid].data->dev_flags &
RTE_ETH_DEV_INTR_RMV))
port->dev_conf.intr_conf.rmv = 1;
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+ /* Detect softnic port */
+ if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
+ port->softnic_enable = 1;
+ memset(&port->softport, 0, sizeof(struct softnic_port));
+
+ if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
+ port->softport.tm_flag = 1;
+ }
+#endif
}
}
@@ -2111,8 +2197,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1 << (i % vmdq_rx_conf->nb_queue_pools);
}
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- vmdq_rx_conf->dcb_tc[i] = i;
- vmdq_tx_conf->dcb_tc[i] = i;
+ vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
+ vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
}
/* set DCB mode of RX and TX of multiple queues */
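
The modulo fix above keeps every user priority within the configured number of traffic classes; with dcb_tc[i] = i, any num_tcs smaller than ETH_DCB_NUM_USER_PRIORITIES (8) produced out-of-range TC indexes. A standalone illustration of the resulting mapping:

    #include <stdio.h>

    #define ETH_DCB_NUM_USER_PRIORITIES 8 /* as in rte_ethdev.h */

    int main(void)
    {
        int num_tcs = 4; /* example: ETH_4_TCS */
        int i;

        /* priorities 0..7 now map to TCs 0,1,2,3,0,1,2,3 */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
            printf("user priority %d -> TC %d\n", i, i % num_tcs);
        return 0;
    }
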
@@ -2173,7 +2259,7 @@ init_port_dcb_config(portid_t pid,
* Set the numbers of RX & TX queues to 0, so
* the RX & TX queues will not be setup.
*/
- (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
+ rte_eth_dev_configure(pid, 0, 0, &port_conf);
rte_eth_dev_info_get(pid, &rte_port->dev_info);
@@ -2277,6 +2363,8 @@ signal_handler(int signum)
rte_latencystats_uninit();
#endif
force_quit();
+ /* Set flag to indicate forced termination. */
+ f_quit = 1;
/* exit with the expected status */
signal(signum, SIG_DFL);
kill(getpid(), signum);
@@ -2287,7 +2375,7 @@ int
main(int argc, char** argv)
{
int diag;
- uint8_t port_id;
+ portid_t port_id;
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
@@ -2296,6 +2384,11 @@ main(int argc, char** argv)
if (diag < 0)
rte_panic("Cannot init EAL\n");
+ if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
+ RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
+ strerror(errno));
+ }
+
#ifdef RTE_LIBRTE_PDUMP
/* initialize packet capture framework */
rte_pdump_init(NULL);
@@ -2394,6 +2487,8 @@ main(int argc, char** argv)
char c;
int rc;
+ f_quit = 0;
+
printf("No commandline core given, start packet forwarding\n");
start_packet_forwarding(tx_first);
if (stats_period != 0) {
@@ -2403,7 +2498,7 @@ main(int argc, char** argv)
/* Convert to number of cycles */
timer_period = stats_period * rte_get_timer_hz();
- while (1) {
+ while (f_quit == 0) {
cur_time = rte_get_timer_cycles();
diff_time += cur_time - prev_time;
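
The signal handler and the f_quit check above replace the previous infinite stats loop. A self-contained sketch of the same handshake, with the DPDK calls swapped for a plain sleep so it compiles anywhere; note that volatile sig_atomic_t is the portable type for a flag written from a signal handler (testpmd uses a plain uint8_t):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The handler only sets a flag; the main loop polls it. */
    static volatile sig_atomic_t f_quit;

    static void signal_handler(int signum)
    {
        (void)signum;
        f_quit = 1;
    }

    int main(void)
    {
        signal(SIGINT, signal_handler);
        signal(SIGTERM, signal_handler);
        while (f_quit == 0) {
            /* periodic stats display would go here */
            sleep(1);
        }
        printf("exiting stats loop\n");
        return 0;
    }
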
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index c9d7739b..1639d27e 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -35,7 +35,9 @@
#define _TESTPMD_H_
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_gro.h>
+#include <rte_gso.h>
#define RTE_PORT_ALL (~(portid_t)0x0)
@@ -78,12 +80,18 @@
#define UMA_NO_CONFIG 0xFF
typedef uint8_t lcoreid_t;
-typedef uint8_t portid_t;
+typedef uint16_t portid_t;
typedef uint16_t queueid_t;
typedef uint16_t streamid_t;
#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
+/*
+ * Define TM_MODE only when the softnic TM engines are compiled in, so the
+ * "#ifdef TM_MODE" guards below drop the softnic fields when the feature
+ * is disabled.
+ */
+#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
+#define TM_MODE 1
+#endif
+
enum {
PORT_TOPOLOGY_PAIRED,
PORT_TOPOLOGY_CHAINED,
@@ -120,6 +128,7 @@ struct fwd_stream {
unsigned int fwd_dropped; /**< received packets not forwarded */
unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+ unsigned int gro_times; /**< GRO operation times */
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t core_cycles; /**< used for RX and TX processing */
#endif
@@ -162,6 +171,38 @@ struct port_flow {
uint8_t data[]; /**< Storage for pattern/actions. */
};
+#ifdef TM_MODE
+/**
+ * Soft port tm related parameters
+ */
+struct softnic_port_tm {
+ uint32_t default_hierarchy_enable; /**< default hierarchy enable flag */
+ uint32_t hierarchy_config; /**< set to 1 if hierarchy configured */
+
+ uint32_t n_subports_per_port; /**< Num of subport nodes per port */
+ uint32_t n_pipes_per_subport; /**< Num of pipe nodes per subport */
+
+ uint64_t tm_pktfield0_slabpos; /**< Pkt field position for subport */
+ uint64_t tm_pktfield0_slabmask; /**< Pkt field mask for the subport */
+ uint64_t tm_pktfield0_slabshr; /**< Pkt field shift for the subport */
+ uint64_t tm_pktfield1_slabpos; /**< Pkt field position for the pipe */
+ uint64_t tm_pktfield1_slabmask; /**< Pkt field mask for the pipe */
+ uint64_t tm_pktfield1_slabshr; /**< Pkt field shift for the pipe */
+ uint64_t tm_pktfield2_slabpos; /**< Pkt field position table index */
+ uint64_t tm_pktfield2_slabmask; /**< Pkt field mask for tc table idx */
+ uint64_t tm_pktfield2_slabshr; /**< Pkt field shift for tc table idx */
+ uint64_t tm_tc_table[64]; /**< TC translation table */
+};
+
+/**
+ * The data structure associated with the softnic port
+ */
+struct softnic_port {
+ unsigned int tm_flag; /**< set to 1 if tm feature is enabled */
+ struct softnic_port_tm tm; /**< softnic port tm parameters */
+};
+#endif
+
/**
* The data structure associated with each port.
*/
@@ -195,6 +236,10 @@ struct rte_port {
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
struct port_flow *flow_list; /**< Associated flows. */
+#ifdef TM_MODE
+ unsigned int softnic_enable; /**< softnic flag */
+ struct softnic_port softport; /**< softnic port params */
+#endif
};
/**
@@ -205,7 +250,9 @@ struct rte_port {
* CPU id. configuration table.
*/
struct fwd_lcore {
+ struct rte_gso_ctx gso_ctx; /**< GSO context */
struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
+ void *gro_ctx; /**< GRO context */
streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
@@ -253,6 +300,10 @@ extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine;
+#ifdef TM_MODE
+extern struct fwd_engine softnic_tm_engine;
+extern struct fwd_engine softnic_tm_bypass_engine;
+#endif
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
#endif
@@ -283,7 +334,7 @@ enum dcb_mode_enable
#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */
struct queue_stats_mappings {
- uint8_t port_id;
+ portid_t port_id;
uint16_t queue_id;
uint8_t stats_counter_id;
} __rte_cache_aligned;
@@ -298,6 +349,8 @@ extern struct queue_stats_mappings *rx_queue_stats_mappings;
extern uint16_t nb_tx_queue_stats_mappings;
extern uint16_t nb_rx_queue_stats_mappings;
+extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */
+
/* globals used for configuration */
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
extern uint8_t interactive;
@@ -434,13 +487,26 @@ extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
-#define GRO_DEFAULT_FLOW_NUM 4
-#define GRO_DEFAULT_ITEM_NUM_PER_FLOW DEF_PKT_BURST
+#define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32
+#define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \
+ GRO_DEFAULT_ITEM_NUM_PER_FLOW)
+
+#define GRO_DEFAULT_FLUSH_CYCLES 1
+#define GRO_MAX_FLUSH_CYCLES 4
+
struct gro_status {
struct rte_gro_param param;
uint8_t enable;
};
extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
+extern uint8_t gro_flush_cycles;
+
+#define GSO_MAX_PKT_BURST 2048
+struct gso_status {
+ uint8_t enable;
+};
+extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
+extern uint16_t gso_max_segment_size;
static inline unsigned int
lcore_num(void)
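
The reworked GRO defaults above derive the flow count from the library burst budget, so GRO_DEFAULT_FLOW_NUM * GRO_DEFAULT_ITEM_NUM_PER_FLOW equals RTE_GRO_MAX_BURST_ITEM_NUM by construction. The relationship can be restated as a compile-time check (a sketch using C11 _Static_assert; DPDK code would normally use RTE_BUILD_BUG_ON):

    #include <rte_gro.h>

    #define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32
    #define GRO_DEFAULT_FLOW_NUM \
        (RTE_GRO_MAX_BURST_ITEM_NUM / GRO_DEFAULT_ITEM_NUM_PER_FLOW)

    /* The defaults never exceed one lightweight GRO call's capacity. */
    _Static_assert(GRO_DEFAULT_FLOW_NUM * GRO_DEFAULT_ITEM_NUM_PER_FLOW
            <= RTE_GRO_MAX_BURST_ITEM_NUM,
            "GRO defaults exceed the burst item budget");
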
@@ -587,6 +653,8 @@ void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on);
void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value);
+void set_xstats_hide_zero(uint8_t on_off);
+
void set_verbose_level(uint16_t vb_level);
void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
void show_tx_pkt_segments(void);
@@ -610,8 +678,9 @@ int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
int start_port(portid_t pid);
void stop_port(portid_t pid);
void close_port(portid_t pid);
+void reset_port(portid_t pid);
void attach_port(char *identifier);
-void detach_port(uint8_t port_id);
+void detach_port(portid_t port_id);
int all_ports_stopped(void);
int port_is_started(portid_t port_id);
void pmd_test_exit(void);
@@ -634,23 +703,24 @@ void port_rss_hash_conf_show(portid_t port_id, char rss_info[],
int show_rss_key);
void port_rss_hash_key_update(portid_t port_id, char rss_type[],
uint8_t *hash_key, uint hash_key_len);
-void get_syn_filter(uint8_t port_id);
-void get_ethertype_filter(uint8_t port_id, uint16_t index);
-void get_2tuple_filter(uint8_t port_id, uint16_t index);
-void get_5tuple_filter(uint8_t port_id, uint16_t index);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);
-void setup_gro(const char *mode, uint8_t port_id);
+void setup_gro(const char *onoff, portid_t port_id);
+void setup_gro_flush_cycles(uint8_t cycles);
+void show_gro(portid_t port_id);
+void setup_gso(const char *mode, portid_t port_id);
/* Functions to manage the set of filtered Multicast MAC addresses */
-void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
-void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
-void port_dcb_info_display(uint8_t port_id);
+void mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr);
+void mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr);
+void port_dcb_info_display(portid_t port_id);
uint8_t *open_ddp_package_file(const char *file_path, uint32_t *size);
int save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size);
int close_ddp_package_file(uint8_t *buf);
+void port_queue_region_info_display(portid_t port_id, void *buf);
+
enum print_warning {
ENABLED_WARN = 0,
DISABLED_WARN
diff --git a/app/test-pmd/tm.c b/app/test-pmd/tm.c
new file mode 100644
index 00000000..dd837cb8
--- /dev/null
+++ b/app/test-pmd/tm.c
@@ -0,0 +1,865 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <sys/stat.h>
+
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_meter.h>
+#include <rte_eth_softnic.h>
+#include <rte_tm.h>
+
+#include "testpmd.h"
+
+#define SUBPORT_NODES_PER_PORT 1
+#define PIPE_NODES_PER_SUBPORT 4096
+#define TC_NODES_PER_PIPE 4
+#define QUEUE_NODES_PER_TC 4
+
+#define NUM_PIPE_NODES \
+ (SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT)
+
+#define NUM_TC_NODES \
+ (NUM_PIPE_NODES * TC_NODES_PER_PIPE)
+
+#define ROOT_NODE_ID 1000000
+#define SUBPORT_NODES_START_ID 900000
+#define PIPE_NODES_START_ID 800000
+#define TC_NODES_START_ID 700000
+
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | \
+ RTE_TM_STATS_N_PKTS_QUEUED)
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+#define TOKEN_BUCKET_SIZE 1000000
+
+/* TM Hierarchy Levels */
+enum tm_hierarchy_level {
+ TM_NODE_LEVEL_PORT = 0,
+ TM_NODE_LEVEL_SUBPORT,
+ TM_NODE_LEVEL_PIPE,
+ TM_NODE_LEVEL_TC,
+ TM_NODE_LEVEL_QUEUE,
+ TM_NODE_LEVEL_MAX,
+};
+
+struct tm_hierarchy {
+ /* TM Nodes */
+ uint32_t root_node_id;
+ uint32_t subport_node_id[SUBPORT_NODES_PER_PORT];
+ uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT];
+ uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE];
+ uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC];
+
+ /* TM Hierarchy Nodes Shaper Rates */
+ uint32_t root_node_shaper_rate;
+ uint32_t subport_node_shaper_rate;
+ uint32_t pipe_node_shaper_rate;
+ uint32_t tc_node_shaper_rate;
+ uint32_t tc_node_shared_shaper_rate;
+
+ uint32_t n_shapers;
+};
+
+#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \
+({ \
+ uint64_t slab = *((uint64_t *) &byte_array[slab_pos]); \
+ uint64_t val = \
+ (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
+ val; \
+})
+
+#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
+ traffic_class, queue, color) \
+ ((((uint64_t) (queue)) & 0x3) | \
+ ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
+ ((((uint64_t) (color)) & 0x3) << 4) | \
+ ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
+ ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
+
+
+static void
+pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts,
+ uint32_t n_pkts)
+{
+ struct softnic_port_tm *tm = &p->softport.tm;
+ uint32_t i;
+
+ for (i = 0; i < (n_pkts & (~0x3)); i += 4) {
+ struct rte_mbuf *pkt0 = pkts[i];
+ struct rte_mbuf *pkt1 = pkts[i + 1];
+ struct rte_mbuf *pkt2 = pkts[i + 2];
+ struct rte_mbuf *pkt3 = pkts[i + 3];
+
+ uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *);
+ uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *);
+ uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *);
+ uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *);
+
+ uint64_t pkt0_subport = BITFIELD(pkt0_data,
+ tm->tm_pktfield0_slabpos,
+ tm->tm_pktfield0_slabmask,
+ tm->tm_pktfield0_slabshr);
+ uint64_t pkt0_pipe = BITFIELD(pkt0_data,
+ tm->tm_pktfield1_slabpos,
+ tm->tm_pktfield1_slabmask,
+ tm->tm_pktfield1_slabshr);
+ uint64_t pkt0_dscp = BITFIELD(pkt0_data,
+ tm->tm_pktfield2_slabpos,
+ tm->tm_pktfield2_slabmask,
+ tm->tm_pktfield2_slabshr);
+ uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2;
+ uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3;
+ uint64_t pkt1_subport = BITFIELD(pkt1_data,
+ tm->tm_pktfield0_slabpos,
+ tm->tm_pktfield0_slabmask,
+ tm->tm_pktfield0_slabshr);
+ uint64_t pkt1_pipe = BITFIELD(pkt1_data,
+ tm->tm_pktfield1_slabpos,
+ tm->tm_pktfield1_slabmask,
+ tm->tm_pktfield1_slabshr);
+ uint64_t pkt1_dscp = BITFIELD(pkt1_data,
+ tm->tm_pktfield2_slabpos,
+ tm->tm_pktfield2_slabmask,
+ tm->tm_pktfield2_slabshr);
+ uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2;
+ uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3;
+
+ uint64_t pkt2_subport = BITFIELD(pkt2_data,
+ tm->tm_pktfield0_slabpos,
+ tm->tm_pktfield0_slabmask,
+ tm->tm_pktfield0_slabshr);
+ uint64_t pkt2_pipe = BITFIELD(pkt2_data,
+ tm->tm_pktfield1_slabpos,
+ tm->tm_pktfield1_slabmask,
+ tm->tm_pktfield1_slabshr);
+ uint64_t pkt2_dscp = BITFIELD(pkt2_data,
+ tm->tm_pktfield2_slabpos,
+ tm->tm_pktfield2_slabmask,
+ tm->tm_pktfield2_slabshr);
+ uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2;
+ uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3;
+
+ uint64_t pkt3_subport = BITFIELD(pkt3_data,
+ tm->tm_pktfield0_slabpos,
+ tm->tm_pktfield0_slabmask,
+ tm->tm_pktfield0_slabshr);
+ uint64_t pkt3_pipe = BITFIELD(pkt3_data,
+ tm->tm_pktfield1_slabpos,
+ tm->tm_pktfield1_slabmask,
+ tm->tm_pktfield1_slabshr);
+ uint64_t pkt3_dscp = BITFIELD(pkt3_data,
+ tm->tm_pktfield2_slabpos,
+ tm->tm_pktfield2_slabmask,
+ tm->tm_pktfield2_slabshr);
+ uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2;
+ uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3;
+
+ uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport,
+ pkt0_pipe,
+ pkt0_tc,
+ pkt0_tc_q,
+ 0);
+ uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport,
+ pkt1_pipe,
+ pkt1_tc,
+ pkt1_tc_q,
+ 0);
+ uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport,
+ pkt2_pipe,
+ pkt2_tc,
+ pkt2_tc_q,
+ 0);
+ uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport,
+ pkt3_pipe,
+ pkt3_tc,
+ pkt3_tc_q,
+ 0);
+
+ pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
+ pkt0->hash.sched.hi = pkt0_sched >> 32;
+ pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
+ pkt1->hash.sched.hi = pkt1_sched >> 32;
+ pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
+ pkt2->hash.sched.hi = pkt2_sched >> 32;
+ pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
+ pkt3->hash.sched.hi = pkt3_sched >> 32;
+ }
+
+ for (; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
+
+ uint64_t pkt_subport = BITFIELD(pkt_data,
+ tm->tm_pktfield0_slabpos,
+ tm->tm_pktfield0_slabmask,
+ tm->tm_pktfield0_slabshr);
+ uint64_t pkt_pipe = BITFIELD(pkt_data,
+ tm->tm_pktfield1_slabpos,
+ tm->tm_pktfield1_slabmask,
+ tm->tm_pktfield1_slabshr);
+ uint64_t pkt_dscp = BITFIELD(pkt_data,
+ tm->tm_pktfield2_slabpos,
+ tm->tm_pktfield2_slabmask,
+ tm->tm_pktfield2_slabshr);
+ uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2;
+ uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3;
+
+ uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport,
+ pkt_pipe,
+ pkt_tc,
+ pkt_tc_q,
+ 0);
+
+ pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
+ pkt->hash.sched.hi = pkt_sched >> 32;
+ }
+}
+
+/*
+ * Soft port packet forwarding
+ */
+static void
+softport_packet_fwd(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_port *rte_tx_port = &ports[fs->tx_port];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+ uint32_t retry;
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+#endif
+
+ /* Packets Receive */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
+ pkts_burst, nb_pkt_per_burst);
+ fs->rx_packets += nb_rx;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+
+ if (rte_tx_port->softnic_enable) {
+ /* Set packet metadata if tm flag enabled */
+ if (rte_tx_port->softport.tm_flag)
+ pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx);
+
+ /* Softport run */
+ rte_pmd_softnic_run(fs->tx_port);
+ }
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ pkts_burst, nb_rx);
+
+ /* Retry if necessary */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
+ fs->tx_packets += nb_tx;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+
+ if (unlikely(nb_tx < nb_rx)) {
+ fs->fwd_dropped += (nb_rx - nb_tx);
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+static void
+set_tm_hierarchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
+{
+ struct rte_eth_link link_params;
+ uint64_t tm_port_rate;
+
+ memset(&link_params, 0, sizeof(link_params));
+
+ rte_eth_link_get(port_id, &link_params);
+ tm_port_rate = (uint64_t)link_params.link_speed * BYTES_IN_MBPS;
+
+ if (tm_port_rate > UINT32_MAX)
+ tm_port_rate = UINT32_MAX;
+
+ /* Set tm hierarchy shapers rate */
+ h->root_node_shaper_rate = tm_port_rate;
+ h->subport_node_shaper_rate =
+ tm_port_rate / SUBPORT_NODES_PER_PORT;
+ h->pipe_node_shaper_rate =
+ h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT;
+ h->tc_node_shaper_rate = h->pipe_node_shaper_rate;
+ h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate;
+}
+
+static int
+softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
+ struct rte_tm_error *error)
+{
+ struct rte_tm_node_params rnp;
+ struct rte_tm_shaper_params rsp;
+ uint32_t priority, weight, level_id, shaper_profile_id;
+
+ memset(&rsp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&rnp, 0, sizeof(struct rte_tm_node_params));
+
+ /* Shaper profile Parameters */
+ rsp.peak.rate = h->root_node_shaper_rate;
+ rsp.peak.size = TOKEN_BUCKET_SIZE;
+ rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ shaper_profile_id = 0;
+
+ if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
+ &rsp, error)) {
+ printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
+ __func__, error->type, error->message,
+ shaper_profile_id);
+ return -1;
+ }
+
+ /* Root Node Parameters */
+ h->root_node_id = ROOT_NODE_ID;
+ weight = 1;
+ priority = 0;
+ level_id = TM_NODE_LEVEL_PORT;
+ rnp.shaper_profile_id = shaper_profile_id;
+ rnp.nonleaf.n_sp_priorities = 1;
+ rnp.stats_mask = STATS_MASK_DEFAULT;
+
+ /* Add Node to TM Hierarchy */
+ if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL,
+ priority, weight, level_id, &rnp, error)) {
+ printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n",
+ __func__, error->type, error->message,
+ h->root_node_id, RTE_TM_NODE_ID_NULL,
+ level_id);
+ return -1;
+ }
+ /* Update */
+ h->n_shapers++;
+
+ printf(" Root node added (Start id %u, Count %u, level %u)\n",
+ h->root_node_id, 1, level_id);
+
+ return 0;
+}
+
+static int
+softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
+ struct rte_tm_error *error)
+{
+ uint32_t subport_parent_node_id, subport_node_id = 0;
+ struct rte_tm_node_params snp;
+ struct rte_tm_shaper_params ssp;
+ uint32_t priority, weight, level_id, shaper_profile_id;
+ uint32_t i;
+
+ memset(&ssp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&snp, 0, sizeof(struct rte_tm_node_params));
+
+ shaper_profile_id = h->n_shapers;
+
+ /* Add Shaper Profile to TM Hierarchy */
+ for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
+ ssp.peak.rate = h->subport_node_shaper_rate;
+ ssp.peak.size = TOKEN_BUCKET_SIZE;
+ ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
+ &ssp, error)) {
+ printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
+ __func__, error->type, error->message,
+ shaper_profile_id);
+ return -1;
+ }
+
+ /* Node Parameters */
+ h->subport_node_id[i] = SUBPORT_NODES_START_ID + i;
+ subport_parent_node_id = h->root_node_id;
+ weight = 1;
+ priority = 0;
+ level_id = TM_NODE_LEVEL_SUBPORT;
+ snp.shaper_profile_id = shaper_profile_id;
+ snp.nonleaf.n_sp_priorities = 1;
+ snp.stats_mask = STATS_MASK_DEFAULT;
+
+ /* Add Node to TM Hierarchy */
+ if (rte_tm_node_add(port_id,
+ h->subport_node_id[i],
+ subport_parent_node_id,
+ priority, weight,
+ level_id,
+ &snp,
+ error)) {
+ printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n",
+ __func__,
+ error->type,
+ error->message,
+ h->subport_node_id[i],
+ subport_parent_node_id,
+ level_id);
+ return -1;
+ }
+ shaper_profile_id++;
+ subport_node_id++;
+ }
+ /* Update */
+ h->n_shapers = shaper_profile_id;
+
+ printf(" Subport nodes added (Start id %u, Count %u, level %u)\n",
+ h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id);
+
+ return 0;
+}
+
+static int
+softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
+ struct rte_tm_error *error)
+{
+ uint32_t pipe_parent_node_id;
+ struct rte_tm_node_params pnp;
+ struct rte_tm_shaper_params psp;
+ uint32_t priority, weight, level_id, shaper_profile_id;
+ uint32_t i, j;
+
+ memset(&psp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&pnp, 0, sizeof(struct rte_tm_node_params));
+
+ shaper_profile_id = h->n_shapers;
+
+ /* Shaper Profile Parameters */
+ psp.peak.rate = h->pipe_node_shaper_rate;
+ psp.peak.size = TOKEN_BUCKET_SIZE;
+ psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ /* Pipe Node Parameters */
+ weight = 1;
+ priority = 0;
+ level_id = TM_NODE_LEVEL_PIPE;
+ pnp.nonleaf.n_sp_priorities = 4;
+ pnp.stats_mask = STATS_MASK_DEFAULT;
+
+ /* Add Shaper Profiles and Nodes to TM Hierarchy */
+ for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
+ for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
+ if (rte_tm_shaper_profile_add(port_id,
+ shaper_profile_id, &psp, error)) {
+ printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
+ __func__, error->type, error->message,
+ shaper_profile_id);
+ return -1;
+ }
+ pnp.shaper_profile_id = shaper_profile_id;
+ pipe_parent_node_id = h->subport_node_id[i];
+ h->pipe_node_id[i][j] = PIPE_NODES_START_ID +
+ (i * PIPE_NODES_PER_SUBPORT) + j;
+
+ if (rte_tm_node_add(port_id,
+ h->pipe_node_id[i][j],
+ pipe_parent_node_id,
+ priority, weight, level_id,
+ &pnp,
+ error)) {
+ printf("%s ERROR(%d)-%s!(node %u,parent %u )\n",
+ __func__,
+ error->type,
+ error->message,
+ h->pipe_node_id[i][j],
+ pipe_parent_node_id);
+
+ return -1;
+ }
+ shaper_profile_id++;
+ }
+ }
+ /* Update */
+ h->n_shapers = shaper_profile_id;
+
+ printf(" Pipe nodes added (Start id %u, Count %u, level %u)\n",
+ h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id);
+
+ return 0;
+}
+
+static int
+softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
+ struct rte_tm_error *error)
+{
+ uint32_t tc_parent_node_id;
+ struct rte_tm_node_params tnp;
+ struct rte_tm_shaper_params tsp, tssp;
+ uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE];
+ uint32_t priority, weight, level_id, shaper_profile_id;
+ uint32_t pos, n_tc_nodes, i, j, k;
+
+ memset(&tsp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&tssp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&tnp, 0, sizeof(struct rte_tm_node_params));
+
+ shaper_profile_id = h->n_shapers;
+
+ /* Private Shaper Profile (TC) Parameters */
+ tsp.peak.rate = h->tc_node_shaper_rate;
+ tsp.peak.size = TOKEN_BUCKET_SIZE;
+ tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ /* Shared Shaper Profile (TC) Parameters */
+ tssp.peak.rate = h->tc_node_shared_shaper_rate;
+ tssp.peak.size = TOKEN_BUCKET_SIZE;
+ tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+
+ /* TC Node Parameters */
+ weight = 1;
+ level_id = TM_NODE_LEVEL_TC;
+ tnp.n_shared_shapers = 1;
+ tnp.nonleaf.n_sp_priorities = 1;
+ tnp.stats_mask = STATS_MASK_DEFAULT;
+
+ /* Add Shared Shaper Profiles to TM Hierarchy */
+ for (i = 0; i < TC_NODES_PER_PIPE; i++) {
+ shared_shaper_profile_id[i] = shaper_profile_id;
+
+ if (rte_tm_shaper_profile_add(port_id,
+ shared_shaper_profile_id[i], &tssp, error)) {
+ printf("%s ERROR(%d)-%s!(Shared shaper profile id %u)\n",
+ __func__, error->type, error->message,
+ shared_shaper_profile_id[i]);
+
+ return -1;
+ }
+ if (rte_tm_shared_shaper_add_update(port_id, i,
+ shared_shaper_profile_id[i], error)) {
+ printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n",
+ __func__, error->type, error->message, i);
+
+ return -1;
+ }
+ shaper_profile_id++;
+ }
+
+ /* Add Shaper Profiles and Nodes to TM Hierarchy */
+ n_tc_nodes = 0;
+ for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
+ for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
+ for (k = 0; k < TC_NODES_PER_PIPE; k++) {
+ priority = k;
+ tc_parent_node_id = h->pipe_node_id[i][j];
+ tnp.shared_shaper_id =
+ (uint32_t *)calloc(1, sizeof(uint32_t));
+ if (tnp.shared_shaper_id == NULL) {
+ printf("%s ERROR: calloc failed\n",
+ __func__);
+ return -1;
+ }
+ tnp.shared_shaper_id[0] = k;
+ pos = j + (i * PIPE_NODES_PER_SUBPORT);
+ h->tc_node_id[pos][k] =
+ TC_NODES_START_ID + n_tc_nodes;
+
+ if (rte_tm_shaper_profile_add(port_id,
+ shaper_profile_id, &tsp, error)) {
+ printf("%s ERROR(%d)-%s!(shaper %u)\n",
+ __func__, error->type,
+ error->message,
+ shaper_profile_id);
+
+ return -1;
+ }
+ tnp.shaper_profile_id = shaper_profile_id;
+ if (rte_tm_node_add(port_id,
+ h->tc_node_id[pos][k],
+ tc_parent_node_id,
+ priority, weight,
+ level_id,
+ &tnp, error)) {
+ printf("%s ERROR(%d)-%s!(node id %u)\n",
+ __func__,
+ error->type,
+ error->message,
+ h->tc_node_id[pos][k]);
+
+ return -1;
+ }
+ shaper_profile_id++;
+ n_tc_nodes++;
+ }
+ }
+ }
+ /* Update */
+ h->n_shapers = shaper_profile_id;
+
+ printf(" TC nodes added (Start id %u, Count %u, level %u)\n",
+ h->tc_node_id[0][0], n_tc_nodes, level_id);
+
+ return 0;
+}
+
+static int
+softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
+ struct rte_tm_error *error)
+{
+ uint32_t queue_parent_node_id;
+ struct rte_tm_node_params qnp;
+ uint32_t priority, weight, level_id, pos;
+ uint32_t n_queue_nodes, i, j, k;
+
+ memset(&qnp, 0, sizeof(struct rte_tm_node_params));
+
+ /* Queue Node Parameters */
+ priority = 0;
+ weight = 1;
+ level_id = TM_NODE_LEVEL_QUEUE;
+ qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
+ qnp.stats_mask = STATS_MASK_QUEUE;
+
+ /* Add Queue Nodes to TM Hierarchy */
+ n_queue_nodes = 0;
+ for (i = 0; i < NUM_PIPE_NODES; i++) {
+ for (j = 0; j < TC_NODES_PER_PIPE; j++) {
+ queue_parent_node_id = h->tc_node_id[i][j];
+ for (k = 0; k < QUEUE_NODES_PER_TC; k++) {
+ pos = j + (i * TC_NODES_PER_PIPE);
+ h->queue_node_id[pos][k] = n_queue_nodes;
+ if (rte_tm_node_add(port_id,
+ h->queue_node_id[pos][k],
+ queue_parent_node_id,
+ priority,
+ weight,
+ level_id,
+ &qnp, error)) {
+ printf("%s ERROR(%d)-%s!(node %u)\n",
+ __func__,
+ error->type,
+ error->message,
+ h->queue_node_id[pos][k]);
+
+ return -1;
+ }
+ n_queue_nodes++;
+ }
+ }
+ }
+ printf(" Queue nodes added (Start id %u, Count %u, level %u)\n",
+ h->queue_node_id[0][0], n_queue_nodes, level_id);
+
+ return 0;
+}
+
+/*
+ * TM Packet Field Setup
+ */
+static void
+softport_tm_pktfield_setup(portid_t port_id)
+{
+ struct rte_port *p = &ports[port_id];
+ uint64_t pktfield0_mask = 0;
+ uint64_t pktfield1_mask = 0x0000000FFF000000LLU;
+ uint64_t pktfield2_mask = 0x00000000000000FCLLU;
+
+ p->softport.tm = (struct softnic_port_tm) {
+ .n_subports_per_port = SUBPORT_NODES_PER_PORT,
+ .n_pipes_per_subport = PIPE_NODES_PER_SUBPORT,
+
+ /* Packet field to identify subport
+ *
+ * Default configuration assumes only one subport, thus
+ * the subport ID is hardcoded to 0
+ */
+ .tm_pktfield0_slabpos = 0,
+ .tm_pktfield0_slabmask = pktfield0_mask,
+ .tm_pktfield0_slabshr =
+ __builtin_ctzll(pktfield0_mask),
+
+ /* Packet field to identify pipe.
+ *
+ * Default value assumes Ethernet/IPv4/UDP packets,
+ * UDP payload bits 12 .. 23
+ */
+ .tm_pktfield1_slabpos = 40,
+ .tm_pktfield1_slabmask = pktfield1_mask,
+ .tm_pktfield1_slabshr =
+ __builtin_ctzll(pktfield1_mask),
+
+ /* Packet field used as index into TC translation table
+ * to identify the traffic class and queue.
+ *
+ * Default value assumes Ethernet/IPv4 packets, IPv4
+ * DSCP field
+ */
+ .tm_pktfield2_slabpos = 8,
+ .tm_pktfield2_slabmask = pktfield2_mask,
+ .tm_pktfield2_slabshr =
+ __builtin_ctzll(pktfield2_mask),
+
+ .tm_tc_table = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ }, /**< TC translation table */
+ };
+}
+
+static int
+softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
+{
+
+ struct tm_hierarchy h;
+ int status;
+
+ memset(&h, 0, sizeof(struct tm_hierarchy));
+
+ /* TM hierarchy shapers rate */
+ set_tm_hierarchy_nodes_shaper_rate(port_id, &h);
+
+ /* Add root node (level 0) */
+ status = softport_tm_root_node_add(port_id, &h, error);
+ if (status)
+ return status;
+
+ /* Add subport node (level 1) */
+ status = softport_tm_subport_node_add(port_id, &h, error);
+ if (status)
+ return status;
+
+ /* Add pipe nodes (level 2) */
+ status = softport_tm_pipe_node_add(port_id, &h, error);
+ if (status)
+ return status;
+
+ /* Add traffic class nodes (level 3) */
+ status = softport_tm_tc_node_add(port_id, &h, error);
+ if (status)
+ return status;
+
+ /* Add queue nodes (level 4) */
+ status = softport_tm_queue_node_add(port_id, &h, error);
+ if (status)
+ return status;
+
+ /* TM packet fields setup */
+ softport_tm_pktfield_setup(port_id);
+
+ return 0;
+}
+
+/*
+ * Soft port Init
+ */
+static void
+softport_tm_begin(portid_t pi)
+{
+ struct rte_port *port = &ports[pi];
+
+ /* Soft port TM flag */
+ if (port->softport.tm_flag == 1) {
+ printf("\n\n TM feature available on port %u\n", pi);
+
+ /* Soft port TM hierarchy configuration */
+ if ((port->softport.tm.hierarchy_config == 0) &&
+ (port->softport.tm.default_hierarchy_enable == 1)) {
+ struct rte_tm_error error;
+ int status;
+
+ /* Stop port */
+ rte_eth_dev_stop(pi);
+
+ /* TM hierarchy specification */
+ status = softport_tm_hierarchy_specify(pi, &error);
+ if (status) {
+ printf(" TM hierarchy build error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf("\n TM Hierarchy Specified!\n");
+
+ /* TM hierarchy commit */
+ status = rte_tm_hierarchy_commit(pi, 0, &error);
+ if (status) {
+ printf(" Hierarchy commit error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf(" Hierarchy Committed (port %u)!", pi);
+ port->softport.tm.hierarchy_config = 1;
+
+ /* Start port */
+ status = rte_eth_dev_start(pi);
+ if (status) {
+ printf("\n Port %u start error!\n", pi);
+ return;
+ }
+ printf("\n Port %u started!\n", pi);
+ return;
+ }
+ }
+ printf("\n TM feature not available on port %u\n", pi);
+}
+
+struct fwd_engine softnic_tm_engine = {
+ .fwd_mode_name = "tm",
+ .port_fwd_begin = softport_tm_begin,
+ .port_fwd_end = NULL,
+ .packet_fwd = softport_packet_fwd,
+};
+
+struct fwd_engine softnic_tm_bypass_engine = {
+ .fwd_mode_name = "tm-bypass",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = softport_packet_fwd,
+};
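
pkt_metadata_set() above classifies packets by pulling the subport/pipe/DSCP fields out of the raw frame with BITFIELD (note: an unaligned big-endian 64-bit load) and packs the result into mbuf->hash.sched with RTE_SCHED_PORT_HIERARCHY. A standalone sketch of the pack/unpack round trip, copying the same bit layout under a local name:

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit layout as tm.c's RTE_SCHED_PORT_HIERARCHY:
     * queue[1:0] tc[3:2] color[5:4] subport[31:16] pipe[63:32]. */
    #define SCHED_HIERARCHY(subport, pipe, tc, queue, color) \
        ((((uint64_t)(queue)) & 0x3) | \
        ((((uint64_t)(tc)) & 0x3) << 2) | \
        ((((uint64_t)(color)) & 0x3) << 4) | \
        ((((uint64_t)(subport)) & 0xFFFF) << 16) | \
        ((((uint64_t)(pipe)) & 0xFFFFFFFF) << 32))

    int main(void)
    {
        uint64_t v = SCHED_HIERARCHY(0, 1234, 2, 3, 0);

        /* mbuf->hash.sched stores the two 32-bit halves */
        printf("lo=0x%08x hi=0x%08x\n",
                (unsigned)(v & 0xFFFFFFFF), (unsigned)(v >> 32));
        printf("pipe=%u tc=%u queue=%u\n",
                (unsigned)(v >> 32), (unsigned)((v >> 2) & 0x3),
                (unsigned)(v & 0x3));
        return 0;
    }
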
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 7070ddc3..309c7389 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/buildtools/auto-config-h.sh b/buildtools/auto-config-h.sh
index 4356d7e3..cb8bce9b 100755
--- a/buildtools/auto-config-h.sh
+++ b/buildtools/auto-config-h.sh
@@ -114,9 +114,9 @@ printf "\
" "$include" "$code" > "${temp}" &&
if ${CC} ${CPPFLAGS} ${EXTRA_CPPFLAGS} ${CFLAGS} ${EXTRA_CFLAGS} \
${AUTO_CONFIG_CFLAGS} \
- -c -o /dev/null "${temp}" 1>&${out} 2>&${err}
+ -c -o ${temp}.o "${temp}" 1>&${out} 2>&${err}
then
- rm -f "${temp}"
+ rm -f "${temp}" "${temp}.o"
printf "\
#ifndef %s
#define %s 1
@@ -125,7 +125,7 @@ then
" "${macro}" "${macro}" "${macro}" >> "${file}" &&
printf 'Defining %s.\n' "${macro}"
else
- rm -f "${temp}"
+ rm -f "${temp}" "${temp}.o"
printf "\
/* %s is not defined. */
diff --git a/buildtools/pmdinfogen/pmdinfogen.c b/buildtools/pmdinfogen/pmdinfogen.c
index ba1a12e2..96ccbf33 100644
--- a/buildtools/pmdinfogen/pmdinfogen.c
+++ b/buildtools/pmdinfogen/pmdinfogen.c
@@ -327,6 +327,10 @@ static int locate_pmd_entries(struct elf_info *info)
do {
new = calloc(sizeof(struct pmd_driver), 1);
+ if (new == NULL) {
+ fprintf(stderr, "Failed to allocate memory\n");
+ return -1;
+ }
new->name_sym = find_sym_in_symtab(info, "this_pmd_name", last);
last = new->name_sym;
if (!new->name_sym)
@@ -397,7 +401,7 @@ static void output_pmd_info_string(struct elf_info *info, char *outfile)
int main(int argc, char **argv)
{
- struct elf_info info;
+ struct elf_info info = {0};
int rc = 1;
if (argc < 3) {
@@ -408,7 +412,8 @@ int main(int argc, char **argv)
}
parse_elf(&info, argv[1]);
- locate_pmd_entries(&info);
+ if (locate_pmd_entries(&info) < 0)
+ exit(1);
if (info.drivers) {
output_pmd_info_string(&info, argv[2]);
diff --git a/config/common_armv8a_linuxapp b/config/common_armv8a_linuxapp
index 880313ab..6732d1e3 100644
--- a/config/common_armv8a_linuxapp
+++ b/config/common_armv8a_linuxapp
@@ -44,8 +44,6 @@ CONFIG_RTE_FORCE_INTRINSICS=y
# to address minimum DMA alignment across all arm64 implementations.
CONFIG_RTE_CACHE_LINE_SIZE=128
-CONFIG_RTE_EAL_IGB_UIO=n
-
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/common_base b/config/common_base
index 5e97a08b..34f04a9f 100644
--- a/config/common_base
+++ b/config/common_base
@@ -93,6 +93,7 @@ CONFIG_RTE_MAX_NUMA_NODES=8
CONFIG_RTE_MAX_MEMSEG=256
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
+CONFIG_RTE_ENABLE_ASSERT=n
CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
@@ -122,6 +123,11 @@ CONFIG_RTE_EAL_PMD_PATH=""
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y
#
+# Compile the PCI library
+#
+CONFIG_RTE_LIBRTE_PCI=y
+
+#
# Compile the argument parser library
#
CONFIG_RTE_LIBRTE_KVARGS=y
@@ -136,6 +142,7 @@ CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
CONFIG_RTE_LIBRTE_IEEE1588=n
CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
+CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n
#
# Turn off Tx preparation stage
@@ -146,6 +153,16 @@ CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
#
+# Compile PCI bus driver
+#
+CONFIG_RTE_LIBRTE_PCI_BUS=y
+
+#
+# Compile the vdev bus
+#
+CONFIG_RTE_LIBRTE_VDEV_BUS=y
+
+#
# Compile burst-oriented Amazon ENA PMD driver
#
CONFIG_RTE_LIBRTE_ENA_PMD=y
@@ -214,10 +231,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n
-CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N=4
-CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE=0
CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
-CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
#
# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD
@@ -262,6 +276,11 @@ CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG=n
#
+# Compile Marvell PMD driver
+#
+CONFIG_RTE_LIBRTE_MRVL_PMD=n
+
+#
# Compile burst-oriented Broadcom BNXT PMD driver
#
CONFIG_RTE_LIBRTE_BNXT_PMD=y
@@ -273,6 +292,11 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=y
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
#
+# Compile SOFTNIC PMD
+#
+CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y
+
+#
# Compile software PMD backed by SZEDATA2 device
#
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
@@ -303,6 +327,21 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
+# NXP DPAA Bus
+CONFIG_RTE_LIBRTE_DPAA_BUS=n
+CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
+CONFIG_RTE_LIBRTE_DPAA_PMD=n
+
+#
+# Compile burst-oriented Cavium OCTEONTX network PMD driver
+#
+CONFIG_RTE_LIBRTE_OCTEONTX_PMD=y
+CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_MBOX=n
+
#
# Compile NXP DPAA2 FSL-MC Bus
#
@@ -376,6 +415,7 @@ CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=n
#Provides abs path/name of the firmware file.
#Empty string denotes driver will use default firmware
CONFIG_RTE_LIBRTE_QEDE_FW=""
@@ -411,11 +451,6 @@ CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n
CONFIG_RTE_LIBRTE_PMD_TAP=n
#
-# Compile Xen PMD
-#
-CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
-
-#
# Compile null PMD
#
CONFIG_RTE_LIBRTE_PMD_NULL=y
@@ -453,6 +488,14 @@ CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
#
+# NXP DPAA caam - crypto driver
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n
+
+#
# Compile PMD for QuickAssist based devices
#
CONFIG_RTE_LIBRTE_PMD_QAT=n
@@ -514,6 +557,17 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
+# Compile PMD for Marvell Crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
+CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
+
+#
+# Compile generic security library
+#
+CONFIG_RTE_LIBRTE_SECURITY=y
+
+#
# Compile generic event device library
#
CONFIG_RTE_LIBRTE_EVENTDEV=y
@@ -558,6 +612,12 @@ CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
#
+# Compile PMD for octeontx fpa mempool device
+#
+CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=y
+CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG=n
+
+#
# Compile librte_mbuf
#
CONFIG_RTE_LIBRTE_MBUF=y
@@ -595,6 +655,11 @@ CONFIG_RTE_LIBRTE_HASH_DEBUG=n
CONFIG_RTE_LIBRTE_EFD=y
#
+# Compile librte_member
+#
+CONFIG_RTE_LIBRTE_MEMBER=y
+
+#
# Compile librte_jobstats
#
CONFIG_RTE_LIBRTE_JOBSTATS=y
@@ -652,11 +717,21 @@ CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n
CONFIG_RTE_LIBRTE_GRO=y
#
+# Compile GSO library
+#
+CONFIG_RTE_LIBRTE_GSO=y
+
+#
# Compile librte_meter
#
CONFIG_RTE_LIBRTE_METER=y
#
+# Compile librte_classify
+#
+CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y
+
+#
# Compile librte_sched
#
CONFIG_RTE_LIBRTE_SCHED=y
@@ -724,11 +799,6 @@ CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_VHOST=n
#
-#Compile Xen domain0 support
-#
-CONFIG_RTE_LIBRTE_XEN_DOM0=n
-
-#
# Compile the test application
#
CONFIG_RTE_APP_TEST=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 00bc2ab9..a20b7a85 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -76,8 +76,7 @@ CONFIG_RTE_LIBRTE_I40E_PMD=n
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
-CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
-CONFIG_RTE_LIBRTE_PMD_BNX2X=n
+CONFIG_RTE_LIBRTE_BNX2X_PMD=n
CONFIG_RTE_LIBRTE_QEDE_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc b/config/defconfig_arm64-dpaa-linuxapp-gcc
new file mode 100644
index 00000000..e577432f
--- /dev/null
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -0,0 +1,76 @@
+# BSD LICENSE
+#
+# Copyright 2016 Freescale Semiconductor, Inc.
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include "defconfig_arm64-armv8a-linuxapp-gcc"
+
+# NXP (Freescale) - SoC architecture with FMAN, QMAN & BMAN support
+CONFIG_RTE_MACHINE="dpaa"
+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_MAX_LCORE=4
+CONFIG_RTE_MAX_NUMA_NODES=1
+CONFIG_RTE_CACHE_LINE_SIZE=64
+CONFIG_RTE_PKTMBUF_HEADROOM=128
+
+# NXP DPAA Bus
+CONFIG_RTE_LIBRTE_DPAA_BUS=y
+CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
+
+# NXP DPAA Mempool
+CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa"
+
+# Compile software NXP DPAA PMD
+CONFIG_RTE_LIBRTE_DPAA_PMD=y
+
+#
+# FSL DPAA caam - crypto driver
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n
+
+# DPAA CAAM driver instances
+CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
+
+#
+# Number of sessions to create in the session memory pool
+# on a single DPAA SEC device.
+#
+CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS=2048
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 8a42944a..91f4993d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -39,7 +39,7 @@ CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
#
# Compile Environment Abstraction Layer
#
-CONFIG_RTE_MAX_LCORE=8
+CONFIG_RTE_MAX_LCORE=16
CONFIG_RTE_MAX_NUMA_NODES=1
CONFIG_RTE_CACHE_LINE_SIZE=64
diff --git a/devtools/get-maintainer.sh b/devtools/get-maintainer.sh
new file mode 100755
index 00000000..5ae6b5ae
--- /dev/null
+++ b/devtools/get-maintainer.sh
@@ -0,0 +1,85 @@
+#!/bin/sh
+
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Load config options:
+# - DPDK_GETMAINTAINER_PATH
+. $(dirname $(readlink -e $0))/load-devel-config
+
+options="--no-git-fallback"
+options="$options --no-rolestats"
+
+print_usage () {
+ cat <<- END_OF_HELP
+ usage: $(basename $0) <patch>
+ END_OF_HELP
+}
+
+# Requires DPDK_GETMAINTAINER_PATH devel config option set,
+# please check devtools/load-devel-config.
+# DPDK_GETMAINTAINER_PATH should be full path to the get_maintainer.pl script,
+# like:
+# DPDK_GETMAINTAINER_PATH=~/linux/scripts/get_maintainer.pl
+
+if [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then
+ print_usage >&2
+ echo
+ echo 'Cannot execute DPDK_GETMAINTAINER_PATH' >&2
+ exit 1
+fi
+
+FILES="COPYING CREDITS Kbuild"
+FOLDERS="Documentation arch include fs init ipc kernel scripts"
+
+# Kernel script checks for some files and folders to run
+workaround () {
+ for f in $FILES; do
+ if [ ! -f $f ]; then touch $f; fi
+ done
+
+ for d in $FOLDERS; do
+ if [ ! -d $d ]; then mkdir $d; fi
+ done
+}
+
+fix_workaround () {
+ for f in $FILES; do if [ -f $f ]; then rm -f $f; fi; done
+ for d in $FOLDERS; do if [ -d $d ]; then rmdir $d; fi; done
+}
+
+# clean workaround on exit
+trap fix_workaround EXIT
+
+workaround
+$DPDK_GETMAINTAINER_PATH $options "$@"
+# fix_workaround called on exit by trap
diff --git a/devtools/git-log-fixes.sh b/devtools/git-log-fixes.sh
index 58006874..cd5cf893 100755
--- a/devtools/git-log-fixes.sh
+++ b/devtools/git-log-fixes.sh
@@ -66,7 +66,16 @@ range="$*"
# get major release version of a commit
commit_version () # <hash>
{
- tag=$(git tag -l --contains $1 --merged | head -n1)
+ # use current branch as history reference
+ local refbranch=$(git rev-parse --abbrev-ref HEAD)
+ local tag=$( (git tag -l --contains $1 --merged $refbranch 2>&- ||
+ # the tag --merged option was introduced in git 2.7.0
+ # below is a fallback in case of an old git version
+ for t in $(git tag -l --contains $1) ; do
+ git branch $refbranch --contains $t |
+ sed "s,.\+,$t,"
+ done) |
+ head -n1)
if [ -z "$tag" ] ; then
# before -rc1 tag of release in progress
make showversion | cut -d'.' -f-2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index c6dfaf0a..092d3a7f 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -39,7 +39,7 @@ default_path=$PATH
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
# - DPDK_DEP_LDFLAGS
-# - DPDK_DEP_MOFED (y/[n])
+# - DPDK_DEP_MLX (y/[n])
# - DPDK_DEP_NUMA ([y]/n)
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
@@ -47,6 +47,7 @@ default_path=$PATH
# - DPDK_DEP_ZLIB (y/[n])
# - DPDK_MAKE_JOBS (int)
# - DPDK_NOTIFY (notify-send)
+# - LIBMUSDK_PATH
# - LIBSSO_SNOW3G_PATH
# - LIBSSO_KASUMI_PATH
# - LIBSSO_ZUC_PATH
@@ -121,7 +122,7 @@ reset_env ()
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
unset DPDK_DEP_LDFLAGS
- unset DPDK_DEP_MOFED
+ unset DPDK_DEP_MLX
unset DPDK_DEP_NUMA
unset DPDK_DEP_PCAP
unset DPDK_DEP_SSL
@@ -129,6 +130,7 @@ reset_env ()
unset DPDK_DEP_ZLIB
unset AESNI_MULTI_BUFFER_LIB_PATH
unset ARMV8_CRYPTO_LIB_PATH
+ unset LIBMUSDK_PATH
unset LIBSSO_SNOW3G_PATH
unset LIBSSO_KASUMI_PATH
unset LIBSSO_ZUC_PATH
@@ -167,7 +169,7 @@ config () # <directory> <target> <options>
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
sed -ri 's,(RESOURCE_TAR=)n,\1y,' $1/.config
- test "$DPDK_DEP_MOFED" != y || \
+ test "$DPDK_DEP_MLX" != y || \
sed -ri 's,(MLX._PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_SZE" != y || \
sed -ri 's,(PMD_SZEDATA2=)n,\1y,' $1/.config
@@ -193,6 +195,8 @@ config () # <directory> <target> <options>
test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config
sed -ri 's,(SCHED_.*=)n,\1y,' $1/.config
+ test -z "$LIBMUSDK_PATH" || \
+ sed -ri 's,(PMD_MRVL_CRYPTO=)n,\1y,' $1/.config
build_config_hook $1 $2 $3
# Explicit enabler/disabler (uppercase)
diff --git a/devtools/validate-abi.sh b/devtools/validate-abi.sh
index 0accc99b..8caf43e8 100755
--- a/devtools/validate-abi.sh
+++ b/devtools/validate-abi.sh
@@ -1,7 +1,8 @@
-#!/bin/sh
+#!/usr/bin/env bash
# BSD LICENSE
#
# Copyright(c) 2015 Neil Horman. All rights reserved.
+# Copyright(c) 2017 6WIND S.A.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,236 +28,248 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-TAG1=$1
-TAG2=$2
-TARGET=$3
-ABI_DIR=`mktemp -d -p /tmp ABI.XXXXXX`
+set -e
-usage() {
- echo "$0 <REV1> <REV2> <TARGET>"
-}
+abicheck=abi-compliance-checker
+abidump=abi-dumper
+default_dst=abi-check
+default_target=x86_64-native-linuxapp-gcc
-log() {
- local level=$1
- shift
- echo "$*"
+# trap on error
+err_report() {
+ echo "$0: error at line $1"
}
+trap 'err_report $LINENO' ERR
-validate_tags() {
+print_usage () {
+ cat <<- END_OF_HELP
+ $(basename $0) [options] <rev1> <rev2>
- if [ -z "$HASH1" ]
- then
- echo "invalid revision: $TAG1"
- return
- fi
- if [ -z "$HASH2" ]
- then
- echo "invalid revision: $TAG2"
- return
- fi
+ This script compares the ABI of two git revisions of the current
+ workspace. The output is an HTML report and a compilation log.
+
+ The objective is to make sure that applications built against
+ DSOs from the first revision can still run when executed using
+ the DSOs built from the second revision.
+
+ <rev1> and <rev2> are git commit ids or tags.
+
+ Options:
+ -h show this help
+ -j <num> enable parallel compilation with <num> threads
+ -v show compilation logs on the console
+ -d <dir> change working directory (default is ${default_dst})
+ -t <target> the dpdk target to use (default is ${default_target})
+ -f overwrite existing files in destination directory
+
+ The script returns 0 on success, or the value of the last failing
+ call of ${abicheck} (incompatible ABI, or the tool ran with errors).
+ Errors returned by ${abidump} are ignored.
+
+ END_OF_HELP
}
-validate_args() {
- if [ -z "$TAG1" ]
- then
- echo "Must Specify REV1"
- return
- fi
- if [ -z "$TAG2" ]
- then
- echo "Must Specify REV2"
- return
- fi
- if [ -z "$TARGET" ]
- then
- echo "Must Specify a build target"
+# log in the file, and on stdout if verbose
+# $1: level string
+# $2: string to be logged
+log() {
+ echo "$1: $2"
+ if [ "${verbose}" != "true" ]; then
+ echo "$1: $2" >&3
fi
}
+# launch a command and log it, taking care of surrounding spaces with quotes
+cmd() {
+ local i s whitespace ret
+ s=""
+ whitespace="[[:space:]]"
+ for i in "$@"; do
+ if [[ $i =~ $whitespace ]]; then
+ i=\"$i\"
+ fi
+ if [ -z "$s" ]; then
+ s="$i"
+ else
+ s="$s $i"
+ fi
+ done
+
+ ret=0
+ log "CMD" "$s"
+ "$@" || ret=$?
+ if [ "$ret" != "0" ]; then
+ log "CMD" "previous command returned $ret"
+ fi
+
+ return $ret
+}
-cleanup_and_exit() {
- rm -rf $ABI_DIR
- git checkout $CURRENT_BRANCH
- exit $1
+# redirect or copy stderr/stdout to a file
+# the syntax is unfamiliar, but it makes the rest of the
+# code easier to read, avoiding the use of pipes
+set_log_file() {
+ # save original stdout and stderr in fd 3 and 4
+ exec 3>&1
+ exec 4>&2
+ # create a new fd 5 that sends to a file
+ exec 5> >(cat > $1)
+ # send stdout and stderr to fd 5
+ if [ "${verbose}" = "true" ]; then
+ exec 1> >(tee /dev/fd/5 >&3)
+ exec 2> >(tee /dev/fd/5 >&4)
+ else
+ exec 1>&5
+ exec 2>&5
+ fi
}
# Make sure we configure SHARED libraries
# Also turn off IGB and KNI as those require kernel headers to build
fixup_config() {
- sed -i -e"$ a\CONFIG_RTE_BUILD_SHARED_LIB=y" config/defconfig_$TARGET
- sed -i -e"$ a\CONFIG_RTE_NEXT_ABI=n" config/defconfig_$TARGET
- sed -i -e"$ a\CONFIG_RTE_EAL_IGB_UIO=n" config/defconfig_$TARGET
- sed -i -e"$ a\CONFIG_RTE_LIBRTE_KNI=n" config/defconfig_$TARGET
- sed -i -e"$ a\CONFIG_RTE_KNI_KMOD=n" config/defconfig_$TARGET
+ local conf=config/defconfig_$target
+ cmd sed -i -e"$ a\CONFIG_RTE_BUILD_SHARED_LIB=y" $conf
+ cmd sed -i -e"$ a\CONFIG_RTE_NEXT_ABI=n" $conf
+ cmd sed -i -e"$ a\CONFIG_RTE_EAL_IGB_UIO=n" $conf
+ cmd sed -i -e"$ a\CONFIG_RTE_LIBRTE_KNI=n" $conf
+ cmd sed -i -e"$ a\CONFIG_RTE_KNI_KMOD=n" $conf
}
-###########################################
-#START
-############################################
+# build dpdk for the given tag and dump abi
+# $1: hash of the revision
+gen_abi() {
+ local i
+
+ cmd git clone ${dpdkroot} ${dst}/${1}
+ cmd cd ${dst}/${1}
+
+ log "INFO" "Checking out version ${1} of the dpdk"
+ # Move to the old version of the tree
+ cmd git checkout ${1}
+
+ fixup_config
+
+ # Now configure the build
+ log "INFO" "Configuring DPDK ${1}"
+ cmd make config T=$target O=$target
+
+ # Checking abi compliance relies on using the dwarf information in
+ # the shared objects. Build with -g to include them.
+ log "INFO" "Building DPDK ${1}. This might take a moment"
+ cmd make -j$parallel O=$target V=1 EXTRA_CFLAGS="-g -Og -Wno-error" \
+ EXTRA_LDFLAGS="-g" || log "INFO" "The build failed"
+
+ # Move to the lib directory
+ cmd cd ${PWD}/$target/lib
+ log "INFO" "Collecting ABI information for ${1}"
+ for i in *.so; do
+ [ -e "$i" ] || break
+ cmd $abidump ${i} -o $dst/${1}/${i}.dump -lver ${1} || true
+ # hack to ignore empty SymbolsInfo section (no public ABI)
+ if grep -q "'SymbolInfo' => {}," $dst/${1}/${i}.dump \
+ 2> /dev/null; then
+ log "INFO" "${i} has no public ABI, remove dump file"
+ cmd rm -f $dst/${1}/${i}.dump
+ fi
+ done
+}
-#trap on ctrl-c to clean up
-trap cleanup_and_exit SIGINT
+verbose=false
+parallel=1
+dst=${default_dst}
+target=${default_target}
+force=0
+while getopts j:vd:t:fh ARG ; do
+ case $ARG in
+ j ) parallel=$OPTARG ;;
+ v ) verbose=true ;;
+ d ) dst=$OPTARG ;;
+ t ) target=$OPTARG ;;
+ f ) force=1 ;;
+ h ) print_usage ; exit 0 ;;
+ ? ) print_usage ; exit 1 ;;
+ esac
+done
+shift $(($OPTIND - 1))
-if [ -z "$DPDK_MAKE_JOBS" ]
-then
- # This counts the number of cpus on the system
- if [ -e /usr/bin/lscpu ]
- then
- DPDK_MAKE_JOBS=`lscpu -p=cpu | grep -v "#" | wc -l`
- else
- DPDK_MAKE_JOBS=1
- fi
+if [ $# != 2 ]; then
+ print_usage
+ exit 1
fi
-#Save the current branch
-CURRENT_BRANCH=`git branch | grep \* | cut -d' ' -f2`
+tag1=$1
+tag2=$2
-if [ -z "$CURRENT_BRANCH" ]
-then
- CURRENT_BRANCH=`git log --pretty=format:%H HEAD~1..HEAD`
-fi
+# convert path to absolute
+case "${dst}" in
+ /*) ;;
+ *) dst=${PWD}/${dst} ;;
+esac
+dpdkroot=$(readlink -e $(dirname $0)/..)
-if [ -n "$VERBOSE" ]
-then
- export VERBOSE=/dev/stdout
-else
- export VERBOSE=/dev/null
+if [ -e "${dst}" -a "$force" = 0 ]; then
+ echo "The ${dst} directory is not empty. Remove it, use another"
+ echo "one (-d <dir>), or force overriding (-f)"
+ exit 1
fi
-# Validate that we have all the arguments we need
-res=$(validate_args)
-if [ -n "$res" ]
-then
- echo $res
- usage
- cleanup_and_exit 1
-fi
+rm -rf ${dst}
+mkdir -p ${dst}
+set_log_file ${dst}/abi-check.log
+log "INFO" "Logs available in ${dst}/abi-check.log"
-HASH1=$(git show -s --format=%H "$TAG1" -- 2> /dev/null | tail -1)
-HASH2=$(git show -s --format=%H "$TAG2" -- 2> /dev/null | tail -1)
+command -v ${abicheck} || log "INFO" "Can't find ${abicheck} utility"
+command -v ${abidump} || log "INFO" "Can't find ${abidump} utility"
-# Make sure our tags exist
-res=$(validate_tags)
-if [ -n "$res" ]
-then
- echo $res
- cleanup_and_exit 1
-fi
+hash1=$(git show -s --format=%h "$tag1" -- 2> /dev/null | tail -1)
+hash2=$(git show -s --format=%h "$tag2" -- 2> /dev/null | tail -1)
# Make hashes available in output for non-local reference
-TAG1="$TAG1 ($HASH1)"
-TAG2="$TAG2 ($HASH2)"
-
-ABICHECK=`which abi-compliance-checker 2>/dev/null`
-if [ $? -ne 0 ]
-then
- log "INFO" "Can't find abi-compliance-checker utility"
- cleanup_and_exit 1
-fi
-
-ABIDUMP=`which abi-dumper 2>/dev/null`
-if [ $? -ne 0 ]
-then
- log "INFO" "Can't find abi-dumper utility"
- cleanup_and_exit 1
-fi
+tag1="$tag1 ($hash1)"
+tag2="$tag2 ($hash2)"
-log "INFO" "We're going to check and make sure that applications built"
-log "INFO" "against DPDK DSOs from version $TAG1 will still run when executed"
-log "INFO" "against DPDK DSOs built from version $TAG2."
-log "INFO" ""
-
-# Check to make sure we have a clean tree
-git status | grep -q clean
-if [ $? -ne 0 ]
-then
- log "WARN" "Working directory not clean, aborting"
- cleanup_and_exit 1
+if [ "$hash1" = "$hash2" ]; then
+ log "ERROR" "$tag1 and $tag2 are the same revisions"
+ exit 1
fi
-# Move to the root of the git tree
-cd $(dirname $0)/..
+cmd mkdir -p ${dst}
-log "INFO" "Checking out version $TAG1 of the dpdk"
-# Move to the old version of the tree
-git checkout $HASH1
+# dump abi for each revision
+gen_abi ${hash1}
+gen_abi ${hash2}
-fixup_config
+# compare the abi dumps
+cmd cd ${dst}
+ret=0
+list=""
+for i in ${hash2}/*.dump; do
+ name=`basename $i`
+ libname=${name%.dump}
-# Checking abi compliance relies on using the dwarf information in
-# The shared objects. Thats only included in the DSO's if we build
-# with -g
-export EXTRA_CFLAGS="$EXTRA_CFLAGS -g -O0"
-export EXTRA_LDFLAGS="$EXTRA_LDFLAGS -g"
-
-# Now configure the build
-log "INFO" "Configuring DPDK $TAG1"
-make config T=$TARGET O=$TARGET > $VERBOSE 2>&1
-
-log "INFO" "Building DPDK $TAG1. This might take a moment"
-make -j$DPDK_MAKE_JOBS O=$TARGET > $VERBOSE 2>&1
-
-if [ $? -ne 0 ]
-then
- log "INFO" "THE BUILD FAILED. ABORTING"
- cleanup_and_exit 1
-fi
+ if [ ! -f ${hash1}/$name ]; then
+ log "INFO" "$NAME does not exist in $tag1. skipping..."
+ continue
+ fi
-# Move to the lib directory
-cd $TARGET/lib
-log "INFO" "COLLECTING ABI INFORMATION FOR $TAG1"
-for i in `ls *.so`
-do
- $ABIDUMP $i -o $ABI_DIR/$i-ABI-0.dump -lver $HASH1
+ local_ret=0
+ cmd $abicheck -l $libname \
+ -old ${hash1}/$name -new ${hash2}/$name || local_ret=$?
+ if [ $local_ret != 0 ]; then
+ log "NOTICE" "$abicheck returned $local_ret"
+ ret=$local_ret
+ list="$list $libname"
+ fi
done
-cd ../..
-
-# Now clean the tree, checkout the second tag, and rebuild
-git clean -f -d
-git reset --hard
-# Move to the new version of the tree
-log "INFO" "Checking out version $TAG2 of the dpdk"
-git checkout $HASH2
-
-fixup_config
-
-# Now configure the build
-log "INFO" "Configuring DPDK $TAG2"
-make config T=$TARGET O=$TARGET > $VERBOSE 2>&1
-
-log "INFO" "Building DPDK $TAG2. This might take a moment"
-make -j$DPDK_MAKE_JOBS O=$TARGET > $VERBOSE 2>&1
-if [ $? -ne 0 ]
-then
- log "INFO" "THE BUILD FAILED. ABORTING"
- cleanup_and_exit 1
+if [ $ret != 0 ]; then
+ log "NOTICE" "ABI may be incompatible, check reports/logs for details."
+ log "NOTICE" "Incompatible list: $list"
+else
+ log "NOTICE" "No error detected, ABI is compatible."
fi
-cd $TARGET/lib
-log "INFO" "COLLECTING ABI INFORMATION FOR $TAG2"
-for i in `ls *.so`
-do
- $ABIDUMP $i -o $ABI_DIR/$i-ABI-1.dump -lver $HASH2
-done
-cd ../..
-
-# Start comparison of ABI dumps
-for i in `ls $ABI_DIR/*-1.dump`
-do
- NEWNAME=`basename $i`
- OLDNAME=`basename $i | sed -e"s/1.dump/0.dump/"`
- LIBNAME=`basename $i | sed -e"s/-ABI-1.dump//"`
-
- if [ ! -f $ABI_DIR/$OLDNAME ]
- then
- log "INFO" "$OLDNAME DOES NOT EXIST IN $TAG1. SKIPPING..."
- fi
-
- #compare the abi dumps
- $ABICHECK -l $LIBNAME -old $ABI_DIR/$OLDNAME -new $ABI_DIR/$NEWNAME
-done
+log "INFO" "Logs are in ${dst}/abi-check.log"
+log "INFO" "HTML reports are in ${dst}/compat_reports directory"
-git reset --hard
-log "INFO" "ABI CHECK COMPLETE. REPORTS ARE IN compat_report directory"
-cleanup_and_exit 0
+exit $ret
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 19e0d4f3..34927023 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -41,8 +41,11 @@ The public API headers are grouped by topics:
[ethctrl] (@ref rte_eth_ctrl.h),
[rte_flow] (@ref rte_flow.h),
[rte_tm] (@ref rte_tm.h),
+ [rte_mtr] (@ref rte_mtr.h),
[cryptodev] (@ref rte_cryptodev.h),
+ [security] (@ref rte_security.h),
[eventdev] (@ref rte_eventdev.h),
+ [event_eth_rx_adapter] (@ref rte_event_eth_rx_adapter.h),
[metrics] (@ref rte_metrics.h),
[bitrate] (@ref rte_bitrate.h),
[latency] (@ref rte_latencystats.h),
@@ -50,11 +53,13 @@ The public API headers are grouped by topics:
[PCI] (@ref rte_pci.h)
- **device specific**:
+ [softnic] (@ref rte_eth_softnic.h),
[bond] (@ref rte_eth_bond.h),
[vhost] (@ref rte_vhost.h),
[KNI] (@ref rte_kni.h),
[ixgbe] (@ref rte_pmd_ixgbe.h),
[i40e] (@ref rte_pmd_i40e.h),
+ [bnxt] (@ref rte_pmd_bnxt.h),
[crypto_scheduler] (@ref rte_cryptodev_scheduler.h)
- **memory**:
@@ -96,16 +101,16 @@ The public API headers are grouped by topics:
[ethernet] (@ref rte_ether.h),
[ARP] (@ref rte_arp.h),
[ICMP] (@ref rte_icmp.h),
+ [ESP] (@ref rte_esp.h),
[IP] (@ref rte_ip.h),
[SCTP] (@ref rte_sctp.h),
[TCP] (@ref rte_tcp.h),
[UDP] (@ref rte_udp.h),
[GRO] (@ref rte_gro.h),
+ [GSO] (@ref rte_gso.h),
[frag/reass] (@ref rte_ip_frag.h),
[LPM IPv4 route] (@ref rte_lpm.h),
- [LPM IPv6 route] (@ref rte_lpm6.h),
- [ACL] (@ref rte_acl.h),
- [EFD] (@ref rte_efd.h)
+ [LPM IPv6 route] (@ref rte_lpm6.h)
- **QoS**:
[metering] (@ref rte_meter.h),
@@ -119,13 +124,19 @@ The public API headers are grouped by topics:
[FBK hash] (@ref rte_fbk_hash.h),
[CRC hash] (@ref rte_hash_crc.h)
+- **classification**
+ [reorder] (@ref rte_reorder.h),
+ [distributor] (@ref rte_distributor.h),
+ [EFD] (@ref rte_efd.h),
+ [ACL] (@ref rte_acl.h),
+ [member] (@ref rte_member.h),
+ [flow classify] (@ref rte_flow_classify.h)
+
- **containers**:
[mbuf] (@ref rte_mbuf.h),
[ring] (@ref rte_ring.h),
- [distributor] (@ref rte_distributor.h),
- [reorder] (@ref rte_reorder.h),
[tailq] (@ref rte_tailq.h),
- [bitmap] (@ref rte_bitmap.h),
+ [bitmap] (@ref rte_bitmap.h)
- **packet framework**:
* [port] (@ref rte_port.h):
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index 823554f4..b2cbe940 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -31,9 +31,11 @@
PROJECT_NAME = DPDK
INPUT = doc/api/doxy-api-index.md \
drivers/crypto/scheduler \
+ drivers/net/bnxt \
drivers/net/bonding \
drivers/net/i40e \
drivers/net/ixgbe \
+ drivers/net/softnic \
lib/librte_eal/common/include \
lib/librte_eal/common/include/generic \
lib/librte_acl \
@@ -46,7 +48,9 @@ INPUT = doc/api/doxy-api-index.md \
lib/librte_efd \
lib/librte_ether \
lib/librte_eventdev \
+ lib/librte_flow_classify \
lib/librte_gro \
+ lib/librte_gso \
lib/librte_hash \
lib/librte_ip_frag \
lib/librte_jobstats \
@@ -55,10 +59,12 @@ INPUT = doc/api/doxy-api-index.md \
lib/librte_latencystats \
lib/librte_lpm \
lib/librte_mbuf \
+ lib/librte_member \
lib/librte_mempool \
lib/librte_meter \
lib/librte_metrics \
lib/librte_net \
+ lib/librte_pci \
lib/librte_pdump \
lib/librte_pipeline \
lib/librte_port \
@@ -66,6 +72,7 @@ INPUT = doc/api/doxy-api-index.md \
lib/librte_reorder \
lib/librte_ring \
lib/librte_sched \
+ lib/librte_security \
lib/librte_table \
lib/librte_timer \
lib/librte_vhost
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index 39880752..31f914a8 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -340,7 +340,12 @@ def print_table_css(outfile, table_id):
font-size: 80%;
white-space: pre-wrap;
vertical-align: top;
- padding: 2px;
+ padding: 0.5em 0;
+ min-width: 0.9em;
+ width: 2em;
+ }
+ table#idx col:first-child {
+ width: 0;
}
table#idx th:first-child {
vertical-align: bottom;
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index cddbd7bb..170dacdb 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -34,7 +34,6 @@ The main directories that contain files related to documentation are shown below
|-- testpmd_app_ug
|-- rel_notes
|-- nics
- |-- xen
|-- ...
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index 27e218b2..40983c15 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -367,7 +367,6 @@ The builds can be modified via the following environmental variables:
* ``DPDK_BUILD_TEST_CONFIGS`` (target1+option1+option2 target2)
* ``DPDK_DEP_CFLAGS``
* ``DPDK_DEP_LDFLAGS``
-* ``DPDK_DEP_MOFED`` (y/[n])
* ``DPDK_DEP_PCAP`` (y/[n])
* ``DPDK_NOTIFY`` (notify-send)
@@ -404,6 +403,10 @@ The appropriate maintainer can be found in the ``MAINTAINERS`` file::
git send-email --to maintainer@some.org --cc dev@dpdk.org 000*.patch
+The script ``get-maintainer.sh`` can be used to select maintainers automatically::
+
+ git send-email --to-cmd ./devtools/get-maintainer.sh --cc dev@dpdk.org 000*.patch
+
New additions can be sent without a maintainer::
git send-email --to dev@dpdk.org 000*.patch
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index 8aaf370d..8d0fdb77 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -18,6 +18,8 @@ General Guidelines
#. The modification of symbols can generally be managed with versioning
#. The removal of symbols generally is an ABI break and requires bumping of the
LIBABIVER macro
+#. Updates to the minimum hardware requirements, which drop support for hardware that
+ was previously supported, should be treated as an ABI change.
What is an ABI
--------------
@@ -77,6 +79,13 @@ for significant reasons, such as performance enhancements. ABI breakage due to
changes such as reorganizing public structure fields for aesthetic or
readability purposes should be avoided.
+.. note::
+
+ Updates to the minimum hardware requirements, which drop support for hardware
+ that was previously supported, should be treated as an ABI change, and
+ follow the relevant deprecation policy procedures as above: 3 acks and
+ announcement at least one release in advance.
+
Examples of Deprecation Notices
-------------------------------
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 6e9160aa..a1f58483 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -62,8 +62,8 @@ Installation
To build DPDK with the AESNI_GCM_PMD the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.46, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
+The latest version of the library supported by this PMD is v0.47, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.47.zip>`_.
.. code-block:: console
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index b3b937fe..79e722c6 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -52,6 +52,8 @@ Cipher algorithms:
* RTE_CRYPTO_CIPHER_AES192_CTR
* RTE_CRYPTO_CIPHER_AES256_CTR
* RTE_CRYPTO_CIPHER_AES_DOCSISBPI
+* RTE_CRYPTO_CIPHER_DES_CBC
+* RTE_CRYPTO_CIPHER_DES_DOCSISBPI
Hash algorithms:
@@ -76,8 +78,8 @@ Installation
To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.46, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.46.zip>`_.
+The latest version of the library supported by this PMD is v0.47, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.47.zip>`_.
.. code-block:: console
@@ -90,13 +92,14 @@ and the Multi-Buffer library version supported by them:
.. table:: DPDK and Multi-Buffer library version compatibility
- ============= ============================
- DPDK version Multi-buffer library version
- ============= ============================
- 2.2 - 16.11 0.43 - 0.44
- 17.02 0.44
- 17.05+ 0.45+
- ============= ============================
+ ============== ============================
+ DPDK version Multi-buffer library version
+ ============== ============================
+ 2.2 - 16.11 0.43 - 0.44
+ 17.02 0.44
+ 17.05 - 17.08 0.45 - 0.47
+ 17.11+ 0.47
+ ============== ============================
Initialization
diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 18d980eb..13474c18 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -183,17 +183,17 @@ The following dependencies are not part of DPDK and must be installed separately
SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-* **DPDK Helper Scripts**
+* **DPDK Extra Scripts**
DPAA2 based resources can be configured easily with the help of ready scripts
as provided in the DPDK helper repository.
- `DPDK Helper Scripts <https://github.com/qoriq-open-source/dpdk-helper>`_.
+ `DPDK Extra Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
Currently supported by DPDK:
-* NXP SDK **2.0+**.
-* MC Firmware version **10.0.0** and higher.
+* NXP SDK **17.08+**.
+* MC Firmware version **10.3.1** and higher.
* Supported architectures: **arm64 LE**.
* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
new file mode 100644
index 00000000..d3438cc3
--- /dev/null
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -0,0 +1,182 @@
+.. BSD LICENSE
+ Copyright 2017 NXP.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+NXP DPAA CAAM (DPAA_SEC)
+========================
+
+The DPAA_SEC PMD provides poll mode crypto driver support for NXP DPAA CAAM
+hardware accelerator.
+
+Architecture
+------------
+
+SEC is the SoC's security engine, which serves as NXP's latest cryptographic
+acceleration and offloading hardware. It combines functions previously
+implemented in separate modules to create a modular and scalable acceleration
+and assurance engine. It also implements block encryption algorithms, stream
+cipher algorithms, hashing algorithms, public key algorithms, run-time
+integrity checking, and a hardware random number generator. SEC performs
+higher-level cryptographic operations than previous NXP cryptographic
+accelerators, providing a significant improvement in system-level performance.
+
+DPAA_SEC is one of the hardware resources in the DPAA architecture. More
+information on the DPAA architecture can be found in :ref:`dpaa_overview`.
+
+The DPAA_SEC PMD is one of the DPAA drivers; it interacts with QBMAN to create,
+configure and destroy the device instance, using queue pairs with the CAAM
+portal.
+
+The DPAA_SEC PMD also uses some of the other hardware resources, such as buffer
+pools, queues and queue portals, to store data and to enqueue/dequeue it to and
+from the hardware SEC.
+
+Implementation
+--------------
+
+SEC provides platform assurance by working with SecMon, which is a companion
+logic block that tracks the security state of the SOC. SEC is programmed by
+means of descriptors (not to be confused with frame descriptors (FDs)) that
+indicate the operations to be performed and link to the message and
+associated data. SEC incorporates two DMA engines to fetch the descriptors,
+read the message data, and write the results of the operations. The DMA
+engine provides a scatter/gather capability so that SEC can read and write
+data scattered in memory. SEC may be configured by means of software for
+dynamic changes in byte ordering. The default configuration for this version
+of SEC is little-endian mode.
+
+Features
+--------
+
+The DPAA_SEC PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CBC``
+* ``RTE_CRYPTO_CIPHER_AES192_CBC``
+* ``RTE_CRYPTO_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_CIPHER_AES256_CTR``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Supported DPAA SoCs
+--------------------
+
+* LS1046A/LS1026A
+* LS1043A/LS1023A
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* Hash followed by Cipher mode is not supported.
+* Only supports the session-oriented API implementation (session-less APIs are not supported).
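+
+Since only the session-oriented API is supported, each crypto operation must
+carry a session. A minimal sketch using the generic cryptodev session API
+(``dev_id``, ``session_pool``, ``xform`` and ``op`` are placeholders):
+
+.. code-block:: c
+
+    /* create a session in the pool and initialise it for this device */
+    struct rte_cryptodev_sym_session *sess =
+            rte_cryptodev_sym_session_create(session_pool);
+    rte_cryptodev_sym_session_init(dev_id, sess, &xform, session_pool);
+
+    /* attach the session to each operation before enqueueing it */
+    rte_crypto_op_attach_sym_session(op, sess);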
+
+Prerequisites
+-------------
+
+The DPAA_SEC driver has pre-requisites similar to those described in :ref:`dpaa_overview`.
+The following dependencies are not part of DPDK and must be installed separately:
+
+* **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for the family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+* **DPDK Extras Scripts**
+
+ DPAA based resources can be configured easily with the help of ready-made
+ scripts, as provided in the DPDK Extras repository.
+
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+* NXP SDK **2.0+**.
+* Supported architectures: **arm64 LE**.
+
+* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+Basic DPAA config file options are described in :ref:`dpaa_overview`.
+In addition to those, the following options can be modified in the ``config`` file
+to enable the DPAA_SEC PMD.
+
+Please note that enabling debugging options may affect system performance.
+
+* ``CONFIG_RTE_LIBRTE_PMD_DPAA_SEC`` (default ``n``)
+ Toggle compilation of the ``librte_pmd_dpaa_sec`` driver. By default it is
+ enabled only in the defconfig_arm64-dpaa-* configs.
+
+* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT`` (default ``n``)
+ Toggle display of initialization related driver messages.
+
+* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER`` (default ``n``)
+ Toggle display of driver run-time messages.
+
+* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX`` (default ``n``)
+ Toggle display of receive fast path run-time messages.
+
+* ``CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS``
+ Number of sessions to create in the session memory pool on a single
+ DPAA SEC device. By default it is set to 2048 in the
+ defconfig_arm64-dpaa-* configs.
+
+Installation
+------------
+To compile the DPAA_SEC PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-dpaa-linuxapp-gcc install
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index 03d8485d..fab07cb2 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -23,6 +23,9 @@ AES CTR (128) = Y
AES CTR (192) = Y
AES CTR (256) = Y
AES DOCSIS BPI = Y
+DES CBC = Y
+DES DOCSIS BPI = Y
+
;
; Supported authentication algorithms of the 'aesni_mb' crypto driver.
;
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 0926887b..18d66cb9 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -10,6 +10,7 @@ Symmetric crypto =
Asymmetric crypto =
Sym operation chaining =
HW Accelerated =
+Protocol offload =
CPU SSE =
CPU AVX =
CPU AVX2 =
@@ -68,3 +69,6 @@ ZUC EIA3 =
AES GCM (128) =
AES GCM (192) =
AES GCM (256) =
+AES CCM (128) =
+AES CCM (192) =
+AES CCM (256) =
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index c3bb3ddc..8fd07d6a 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -7,6 +7,7 @@
Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
+Protocol offload = Y
;
; Supported crypto algorithms of the 'dpaa2_sec' crypto driver.
diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
new file mode 100644
index 00000000..0e8f5b2a
--- /dev/null
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -0,0 +1,40 @@
+;
+; Supported features of the 'dpaa_sec' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+HW Accelerated = Y
+
+;
+; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+3DES CBC = Y
+
+;
+; Supported authentication algorithms of the 'dpaa_sec' crypto driver.
+;
+[Auth]
+MD5 HMAC = Y
+SHA1 HMAC = Y
+SHA224 HMAC = Y
+SHA256 HMAC = Y
+SHA384 HMAC = Y
+SHA512 HMAC = Y
+
+;
+; Supported AEAD algorithms of the 'dpaa_sec' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/mrvl.ini b/doc/guides/cryptodevs/features/mrvl.ini
new file mode 100644
index 00000000..6d2fe6aa
--- /dev/null
+++ b/doc/guides/cryptodevs/features/mrvl.ini
@@ -0,0 +1,42 @@
+; Supported features of the 'mrvl' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'mrvl' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+3DES CBC = Y
+3DES CTR = Y
+
+;
+; Supported authentication algorithms of the 'mrvl' crypto driver.
+;
+[Auth]
+MD5 = Y
+MD5 HMAC = Y
+SHA1 = Y
+SHA1 HMAC = Y
+SHA256 = Y
+SHA256 HMAC = Y
+SHA384 = Y
+SHA384 HMAC = Y
+SHA512 = Y
+SHA512 HMAC = Y
+AES GMAC = Y
+
+;
+; Supported AEAD algorithms of the 'mrvl' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
diff --git a/doc/guides/cryptodevs/features/openssl.ini b/doc/guides/cryptodevs/features/openssl.ini
index aeb2a500..385ec4e0 100644
--- a/doc/guides/cryptodevs/features/openssl.ini
+++ b/doc/guides/cryptodevs/features/openssl.ini
@@ -45,3 +45,6 @@ AES GMAC = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+AES CCM (128) = Y
+AES CCM (192) = Y
+AES CCM (256) = Y
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 361b82dd..6d4e15b9 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -40,8 +40,10 @@ Crypto Device Drivers
aesni_gcm
armv8
dpaa2_sec
+ dpaa_sec
kasumi
openssl
+ mrvl
null
scheduler
snow3g
diff --git a/doc/guides/cryptodevs/mrvl.rst b/doc/guides/cryptodevs/mrvl.rst
new file mode 100644
index 00000000..4e992fbb
--- /dev/null
+++ b/doc/guides/cryptodevs/mrvl.rst
@@ -0,0 +1,206 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Marvell International Ltd.
+ Copyright(c) 2017 Semihalf.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+MRVL Crypto Poll Mode Driver
+============================
+
+The MRVL CRYPTO PMD (**librte_crypto_mrvl_pmd**) provides poll mode crypto driver
+support by utilizing the MUSDK library, which accelerates cryptographic
+operations by using the Security Acceleration Engine (EIP197) directly from
+user-space, with minimum overhead and high performance.
+
+Features
+--------
+
+The MRVL CRYPTO PMD has support for:
+
+* Symmetric crypto
+* Sym operation chaining
+* AES CBC (128)
+* AES CBC (192)
+* AES CBC (256)
+* AES CTR (128)
+* AES CTR (192)
+* AES CTR (256)
+* 3DES CBC
+* 3DES CTR
+* MD5
+* MD5 HMAC
+* SHA1
+* SHA1 HMAC
+* SHA256
+* SHA256 HMAC
+* SHA384
+* SHA384 HMAC
+* SHA512
+* SHA512 HMAC
+* AES GCM (128)
+
+Limitations
+-----------
+
+* Hardware only supports scenarios where the ICV (digest buffer) is placed just
+ after the authenticated data. Any other placement will result in an error.
+
+* Before running the crypto test suite it is advised to increase the limit of
+ open files:
+
+ .. code-block:: console
+
+ ulimit -n 20000
+
+Installation
+------------
+
+Compilation of the MRVL CRYPTO PMD is disabled by default due to external
+dependencies. Currently there are two driver-specific compilation options
+available in ``config/common_base``:
+
+- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO`` (default ``n``)
+
+ Toggle compilation of the librte_pmd_mrvl driver.
+
+- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO_DEBUG`` (default ``n``)
+
+ Toggle display of debugging messages.
+
+The external MUSDK library, which provides direct access to Marvell's EIP197
+cryptographic engine, is required during compilation. Library sources are
+available `here <https://github.com/MarvellEmbeddedProcessors/musdk-marvell/tree/musdk-armada-17.08>`__.
+
+Alternatively, a prebuilt library can be downloaded from
+`Marvell Extranet <https://extranet.marvell.com>`_. Once approval has been
+granted, the library can be found by typing ``musdk`` in the search box.
+
+For MUSDK library build instructions please refer to ``doc/musdk_get_started.txt``
+in the library sources directory.
+
+MUSDK requires out-of-tree kernel modules to work. The kernel tree needed to
+build them is available
+`here <https://github.com/MarvellEmbeddedProcessors/linux-marvell/tree/linux-4.4.52-armada-17.08>`__.
+
+Initialization
+--------------
+
+After successfully building MRVL CRYPTO PMD, the following modules need to be
+loaded:
+
+.. code-block:: console
+
+ insmod musdk_uio.ko
+ insmod mvpp2x_sysfs.ko
+ insmod mv_pp_uio.ko
+ insmod mv_sam_uio.ko
+ insmod crypto_safexcel.ko
+
+- `musdk_uio.ko`, `mv_pp2_uio.ko` and `mv_sam_uio.ko` are distributed together with the MUSDK library.
+- `crypto_safexcel.ko` is an in-kernel module.
+- `mvpp2x_sysfs.ko` can be built from sources available `here <https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell/tree/mvpp2x-armada-17.08>`__.
+
+The following parameters (all optional) are exported by the driver:
+
+* max_nb_queue_pairs: maximum number of queue pairs in the device (8 by default).
+* max_nb_sessions: maximum number of sessions that can be created (2048 by default).
+* socket_id: socket on which to allocate the device resources.
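+
+For example, the parameters can be passed on the vdev argument using the usual
+``key=value`` syntax, e.g.
+``--vdev=crypto_mrvl,max_nb_queue_pairs=4,max_nb_sessions=1024,socket_id=0``
+(the values are illustrative only).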
+
+The l2fwd-crypto example application can be used to verify MRVL CRYPTO PMD
+operation:
+
+.. code-block:: console
+
+ ./l2fwd-crypto --vdev=net_mrvl,iface=eth0 --vdev=crypto_mrvl -- \
+ --cipher_op ENCRYPT --cipher_algo aes-cbc \
+ --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
+ --auth_op GENERATE --auth_algo sha1-hmac \
+ --auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f
+
+Example output:
+
+.. code-block:: console
+
+ [...]
+ AAD: at [0x7f253ceb80], len=
+ P ID 0 configuration ----
+ Port mode : KR
+ MAC status : disabled
+ Link status : link up
+ Port speed : 10G
+ Port duplex : full
+ Port: Egress enable tx_port_num=16 qmap=0x1
+ PORT: Port0 - link
+ P ID 0 configuration ----
+ Port mode : KR
+ MAC status : disabled
+ Link status : link down
+ Port speed : 10G
+ Port duplex : full
+ Port: Egress enable tx_port_num=16 qmap=0x1
+ Port 0, MAC address: 00:50:43:02:21:20
+
+
+ Checking link statusdone
+ Port 0 Link Up - speed 0 Mbps - full-duplex
+ Lcore 0: RX port 0
+ Allocated session pool on socket 0
+ eip197: 0:0 registers: paddr: 0xf2880000, vaddr: 0x0x7f56a80000
+ DMA buffer (131136 bytes) for CDR #0 allocated: paddr = 0xb0585e00, vaddr = 0x7f09384e00
+ DMA buffer (131136 bytes) for RDR #0 allocated: paddr = 0xb05a5f00, vaddr = 0x7f093a4f00
+ DMA buffers allocated for 2049 operations. Tokens - 256 bytes
+ Lcore 0: cryptodev 0
+ L2FWD: lcore 1 has nothing to do
+ L2FWD: lcore 2 has nothing to do
+ L2FWD: lcore 3 has nothing to do
+ L2FWD: entering main loop on lcore 0
+ L2FWD: -- lcoreid=0 portid=0
+ L2FWD: -- lcoreid=0 cryptoid=0
+ Options:-
+ nportmask: ffffffff
+ ports per lcore: 1
+ refresh period : 10000
+ single lcore mode: disabled
+ stats_printing: enabled
+ sessionless crypto: disabled
+
+ Crypto chain: Input --> Encrypt --> Auth generate --> Output
+
+ ---- Cipher information ---
+ Algorithm: aes-cbc
+ Cipher key: at [0x7f56db4e80], len=16
+ 00000000: 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F | ................
+ IV: at [0x7f56db4b80], len=16
+ 00000000: 20 F0 63 0E 45 EB 2D 84 72 D4 13 6E 36 B5 AF FE | .c.E.-.r..n6...
+
+ ---- Authentication information ---
+ Algorithm: sha1-hmac
+ Auth key: at [0x7f56db4d80], len=16
+ 00000000: 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F | ................
+ IV: at [0x7f56db4a80], len=0
+ AAD: at [0x7f253ceb80], len=
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index f18a4567..243ea36a 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -67,6 +67,7 @@ Supported authentication algorithms:
Supported AEAD algorithms:
* ``RTE_CRYPTO_AEAD_AES_GCM``
+* ``RTE_CRYPTO_AEAD_AES_CCM``
Installation
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index a3fce7b8..cb17b6b3 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -90,6 +90,7 @@ Limitations
* No BSD support as BSD QAT kernel driver not available.
* ZUC EEA3/EIA3 is not supported by dh895xcc devices
* Maximum additional authenticated data (AAD) for GCM is 240 bytes long.
+* Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
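+
+  A minimal sketch of the intended usage, assuming the application keeps a
+  per-lcore queue pair mapping (``lcore_to_qp`` is hypothetical):
+
+  .. code-block:: c
+
+      /* each lcore only ever touches its own queue pair */
+      uint16_t qp = lcore_to_qp[rte_lcore_id()];
+
+      nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp, ops, nb_ops);
+      nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp, ops, nb_ops);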
Installation
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index b43d5155..7e601a07 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -149,3 +149,8 @@ Burst mode support
Burst mode is not supported. Dequeue and enqueue functions accept only a
single event at a time.
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+When eth_octeontx is used as an Rx adapter, the event schedule type
+``RTE_SCHED_TYPE_PARALLEL`` is not supported.
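+
+A minimal sketch of adding an Rx queue with a supported schedule type, assuming
+the event Rx adapter API (``adapter_id``, ``eth_port_id`` and ``ev_queue_id``
+are placeholders):
+
+.. code-block:: c
+
+    struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
+
+    /* use ATOMIC (or ORDERED) instead of the unsupported PARALLEL */
+    qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+    qconf.ev.queue_id = ev_queue_id;
+
+    rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id,
+                                       -1 /* all Rx queues */, &qconf);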
diff --git a/doc/guides/eventdevs/sw.rst b/doc/guides/eventdevs/sw.rst
index a3e66243..ec49b3b5 100644
--- a/doc/guides/eventdevs/sw.rst
+++ b/doc/guides/eventdevs/sw.rst
@@ -78,9 +78,9 @@ Scheduling Quanta
~~~~~~~~~~~~~~~~~
The scheduling quanta sets the number of events that the device attempts to
-schedule before returning to the application from the ``rte_event_schedule()``
-function. Note that is a *hint* only, and that fewer or more events may be
-scheduled in a given iteration.
+schedule in a single schedule call performed by the service core. Note that
+is a *hint* only, and that fewer or more events may be scheduled in a given
+iteration.
The scheduling quanta can be set using a string argument to the vdev
create call:
@@ -140,10 +140,9 @@ eventdev.
Distributed Scheduler
~~~~~~~~~~~~~~~~~~~~~
-The software eventdev is a centralized scheduler, requiring the
-``rte_event_schedule()`` function to be called by a CPU core to perform the
-required event distribution. This is not really a limitation but rather a
-design decision.
+The software eventdev is a centralized scheduler, requiring a service core to
+perform the required event distribution. This is not really a limitation but
+rather a design decision.
The ``RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED`` flag is not set in the
``event_dev_cap`` field of the ``rte_event_dev_info`` struct for the software
diff --git a/doc/guides/freebsd_gsg/build_sample_apps.rst b/doc/guides/freebsd_gsg/build_sample_apps.rst
index 9faa0e6e..d84f15b8 100644
--- a/doc/guides/freebsd_gsg/build_sample_apps.rst
+++ b/doc/guides/freebsd_gsg/build_sample_apps.rst
@@ -163,6 +163,9 @@ Other options, specific to Linux and are not supported under FreeBSD are as foll
* ``--huge-dir``:
The directory where hugetlbfs is mounted.
+* ``--mbuf-pool-ops-name``:
+ The pool ops name for mbufs to use.
+
* ``--file-prefix``:
The prefix text used for hugepage filenames.
diff --git a/doc/guides/howto/index.rst b/doc/guides/howto/index.rst
index 468939b6..b5e8ac68 100644
--- a/doc/guides/howto/index.rst
+++ b/doc/guides/howto/index.rst
@@ -38,6 +38,7 @@ HowTo Guides
lm_bond_virtio_sriov
lm_virtio_vhost_user
flow_bifurcation
+ rte_flow
pvp_reference_benchmark
vfd
virtio_user_for_container_networking
diff --git a/doc/guides/howto/rte_flow.rst b/doc/guides/howto/rte_flow.rst
new file mode 100644
index 00000000..4a27c995
--- /dev/null
+++ b/doc/guides/howto/rte_flow.rst
@@ -0,0 +1,333 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Mellanox Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Mellanox Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Generic flow API - examples
+===========================
+
+This document demonstrates some concrete examples for programming flow rules
+with the ``rte_flow`` APIs.
+
+* Details of the rte_flow APIs can be found in the following link:
+ :ref:`Generic flow API <Generic_flow_API>`.
+
+* Details of the TestPMD commands to set the flow rules can be found in the
+ following link: :ref:`TestPMD Flow rules <testpmd_rte_flow>`
+
+Simple IPv4 drop
+----------------
+
+Description
+~~~~~~~~~~~
+
+In this example we will create a simple rule that drops packets whose IPv4
+destination equals 192.168.3.2. This code is equivalent to the following
+testpmd command (wrapped for clarity)::
+
+ testpmd> flow create 0 ingress pattern eth / vlan /
+ ipv4 dst is 192.168.3.2 / end actions drop / end
+
+Code
+~~~~
+
+.. code-block:: c
+
+ /* create the attribute structure */
+ struct rte_flow_attr attr = {.ingress = 1};
+ struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
+ struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ /* setting the eth to pass all packets */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth;
+
+ /* set the vlan to pas all packets */
+ pattern[1] = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan;
+
+ /* set the dst ipv4 packet to the required value */
+ ipv4.hdr.dst_addr = htonl(0xc0a80302);
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].spec = &ipv4;
+
+ /* end the pattern array */
+ pattern[3].type = RTE_FLOW_ITEM)TYPE_END;
+
+ /* create the drop action */
+ actions[0].type = RTE_FLOW_ACTION_TYPE_DROP;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /* validate and create the flow rule */
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error)
+ flow = rte_flow_create(port_id, &attr, pattern, actions, &error)
+
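+If the rule cannot be validated or created, the ``rte_flow_error`` structure
+filled in by these calls describes the failure. A minimal sketch of such error
+reporting, reusing the variables from the example above:
+
+.. code-block:: c
+
+ if (flow == NULL)
+     printf("flow rule cannot be created: %s\n",
+            error.message ? error.message : "(no stated reason)");
+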
+Output
+~~~~~~
+
+Terminal 1: running the sample app with the flow rule disabled::
+
+ ./filter-program disable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.4
+ received packet with src ip = 176.80.50.5
+
+Terminal 1: running the sample app with the flow rule enabled::
+
+ ./filter-program enable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.4
+
+Range IPv4 drop
+----------------
+
+Description
+~~~~~~~~~~~
+
+In this example we will create a simple rule that drops packets whose IPv4
+destination is in the range 192.168.3.0 to 192.168.3.255. This is done using
+a mask.
+
+This code is equivalent to the following testpmd command (wrapped for
+clarity)::
+
+ testpmd> flow create 0 ingress pattern eth / vlan /
+ ipv4 dst spec 192.168.3.0 dst mask 255.255.255.0 /
+ end actions drop / end
+
+Code
+~~~~
+
+.. code-block:: c
+
+ struct rte_flow_attr attr = {.ingress = 1};
+ struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
+ struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv4 ipv4_mask;
+ struct rte_flow *flow = NULL;
+ struct rte_flow_error error;
+
+ /* zero the arrays and items before filling them in */
+ memset(pattern, 0, sizeof(pattern));
+ memset(actions, 0, sizeof(actions));
+ memset(&eth, 0, sizeof(eth));
+ memset(&vlan, 0, sizeof(vlan));
+ memset(&ipv4, 0, sizeof(ipv4));
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+
+ /* set the eth item to pass all packets (a zeroed mask matches anything) */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth;
+ pattern[0].mask = &eth;
+
+ /* set the vlan item to pass all packets */
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan;
+ pattern[1].mask = &vlan;
+
+ /* set the dst ipv4 address and mask to cover 192.168.3.0/24 */
+ ipv4.hdr.dst_addr = htonl(0xc0a80300);
+ ipv4_mask.hdr.dst_addr = htonl(0xffffff00);
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].spec = &ipv4;
+ pattern[2].mask = &ipv4_mask;
+
+ /* end the pattern array */
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ /* create the drop action */
+ actions[0].type = RTE_FLOW_ACTION_TYPE_DROP;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /* validate and create the flow rule */
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
+     flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
+
+Output
+~~~~~~
+
+Terminal 1: running the sample app with the flow rule disabled::
+
+ ./filter-program disable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.6', dst='192.168.5.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.4
+ received packet with src ip = 176.80.50.5
+ received packet with src ip = 176.80.50.6
+
+Terminal 1: running the sample app with the flow rule enabled::
+
+ ./filter-program enable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q()/IP(src='176.80.50.6', dst='192.168.5.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.6
+
+Send VLAN to queue
+------------------
+
+Description
+~~~~~~~~~~~
+
+In this example we will create a rule that routes all packets with
+VLAN ID 123 to queue 3.
+
+This code is equivalent to the following testpmd command (wrapped for
+clarity)::
+
+ testpmd> flow create 0 ingress pattern eth / vlan vid spec 123 /
+ end actions queue index 3 / end
+
+Code
+~~~~
+
+.. code-block:: c
+
+ struct rte_flow_attr attr = {.ingress = 1};
+ struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
+ struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_vlan vlan_mask;
+ struct rte_flow_action_queue queue = { .index = 3 };
+ struct rte_flow *flow = NULL;
+ struct rte_flow_error error;
+
+ /* zero the arrays and items before filling them in */
+ memset(pattern, 0, sizeof(pattern));
+ memset(actions, 0, sizeof(actions));
+ memset(&eth, 0, sizeof(eth));
+ memset(&vlan, 0, sizeof(vlan));
+ memset(&vlan_mask, 0, sizeof(vlan_mask));
+
+ /* set the eth item to pass all packets (a zeroed mask matches anything) */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth;
+ pattern[0].mask = &eth;
+
+ /* match VLAN ID 123; the VID is the low 12 bits of the TCI field */
+ vlan.tci = rte_cpu_to_be_16(123);
+ vlan_mask.tci = rte_cpu_to_be_16(0x0fff);
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan;
+ pattern[1].mask = &vlan_mask;
+
+ /* end the pattern array */
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
+
+ /* create the queue action */
+ actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ actions[0].conf = &queue;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /* validate and create the flow rule */
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
+     flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
+
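+When the rule is no longer needed it can be removed with ``rte_flow_destroy()``,
+or every rule on the port can be removed with ``rte_flow_flush()``. A minimal
+sketch, reusing the ``port_id``, ``flow`` and ``error`` variables from above:
+
+.. code-block:: c
+
+ /* remove the single rule created above ... */
+ rte_flow_destroy(port_id, flow, &error);
+
+ /* ... or remove every rule created on the port */
+ rte_flow_flush(port_id, &error);
+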
+Output
+~~~~~~
+
+Terminal 1: running the sample app with the flow rule disabled::
+
+ ./filter-program disable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q(vlan=123)/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q(vlan=50)/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q(vlan=123)/IP(src='176.80.50.6', dst='192.168.5.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.4 sent to queue 2
+ received packet with src ip = 176.80.50.5 sent to queue 1
+ received packet with src ip = 176.80.50.6 sent to queue 0
+
+Terminal 1: running the sample app with the flow rule enabled::
+
+ ./filter-program enable
+ [waiting for packets]
+
+Terminal 2: running scapy::
+
+ $scapy
+ welcome to Scapy
+ >> sendp(Ether()/Dot1Q(vlan=123)/IP(src='176.80.50.4', dst='192.168.3.1'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q(vlan=50)/IP(src='176.80.50.5', dst='192.168.3.2'), \
+ iface='some interface', count=1)
+ >> sendp(Ether()/Dot1Q(vlan=123)/IP(src='176.80.50.6', dst='192.168.5.2'), \
+ iface='some interface', count=1)
+
+Terminal 1: output log::
+
+ received packet with src ip = 176.80.50.4 sent to queue 3
+ received packet with src ip = 176.80.50.5 sent to queue 1
+ received packet with src ip = 176.80.50.6 sent to queue 3
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 63716b09..5b6eb7ec 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -44,7 +44,6 @@ DPDK documentation
nics/index
cryptodevs/index
eventdevs/index
- xen/index
contributing/index
rel_notes/index
faq/index
diff --git a/doc/guides/linux_gsg/build_dpdk.rst b/doc/guides/linux_gsg/build_dpdk.rst
index e32afd5f..5452041f 100644
--- a/doc/guides/linux_gsg/build_dpdk.rst
+++ b/doc/guides/linux_gsg/build_dpdk.rst
@@ -69,9 +69,9 @@ The format of a DPDK target is::
where:
-* ``ARCH`` can be: ``i686``, ``x86_64``, ``ppc_64``
+* ``ARCH`` can be: ``i686``, ``x86_64``, ``ppc_64``, ``arm64``
-* ``MACHINE`` can be: ``native``, ``power8``
+* ``MACHINE`` can be: ``native``, ``power8``, ``armv8a``
* ``EXECENV`` can be: ``linuxapp``, ``bsdapp``
diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
index 0cc5fd17..594cbc59 100644
--- a/doc/guides/linux_gsg/build_sample_apps.rst
+++ b/doc/guides/linux_gsg/build_sample_apps.rst
@@ -116,7 +116,7 @@ The following is the list of options that can be given to the EAL:
./rte-app [-c COREMASK | -l CORELIST] [-n NUM] [-b <domain:bus:devid.func>] \
[--socket-mem=MB,...] [-d LIB.so|DIR] [-m MB] [-r NUM] [-v] [--file-prefix] \
- [--proc-type <primary|secondary|auto>] [-- xen-dom0]
+ [--proc-type <primary|secondary|auto>]
The EAL options are as follows:
@@ -157,15 +157,15 @@ The EAL options are as follows:
* ``--huge-dir``:
The directory where hugetlbfs is mounted.
+* ``--mbuf-pool-ops-name``:
+ Pool ops name for mbuf to use.
+
* ``--file-prefix``:
The prefix text used for hugepage filenames.
* ``--proc-type``:
The type of process instance.
-* ``--xen-dom0``:
- Support application running on Xen Domain0 without hugetlbfs.
-
* ``--vmware-tsc-map``:
Use VMware TSC map instead of native RDTSC.
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index eb8442c6..3e7fe637 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -228,56 +228,3 @@ The mount point can be made permanent across reboots, by adding the following li
For 1GB pages, the page size must be specified as a mount option::
nodev /mnt/huge_1GB hugetlbfs pagesize=1GB 0 0
-
-Xen Domain0 Support in the Linux Environment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The existing memory management implementation is based on the Linux kernel hugepage mechanism.
-On the Xen hypervisor, hugepage support for DomainU (DomU) Guests means that DPDK applications work as normal for guests.
-
-However, Domain0 (Dom0) does not support hugepages.
-To work around this limitation, a new kernel module rte_dom0_mm is added to facilitate the allocation and mapping of memory via
-**IOCTL** (allocation) and **MMAP** (mapping).
-
-Enabling Xen Dom0 Mode in the DPDK
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-By default, Xen Dom0 mode is disabled in the DPDK build configuration files.
-To support Xen Dom0, the CONFIG_RTE_LIBRTE_XEN_DOM0 setting should be changed to “y”, which enables the Xen Dom0 mode at compile time.
-
-Furthermore, the CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID setting should also be changed to “y” in the case of the wrong socket ID being received.
-
-Loading the DPDK rte_dom0_mm Module
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To run any DPDK application on Xen Dom0, the ``rte_dom0_mm`` module must be loaded into the running kernel with rsv_memsize option.
-The module is found in the kmod sub-directory of the DPDK target directory.
-This module should be loaded using the insmod command as shown below (assuming that the current directory is the DPDK target directory)::
-
- sudo insmod kmod/rte_dom0_mm.ko rsv_memsize=X
-
-The value X cannot be greater than 4096(MB).
-
-Configuring Memory for DPDK Use
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-After the rte_dom0_mm.ko kernel module has been loaded, the user must configure the memory size for DPDK usage.
-This is done by echoing the memory size to a memsize file in the /sys/devices/ directory.
-Use the following command (assuming that 2048 MB is required)::
-
- echo 2048 > /sys/kernel/mm/dom0-mm/memsize-mB/memsize
-
-The user can also check how much memory has already been used::
-
- cat /sys/kernel/mm/dom0-mm/memsize-mB/memsize_rsvd
-
-Xen Domain0 does not support NUMA configuration, as a result the ``--socket-mem`` command line option is invalid for Xen Domain0.
-
-.. note::
-
- The memsize value cannot be greater than the rsv_memsize value.
-
-Running the DPDK Application on Xen Domain0
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To run the DPDK application on Xen Domain0, an extra command line option ``--xen-dom0`` is required.
diff --git a/doc/guides/nics/ark.rst b/doc/guides/nics/ark.rst
index a7c2590b..6c135eeb 100644
--- a/doc/guides/nics/ark.rst
+++ b/doc/guides/nics/ark.rst
@@ -146,9 +146,6 @@ Data Path Interface
Ingress RX and Egress TX operation is by the nominal DPDK API .
The driver supports single-port, multi-queue for both RX and TX.
-Refer to ``ark_ethdev.h`` for the list of supported methods to
-act upon RX and TX Queues.
-
Configuration Information
-------------------------
diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
new file mode 100644
index 00000000..d331c053
--- /dev/null
+++ b/doc/guides/nics/dpaa.rst
@@ -0,0 +1,378 @@
+.. BSD LICENSE
+ Copyright 2017 NXP.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+DPAA Poll Mode Driver
+=====================
+
+The DPAA NIC PMD (**librte_pmd_dpaa**) provides poll mode driver
+support for the inbuilt NIC found in the **NXP DPAA** SoC family.
+
+More information can be found at `NXP Official Website
+<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
+
+NXP DPAA (Data Path Acceleration Architecture - Gen 1)
+------------------------------------------------------
+
+This section provides an overview of the NXP DPAA architecture
+and how it is integrated into the DPDK.
+
+Contents summary
+
+- DPAA overview
+- DPAA driver architecture overview
+
+.. _dpaa_overview:
+
+DPAA Overview
+~~~~~~~~~~~~~
+
+Reference: `FSL DPAA Architecture <http://www.nxp.com/assets/documents/data/en/white-papers/QORIQDPAAWP.pdf>`_.
+
+The QorIQ Data Path Acceleration Architecture (DPAA) is a set of hardware
+components on specific QorIQ series multicore processors. This architecture
+provides the infrastructure to support simplified sharing of networking
+interfaces and accelerators by multiple CPU cores, and the accelerators
+themselves.
+
+DPAA includes:
+
+- Cores
+- Network and packet I/O
+- Hardware offload accelerators
+- Infrastructure required to facilitate flow of packets between the components above
+
+Infrastructure components are:
+
+- The Queue Manager (QMan) is a hardware accelerator that manages frame queues.
+ It allows CPUs and other accelerators connected to the SoC datapath to
+ enqueue and dequeue ethernet frames, thus providing the infrastructure for
+ data exchange among CPUs and datapath accelerators.
+- The Buffer Manager (BMan) is a hardware buffer pool management block that
+ allows software and accelerators on the datapath to acquire and release
+ buffers in order to build frames.
+
+Hardware accelerators are:
+
+- SEC - Cryptographic accelerator
+- PME - Pattern matching engine
+
+The Network and packet I/O component:
+
+- The Frame Manager (FMan) is a key component in the DPAA and makes use of the
+ DPAA infrastructure (QMan and BMan). FMan is responsible for packet
+ distribution and policing. Each frame can be parsed and classified, and the
+ results may be attached to the frame. This metadata can be used to select
+ the particular QMan queue to which the packet is forwarded.
+
+
+DPAA DPDK - Poll Mode Driver Overview
+-------------------------------------
+
+This section provides an overview of the drivers for DPAA:
+
+* Bus driver and associated "DPAA infrastructure" drivers
+* Functional object drivers (such as Ethernet).
+
+Brief description of each driver is provided in layout below as well as
+in the following sections.
+
+.. code-block:: console
+
+ +------------+
+ | DPDK DPAA |
+ | PMD |
+ +-----+------+
+ |
+ +-----+------+ +---------------+
+ : Ethernet :.......| DPDK DPAA |
+ . . . . . . . . . : (FMAN) : | Mempool driver|
+ . +---+---+----+ | (BMAN) |
+ . ^ | +-----+---------+
+ . | |<enqueue, .
+ . | | dequeue> .
+ . | | .
+ . +---+---V----+ .
+ . . . . . . . . . . .: Portal drv : .
+ . . : : .
+ . . +-----+------+ .
+ . . : QMAN : .
+ . . : Driver : .
+ +----+------+-------+ +-----+------+ .
+ | DPDK DPAA Bus | | .
+ | driver |....................|.....................
+ | /bus/dpaa | |
+ +-------------------+ |
+ |
+ ========================== HARDWARE =====|========================
+ PHY
+ =========================================|========================
+
+In the above representation, solid lines represent components which interface
+with the DPDK RTE framework and dotted lines represent DPAA internal components.
+
+DPAA Bus driver
+~~~~~~~~~~~~~~~
+
+The DPAA bus driver is a ``rte_bus`` driver which scans the platform-like bus
+for DPAA devices. Key functions include:
+
+- Scanning and parsing the various objects and adding them to their respective
+ device lists.
+- Probing the available drivers against each scanned device.
+- Creating the necessary Ethernet instances before passing control to the PMD.
+
+DPAA NIC Driver (PMD)
+~~~~~~~~~~~~~~~~~~~~~
+
+The DPAA PMD is a traditional DPDK PMD which provides the necessary interface
+between the RTE framework and the DPAA internal components/drivers.
+
+- Once devices have been identified by the DPAA bus, each device is associated
+ with the PMD.
+- The PMD is responsible for implementing the necessary glue layer between the
+ RTE APIs and the lower level QMan and FMan blocks.
+ The Ethernet driver is bound to a FMAN port and implements the interfaces
+ needed to connect the DPAA network interface to the network stack.
+ Each FMAN Port corresponds to a DPDK network interface.
+
+
+Features
+^^^^^^^^
+
+ Features of the DPAA PMD are:
+
+ - Multiple queues for TX and RX
+ - Receive Side Scaling (RSS)
+ - Packet type information
+ - Checksum offload
+ - Promiscuous mode
+
+DPAA Mempool Driver
+~~~~~~~~~~~~~~~~~~~
+
+DPAA has a hardware offloaded buffer pool manager, called BMan, or Buffer
+Manager.
+
+- Using the standard mempool operations RTE API, the mempool driver interfaces
+ with RTE to service mempool creation, deletion, buffer allocation and
+ deallocation requests.
+- Each FMAN instance has a BMan pool attached to it during initialization.
+ Each Tx frame can be automatically released by the hardware if it was
+ allocated from this pool.
+
+
+Supported DPAA SoCs
+-------------------
+
+- LS1043A/LS1023A
+- LS1046A/LS1026A
+
+Prerequisites
+-------------
+
+There are four main prerequisites for executing the DPAA PMD on a DPAA
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
+
+3. **Root file system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+4. **FMC Tool**
+
+ Before any DPDK application can be executed, the Frame Manager Configuration
+ Tool (FMC) needs to be executed to set the configuration of the queues. This
+ includes the queue state, RSS and other policies.
+ This tool can be obtained from `NXP (Freescale) Public Git Repository <https://github.com/qoriq-open-source/fmc>`_.
+
+ This tool needs the configuration files which are available in the
+ :ref:`DPDK Extra Scripts <extra_scripts>`, described below, for DPDK usage.
+
+As an alternative method, the DPAA PMD can also be executed using images
+provided as part of the SDK from NXP. The SDK includes all of the above
+prerequisites necessary to bring up a DPAA board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+ The NXP Linux software development kit (SDK) includes support for the
+ family of QorIQ® ARM-architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+
+.. _extra_scripts:
+
+- **DPDK Extra Scripts**
+
+ DPAA based resources can be configured easily with the help of ready-made
+ scripts, as provided in the DPDK Extras repository.
+
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+**.
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
+ to setup the basic DPDK environment.
+
+.. note::
+
+ Some parts of the dpaa bus code (qbman and fman library) routines are
+ dual licensed (BSD & GPLv2); however, they are used as BSD in DPDK in userspace.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_DPAA_BUS`` (default ``n``)
+
+ Toggle compilation of the ``librte_bus_dpaa`` driver. By default it is
+ enabled only in the defconfig_arm64-dpaa-* config.
+
+- ``CONFIG_RTE_LIBRTE_DPAA_PMD`` (default ``n``)
+
+ Toggle compilation of the ``librte_pmd_dpaa`` driver. By default it is
+ enabled only in the defconfig_arm64-dpaa-* config.
+
+- ``CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER`` (default ``n``)
+
+ Toggles display of bus configurations and enables a debugging queue to
+ fetch error (Rx/Tx) packets to the driver. By default, packets with errors
+ (like a wrong checksum) are dropped by the hardware.
+
+- ``CONFIG_RTE_LIBRTE_DPAA_HWDEBUG`` (default ``n``)
+
+ Enables debugging of the Queue and Buffer Manager layer which interacts
+ with the DPAA hardware.
+
+- ``CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS`` (default ``dpaa``)
+
+ This is not a DPAA specific configuration - it is a generic RTE config.
+ For optimal performance and hardware utilization, it is expected that the
+ DPAA mempool driver is used for mempools. For that, this configuration
+ needs to be enabled (see the example below).
+
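+For example, a sketch of the relevant lines in the build-time ``config`` file,
+assuming the bus, PMD and mempool options above are all wanted (the values
+shown are only an illustration):
+
+.. code-block:: console
+
+ CONFIG_RTE_LIBRTE_DPAA_BUS=y
+ CONFIG_RTE_LIBRTE_DPAA_PMD=y
+ CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa"
+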
+Environment Variables
+~~~~~~~~~~~~~~~~~~~~~
+
+The DPAA drivers use the following environment variables to configure their
+state during application initialization:
+
+- ``DPAA_NUM_RX_QUEUES`` (default 1)
+
+ This defines the number of Rx queues configured for an application, per
+ port. The hardware distributes received packets across this many queues.
+ If the application is configured to use fewer queues than the number set
+ here, it might result in packet loss (because of the distribution). An
+ example of setting this variable is shown below.
+
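+A sketch of setting this variable before launching an application (the value
+``4`` is only an illustration):
+
+.. code-block:: console
+
+ export DPAA_NUM_RX_QUEUES=4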
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ ./arm64-dpaa-linuxapp-gcc/testpmd -c 0xff -n 1 \
+ -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
+
+ .....
+ EAL: Registered [pci] bus.
+ EAL: Registered [dpaa] bus.
+ EAL: Detected 4 lcore(s)
+ .....
+ EAL: dpaa: Bus scan completed
+ .....
+ Configuring Port 0 (socket 0)
+ Port 0: 00:00:00:00:00:01
+ Configuring Port 1 (socket 0)
+ Port 1: 00:00:00:00:00:02
+ .....
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
+
+Limitations
+-----------
+
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+
+The DPAA drivers for DPDK can only work on NXP SoCs as listed in the
+``Supported DPAA SoCs`` section above.
+
+Maximum packet length
+~~~~~~~~~~~~~~~~~~~~~
+
+The DPAA SoC family supports a maximum jumbo frame size of 10240 bytes. This
+value is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
+up to 10240 bytes can still reach the host interface.
+
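+A minimal sketch of requesting jumbo frames through the DPDK 17.11 ethdev API,
+assuming ``port_id`` refers to an initialized DPAA port:
+
+.. code-block:: c
+
+ struct rte_eth_conf port_conf;
+
+ memset(&port_conf, 0, sizeof(port_conf));
+ /* request jumbo frames; the 10240 byte hardware limit applies
+  * regardless of the value set here */
+ port_conf.rxmode.jumbo_frame = 1;
+ port_conf.rxmode.max_rx_pkt_len = 9600;
+
+ if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
+     rte_exit(EXIT_FAILURE, "port configuration failed\n");
+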
+Multiprocess Support
+~~~~~~~~~~~~~~~~~~~~
+
+The current version of the DPAA driver doesn't support multi-process
+applications where I/O is performed using secondary processes. This feature
+will be implemented in subsequent versions.
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index d6b1f21e..85bc0488 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -448,7 +448,7 @@ compatible board:
1. **ARM 64 Tool Chain**
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
2. **Linux Kernel**
@@ -487,8 +487,8 @@ separately:
Currently supported by DPDK:
-- NXP SDK **2.0+**.
-- MC Firmware version **10.0.0** and higher.
+- NXP SDK **17.08+**.
+- MC Firmware version **10.3.1** and higher.
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
@@ -496,7 +496,7 @@ Currently supported by DPDK:
.. note::
Some part of fslmc bus code (mc flib - object library) routines are
- dual licensed (BSD & GPLv2).
+ dual licensed (BSD & GPLv2), however they are used as BSD in DPDK in userspace.
Pre-Installation Configuration
------------------------------
diff --git a/doc/guides/nics/fail_safe.rst b/doc/guides/nics/fail_safe.rst
index 34ecae2d..c4e3d2e8 100644
--- a/doc/guides/nics/fail_safe.rst
+++ b/doc/guides/nics/fail_safe.rst
@@ -136,7 +136,7 @@ This section shows some example of using **testpmd** with a fail-safe PMD.
.. code-block:: console
$RTE_TARGET/build/app/testpmd -c 0xff -n 4 \
- --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)'
+ --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)' \
-b 84:00.0 -b 00:04.0 -- -i
If the slave device being used is not blacklisted, it will be probed by the
@@ -150,7 +150,7 @@ This section shows some example of using **testpmd** with a fail-safe PMD.
.. code-block:: console
$RTE_TARGET/build/app/testpmd -c 0xff -n 4 \
- --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)'
+ --vdev 'net_failsafe0,mac=de:ad:be:ef:01:02,dev(84:00.0),dev(net_ring0)' \
-w 81:00.0 -- -i
#. Start testpmd using a flexible device definition
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index 37ffbc68..bfeae80e 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -131,7 +131,8 @@ Lock-free Tx queue
If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
* **[related] API**: ``rte_eth_tx_burst()``.
@@ -179,7 +180,7 @@ Jumbo frame
Supports Rx jumbo frames.
-* **[uses] user config**: ``dev_conf.rxmode.jumbo_frame``,
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
``dev_conf.rxmode.max_rx_pkt_len``.
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
* **[related] API**: ``rte_eth_dev_set_mtu()``.
@@ -192,7 +193,7 @@ Scattered Rx
Supports receiving segmented mbufs.
-* **[uses] user config**: ``dev_conf.rxmode.enable_scatter``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
* **[implements] datapath**: ``Scattered Rx function``.
* **[implements] rte_eth_dev_data**: ``scattered_rx``.
* **[provides] eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@@ -206,11 +207,11 @@ LRO
Supports Large Receive Offload.
-* **[uses] user config**: ``dev_conf.rxmode.enable_lro``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
.. _nic_features_tso:
@@ -220,11 +221,12 @@ TSO
Supports TCP Segmentation Offloading.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_TCP_SEG``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
-* **[provides] rte_eth_dev_info**: ``tx_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
.. _nic_features_promiscuous_mode:
@@ -363,7 +365,7 @@ VLAN filter
Supports filtering of a VLAN Tag identifier.
-* **[uses] user config**: ``dev_conf.rxmode.hw_vlan_filter``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
* **[implements] eth_dev_ops**: ``vlan_filter_set``.
* **[related] API**: ``rte_eth_dev_vlan_filter()``.
@@ -499,7 +501,7 @@ CRC offload
Supports CRC stripping by hardware.
-* **[uses] user config**: ``dev_conf.rxmode.hw_strip_crc``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_CRC_STRIP``.
.. _nic_features_vlan_offload:
@@ -509,12 +511,12 @@ VLAN offload
Supports VLAN offload to hardware.
-* **[uses] user config**: ``dev_conf.rxmode.hw_vlan_strip``,
- ``dev_conf.rxmode.hw_vlan_filter``, ``dev_conf.rxmode.hw_vlan_extend``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.vlan_tci``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
``rte_eth_dev_get_vlan_offload()``.
@@ -526,11 +528,13 @@ QinQ offload
Supports QinQ (queue in queue) offload.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ_PKT``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.vlan_tci``,
``mbuf.vlan_tci_outer``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
.. _nic_features_l3_checksum_offload:
@@ -540,14 +544,15 @@ L3 checksum offload
Supports L3 checksum offload.
-* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
.. _nic_features_l4_checksum_offload:
@@ -557,15 +562,29 @@ L4 checksum offload
Supports L4 checksum offload.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
+
+.. _nic_features_hw_timestamp:
+
+Timestamp offload
+-----------------
+
+Supports Timestamp.
+
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
+* **[provides] mbuf**: ``mbuf.timestamp``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TIMESTAMP``.
.. _nic_features_macsec_offload:
@@ -574,9 +593,11 @@ MACsec offload
Supports MACsec.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
.. _nic_features_inner_l3_checksum:
@@ -586,14 +607,16 @@ Inner L3 checksum
Supports inner packet L3 checksum.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_EIP_CKSUM_BAD``.
-* **[provides] rte_eth_dev_info**: ``rx_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
- ``tx_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
+* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
+ ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
.. _nic_features_inner_l4_checksum:
@@ -617,6 +640,15 @@ Supports packet type parsing and returns a list of supported types.
.. _nic_features_timesync:
+Mbuf fast free
+--------------
+
+Supports optimization for fast release of mbufs following successful Tx.
+Requires that, per queue, all mbufs come from the same mempool and have refcnt = 1.
+
+* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
+
Timesync
--------
diff --git a/doc/guides/nics/features/bnxt.ini b/doc/guides/nics/features/bnxt.ini
index 119132e1..ef45dd76 100644
--- a/doc/guides/nics/features/bnxt.ini
+++ b/doc/guides/nics/features/bnxt.ini
@@ -5,21 +5,38 @@
;
[Features]
Link status = Y
+Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
LRO = Y
+TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
+RSS hash = Y
+RSS key update = Y
RSS reta update = Y
+VMDq = Y
SR-IOV = Y
VLAN filter = Y
+Ethertype filter = Y
+N-tuple filter = Y
+Flow control = Y
+Flow API = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
+Packet type parsing = Y
VLAN offload = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
FW version = Y
+EEPROM dump = Y
LED = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index 54243069..dc527ddf 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -48,6 +48,7 @@ VLAN offload =
QinQ offload =
L3 checksum offload =
L4 checksum offload =
+Timestamp offload =
MACsec offload =
Inner L3 checksum =
Inner L4 checksum =
@@ -75,3 +76,4 @@ x86-64 =
Usage doc =
Design doc =
Perf doc =
+Mbuf fast free =
diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
new file mode 100644
index 00000000..24cfd856
--- /dev/null
+++ b/doc/guides/nics/features/dpaa.ini
@@ -0,0 +1,24 @@
+;
+; Supported features of the 'dpaa' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = P
+Link status = Y
+Jumbo frame = Y
+MTU update = Y
+Scattered Rx = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+RSS hash = Y
+Flow control = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Packet type parsing = Y
+Basic stats = Y
+Extended stats = Y
+FW version = Y
+ARMv8 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/dpaa2.ini b/doc/guides/nics/features/dpaa2.ini
index 146e087f..6ebbab4b 100644
--- a/doc/guides/nics/features/dpaa2.ini
+++ b/doc/guides/nics/features/dpaa2.ini
@@ -6,6 +6,7 @@
[Features]
Speed capabilities = P
Link status = Y
+Link status event = Y
Queue start/stop = Y
Jumbo frame = Y
MTU update = Y
@@ -20,6 +21,7 @@ L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Basic stats = Y
+Extended stats = Y
FW version = Y
Linux VFIO = Y
ARMv8 = Y
diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index 0de3ef53..498341f0 100644
--- a/doc/guides/nics/features/enic.ini
+++ b/doc/guides/nics/features/enic.ini
@@ -25,6 +25,7 @@ L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Basic stats = Y
+Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/features/fm10k_vec.ini b/doc/guides/nics/features/fm10k_vec.ini
deleted file mode 100644
index 4917e820..00000000
--- a/doc/guides/nics/features/fm10k_vec.ini
+++ /dev/null
@@ -1,35 +0,0 @@
-;
-; Supported features of the 'fm10k_vec' network poll mode driver.
-;
-; Refer to default.ini for the full list of available PMD features.
-;
-[Features]
-Speed capabilities = P
-Rx interrupt = Y
-Queue start/stop = Y
-Jumbo frame = Y
-Scattered Rx = Y
-TSO = Y
-Promiscuous mode = Y
-Allmulticast mode = Y
-Unicast MAC filter = Y
-Multicast MAC filter = Y
-RSS hash = Y
-RSS key update = Y
-RSS reta update = Y
-VMDq = Y
-VLAN filter = Y
-CRC offload = Y
-VLAN offload = Y
-L3 checksum offload = Y
-L4 checksum offload = Y
-Packet type parsing = Y
-Basic stats = Y
-Extended stats = Y
-Stats per queue = Y
-Multiprocess aware = Y
-BSD nic_uio = Y
-Linux UIO = Y
-Linux VFIO = Y
-x86-32 = Y
-x86-64 = Y
diff --git a/doc/guides/nics/features/liquidio.ini b/doc/guides/nics/features/liquidio.ini
index 3bea03a3..f628b764 100644
--- a/doc/guides/nics/features/liquidio.ini
+++ b/doc/guides/nics/features/liquidio.ini
@@ -7,8 +7,10 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
+Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
RSS key update = Y
diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini
index 1d5f2668..f6efd21d 100644
--- a/doc/guides/nics/features/mlx4.ini
+++ b/doc/guides/nics/features/mlx4.ini
@@ -27,7 +27,6 @@ Inner L4 checksum = Y
Packet type parsing = Y
Basic stats = Y
Stats per queue = Y
-Multiprocess aware = Y
Other kdrv = Y
Power8 = Y
x86-32 = Y
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 2913591c..c3636391 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+Removal event = Y
Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
@@ -38,6 +39,5 @@ Multiprocess aware = Y
Other kdrv = Y
ARMv8 = Y
Power8 = Y
-x86-32 = Y
x86-64 = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/mrvl.ini b/doc/guides/nics/features/mrvl.ini
new file mode 100644
index 00000000..00d96218
--- /dev/null
+++ b/doc/guides/nics/features/mrvl.ini
@@ -0,0 +1,23 @@
+;
+; Supported features of the 'mrvl' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+MTU update = Y
+Jumbo frame = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+Multicast MAC filter = Y
+RSS hash = Y
+VLAN filter = Y
+CRC offload = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Packet type parsing = Y
+Basic stats = Y
+ARMv8 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp_pf.ini
index a1281d2a..d2899e7f 100644
--- a/doc/guides/nics/features/nfp.ini
+++ b/doc/guides/nics/features/nfp_pf.ini
@@ -16,7 +16,6 @@ TSO = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
-SR-IOV = Y
Flow control = Y
VLAN offload = Y
L3 checksum offload = Y
diff --git a/doc/guides/nics/features/fm10k_vf_vec.ini b/doc/guides/nics/features/nfp_vf.ini
index 6f4a639a..d2899e7f 100644
--- a/doc/guides/nics/features/fm10k_vf_vec.ini
+++ b/doc/guides/nics/features/nfp_vf.ini
@@ -1,29 +1,28 @@
;
-; Supported features of the 'fm10kvf_vec' network poll mode driver.
+; Supported features of the 'nfp' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
-Speed capabilities = P
+Speed capabilities = Y
+Link status = Y
+Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
+MTU update = Y
Jumbo frame = Y
-Scattered Rx = Y
+Promiscuous mode = Y
TSO = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
-CRC offload = Y
+Flow control = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
-Packet type parsing = Y
Basic stats = Y
-Extended stats = Y
Stats per queue = Y
-Multiprocess aware = Y
-BSD nic_uio = Y
Linux UIO = Y
Linux VFIO = Y
-x86-32 = Y
x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/octeontx.ini b/doc/guides/nics/features/octeontx.ini
new file mode 100644
index 00000000..c92d5aa0
--- /dev/null
+++ b/doc/guides/nics/features/octeontx.ini
@@ -0,0 +1,19 @@
+;
+; Supported features of the 'octeontx' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+Lock-free Tx queue = Y
+Queue start/stop = P
+Jumbo frame = Y
+Promiscuous mode = Y
+Unicast MAC filter = P
+CRC offload = Y
+Packet type parsing = Y
+Basic stats = Y
+Linux VFIO = Y
+ARMv8 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/sfc_efx.ini b/doc/guides/nics/features/sfc_efx.ini
index 1db7f67b..03890f30 100644
--- a/doc/guides/nics/features/sfc_efx.ini
+++ b/doc/guides/nics/features/sfc_efx.ini
@@ -25,6 +25,8 @@ VLAN offload = P
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
FW version = Y
diff --git a/doc/guides/nics/features/thunderx.ini b/doc/guides/nics/features/thunderx.ini
index b9720be6..62685897 100644
--- a/doc/guides/nics/features/thunderx.ini
+++ b/doc/guides/nics/features/thunderx.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
Link status = Y
Link status event = Y
Queue start/stop = Y
diff --git a/doc/guides/nics/features/xenvirt.ini b/doc/guides/nics/features/xenvirt.ini
deleted file mode 100644
index 8ab5f465..00000000
--- a/doc/guides/nics/features/xenvirt.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-;
-; Supported features of the 'xenvirt' network poll mode driver.
-;
-; Refer to default.ini for the full list of available PMD features.
-;
-[Features]
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index 7fc48624..b47fc0db 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -161,6 +161,17 @@ FM10000 PMD driver. The switch driver can be acquired from Intel support.
Only Testpoint is validated with DPDK, the latest version that has been
validated with DPDK is 4.1.6.
+Support for Switch Restart
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For the FM10000 multi-host based design, a DPDK app running in the VM or host
+needs to be aware of the switch's state, since it may undergo a quit-restart.
+When the switch goes down, the DPDK app will receive an LSC event indicating
+link status down, and the app should stop the worker threads that are polling
+on the Rx/Tx queues. When the switch comes up, an LSC event indicating
+``LINK_UP`` is sent to the app, which can then restart the FM10000 port to
+resume network processing.
+
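+A sketch of wiring up such an LSC handler with the ethdev callback API (the
+``stop_worker_threads()`` and ``restart_port()`` helpers are hypothetical):
+
+.. code-block:: c
+
+ static int
+ lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
+              void *param, void *ret_param)
+ {
+     struct rte_eth_link link;
+
+     RTE_SET_USED(param);
+     RTE_SET_USED(ret_param);
+     if (type != RTE_ETH_EVENT_INTR_LSC)
+         return 0;
+     rte_eth_link_get_nowait(port_id, &link);
+     if (link.link_status == ETH_LINK_DOWN)
+         stop_worker_threads();  /* hypothetical helper */
+     else
+         restart_port(port_id);  /* hypothetical helper; real apps
+                                  * usually defer this out of the callback */
+     return 0;
+ }
+
+ /* register for LSC events on the port */
+ rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
+                               lsc_event_cb, NULL);
+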
CRC striping
~~~~~~~~~~~~
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index bc200d39..cd468748 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -78,6 +78,8 @@ Prerequisites
- To get better performance on Intel platforms, please follow the "How to get best performance with NICs on Intel platforms"
section of the :ref:`Getting Started Guide for Linux <linux_gsg>`.
+- Upgrade the NVM/FW version following the `Intel® Ethernet NVM Update Tool Quick Usage Guide for Linux
+ <https://www-ssl.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-linux-usage-guide.html>`_ if needed.
Pre-Installation Configuration
------------------------------
@@ -396,13 +398,24 @@ used to classify MPLS packet by using a command in testpmd like:
testpmd> ethertype_filter 0 add mac_ignr 00:00:00:00:00:00 ethertype \
0x8847 fwd queue <M>
-16 Byte Descriptor cannot be used on DPDK VF
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+16 Byte RX Descriptor setting on DPDK VF
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If the Linux i40e kernel driver is used as host driver, while DPDK i40e PMD
-is used as the VF driver, DPDK cannot choose 16 byte receive descriptor. That
-is to say, user should keep ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n`` in
-config file.
+Currently the VF's RX descriptor mode is decided by the PF. There is no PF-VF
+interface for the VF to request an RX descriptor mode, and no interface to
+notify the VF of its own RX descriptor mode.
+For all available versions of the i40e driver, these drivers don't support 16
+byte RX descriptors. If the Linux i40e kernel driver is used as the host driver,
+while the DPDK i40e PMD is used as the VF driver, DPDK cannot choose 16 byte
+receive descriptors. The reason is that the RX descriptor is already set to 32
+bytes by the i40e kernel driver. That is to say, the user should keep
+``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n`` in the config file.
+In the future, if the Linux i40e driver supports 16 byte RX descriptors, the
+user should make sure the DPDK VF uses the same RX descriptor mode, 16 byte or
+32 byte, as the PF driver.
+
+The same rule applies for DPDK PF + DPDK VF. The PF and VF should use the same
+RX descriptor mode, or the VF RX will not work.
Receive packets with Ethertype 0x88A8
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -465,6 +478,11 @@ enabled using the steps below.
setpci -s <XX:XX.X> a8.w=<value>
+VLAN strip of VF
+~~~~~~~~~~~~~~~~
+
+The VF VLAN strip function is only supported by i40e kernel drivers >= 2.1.26.
+
High Performance of Small Packets on 40G NIC
--------------------------------------------
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 36f4f3ff..23babe93 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -43,6 +43,7 @@ Network Interface Controller Drivers
bnx2x
bnxt
cxgbe
+ dpaa
dpaa2
e1000em
ena
@@ -55,7 +56,9 @@ Network Interface Controller Drivers
liquidio
mlx4
mlx5
+ mrvl
nfp
+ octeontx
qede
sfc_efx
szedata2
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 696ff693..c687c63f 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -227,6 +227,17 @@ So when the user sets different MTUs on PF and VF ports in one physical port,
the real MTU for all these PF and VF ports is the largest value set.
This behavior is based on the kernel driver behavior.
+VF MAC address setting
+~~~~~~~~~~~~~~~~~~~~~~
+
+On ixgbe, the concept of "pool" can be used for different things depending on
+the mode. In VMDq mode, "pool" means a VMDq pool. In IOV mode, "pool" means a
+VF.
+
+There is no RTE API to add a VF's MAC address from the PF. On ixgbe, the
+``rte_eth_dev_mac_addr_add()`` function can be used to add a VF's MAC address,
+as a workaround.
+
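+A brief sketch of this workaround, assuming ``port_id`` is the PF port and
+``vf_id`` selects the VF pool (the MAC address shown is only an illustration):
+
+.. code-block:: c
+
+ struct ether_addr vf_mac = {
+     .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
+ };
+
+ /* in IOV mode the "pool" argument selects the VF */
+ rte_eth_dev_mac_addr_add(port_id, &vf_mac, vf_id);
+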
Supported Chipsets and NICs
---------------------------
diff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst
index f04cb16d..7bc16040 100644
--- a/doc/guides/nics/liquidio.rst
+++ b/doc/guides/nics/liquidio.rst
@@ -42,7 +42,9 @@ Supported LiquidIO Adapters
-----------------------------
- LiquidIO II CN2350 210SV/225SV
+- LiquidIO II CN2350 210SVPT
- LiquidIO II CN2360 210SV/225SV
+- LiquidIO II CN2360 210SVPT
Pre-Installation Configuration
@@ -195,6 +197,14 @@ This section provides instructions to configure SR-IOV with Linux OS.
Done
testpmd>
+#. Enabling VF promiscuous mode
+
+ One VF per PF can be marked as trusted for promiscuous mode.
+
+ .. code-block:: console
+
+ ip link set dev <PF iface> vf <VF id> trust on
+
Limitations
-----------
diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst
index f8885b23..5c3fb764 100644
--- a/doc/guides/nics/mlx4.rst
+++ b/doc/guides/nics/mlx4.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright 2012-2015 6WIND S.A.
+ Copyright 2012 6WIND S.A.
Copyright 2015 Mellanox
Redistribution and use in source and binary forms, with or without
@@ -78,29 +78,9 @@ Features
--------
- Multi arch support: x86_64 and POWER8.
-- RSS, also known as RCA, is supported. In this mode the number of
- configured RX queues must be a power of two.
-- VLAN filtering is supported.
- Link state information is provided.
-- Promiscuous mode is supported.
-- All multicast mode is supported.
-- Multiple MAC addresses (unicast, multicast) can be configured.
-- Scattered packets are supported for TX and RX.
-- Inner L3/L4 (IP, TCP and UDP) TX/RX checksum offloading and validation.
-- Outer L3 (IP) TX/RX checksum offloading and validation for VXLAN frames.
-- Secondary process TX is supported.
- RX interrupts.
-Limitations
------------
-
-- RSS hash key cannot be modified.
-- RSS RETA cannot be configured
-- RSS always includes L3 (IPv4/IPv6) and L4 (UDP/TCP). They cannot be
- dissociated.
-- Hardware counters are not implemented (they are software counters).
-- Secondary process RX is not supported.
-
Configuration
-------------
@@ -127,13 +107,6 @@ These options can be modified in the ``.config`` file.
to abort with harmless debugging messages as a workaround.
Relevant only when CONFIG_RTE_LIBRTE_MLX4_DEBUG is enabled.
-- ``CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N`` (default **4**)
-
- Number of scatter/gather elements (SGEs) per work request (WR). Lowering
- this number improves performance but also limits the ability to receive
- scattered packets (packets that do not fit a single mbuf). The default
- value is a safe tradeoff.
-
- ``CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE`` (default **0**)
Amount of data to be inlined during TX operations. Improves latency but
@@ -147,11 +120,6 @@ These options can be modified in the ``.config`` file.
This value is always 1 for RX queues since they use a single MP.
-- ``CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS`` (default **1**)
-
- Toggle software counters. No counters are available if this option is
- disabled since hardware counters are not supported.
-
Environment variables
~~~~~~~~~~~~~~~~~~~~~
@@ -164,9 +132,6 @@ Environment variables
Run-time configuration
~~~~~~~~~~~~~~~~~~~~~~
-- The only constraint when RSS mode is requested is to make sure the number
- of RX queues is a power of two. This is a hardware requirement.
-
- librte_pmd_mlx4 brings kernel network interfaces up during initialization
because it is affected by their state. Forcing them down prevents packets
reception.
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f4cb18bc..f9558da8 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -92,21 +92,23 @@ Features
- Flow director (RTE_FDIR_MODE_PERFECT, RTE_FDIR_MODE_PERFECT_MAC_VLAN and
RTE_ETH_FDIR_REJECT).
- Flow API.
-- Secondary process TX is supported.
+- Multiple process support.
- KVM and VMware ESX SR-IOV modes are supported.
- RSS hash result is supported.
- Hardware TSO.
- Hardware checksum TX offload for VXLAN and GRE.
- RX interrupts.
- Statistics query including Basic, Extended and per queue.
+- Rx HW timestamp.
Limitations
-----------
- Inner RSS for VXLAN frames is not supported yet.
-- Port statistics through software counters only.
+- Port statistics through software counters only. Flow statistics are
+ supported by hardware counters.
- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
-- Secondary process RX is not supported.
+- Forked secondary process not supported.
- Flow pattern without any specific vlan will match for vlan packets as well:
When VLAN spec is not specified in the pattern, the matching rule will be created with VLAN as a wild card.
@@ -124,6 +126,14 @@ Limitations
Will match any ipv4 packet (VLAN included).
+- A multi-segment packet must have fewer than 6 segments when the Tx burst
+ function is set to multi-packet send or enhanced multi-packet send.
+ Otherwise it must have fewer than 50 segments.
+- Count action for RTE flow is only supported in Mellanox OFED 4.2.
+- Flows with a VXLAN Network Identifier equal (or that ends up being equal)
+ to 0 are not supported.
+- VXLAN TSO and checksum offloads are not supported on VM.
+
+
Configuration
-------------
@@ -167,6 +177,23 @@ Environment variables
This is disabled by default since this can also decrease performance for
unaligned packet sizes.
+- ``MLX5_SHUT_UP_BF``
+
+ Configures HW Tx doorbell register as IO-mapped.
+
+ By default, the HW Tx doorbell is configured as a write-combining register.
+ The register would be flushed to HW usually when the write-combining buffer
+ becomes full, but it depends on CPU design.
+
+ Except for vectorized Tx burst routines, a write memory barrier is enforced
+ after updating the register so that the update can be immediately visible to
+ HW.
+
+ When vectorized Tx burst is called, the barrier is set only if the burst size
+ is not aligned to MLX5_VPMD_TX_MAX_BURST. However, setting this environment
+ variable will bring better latency even though the maximum throughput can
+ slightly decline.
+
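+  For example, the variable can be exported in the shell before running the
+  application, or, as a minimal sketch, set programmatically before the PMD
+  initializes the device:
+
+  .. code-block:: c
+
+     #include <stdlib.h>
+
+     /* Request an IO-mapped Tx doorbell; set before rte_eal_init() so the
+      * mlx5 PMD sees it when probing the device. */
+     setenv("MLX5_SHUT_UP_BF", "1", 1);
+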
Run-time configuration
~~~~~~~~~~~~~~~~~~~~~~
@@ -293,7 +320,7 @@ DPDK and must be installed separately:
This library basically implements send/receive calls to the hardware
queues.
-- **Kernel modules** (mlnx-ofed-kernel)
+- **Kernel modules**
They provide the kernel-side Verbs API and low level device drivers that
manage actual hardware initialization and resources sharing with user
@@ -320,18 +347,33 @@ DPDK and must be installed separately:
Both libraries are BSD and GPL licensed. Linux kernel modules are GPL
licensed.
-Currently supported by DPDK:
+Installation
+~~~~~~~~~~~~
-- Mellanox OFED version: **4.1**.
-- firmware version:
+Use either the RDMA Core library with a recent enough Linux kernel release
+(recommended) or Mellanox OFED, which provides compatibility with older
+releases.
- - ConnectX-4: **12.20.1010** and above.
- - ConnectX-4 Lx: **14.20.1010** and above.
- - ConnectX-5: **16.20.1010** and above.
- - ConnectX-5 Ex: **16.20.1010** and above.
+RDMA Core with Linux Kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Getting Mellanox OFED
-~~~~~~~~~~~~~~~~~~~~~
+- Minimal kernel version: v4.14 or the most recent 4.14-rc (see `Linux installation documentation`_)
+- Minimal rdma-core version: v15+ commit 0c5f5765213a ("Merge pull request #227 from yishaih/tm")
+ (see `RDMA Core installation documentation`_)
+
+.. _`Linux installation documentation`: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/plain/Documentation/admin-guide/README.rst
+.. _`RDMA Core installation documentation`: https://raw.githubusercontent.com/linux-rdma/rdma-core/master/README.md
+
+Mellanox OFED
+^^^^^^^^^^^^^
+
+- Mellanox OFED version: **4.2**.
+- firmware version:
+
+ - ConnectX-4: **12.21.1000** and above.
+ - ConnectX-4 Lx: **14.21.1000** and above.
+ - ConnectX-5: **16.21.1000** and above.
+ - ConnectX-5 Ex: **16.21.1000** and above.
While these libraries and kernel modules are available on OpenFabrics
Alliance's `website <https://www.openfabrics.org/>`__ and provided by package
@@ -373,8 +415,8 @@ Supported NICs
* Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
* Mellanox(R) ConnectX(R)-5 Ex EN 100G MCX516A-CDAT (2x100G)
-Quick Start Guide
------------------
+Quick Start Guide on OFED
+-------------------------
1. Download the latest Mellanox OFED. For more info check the `prerequisites`_.
@@ -384,7 +426,7 @@ Quick Start Guide
.. code-block:: console
- ./mlnxofedinstall
+ ./mlnxofedinstall --upstream-libs --dpdk
3. Verify the firmware is the correct one:
diff --git a/doc/guides/nics/mrvl.rst b/doc/guides/nics/mrvl.rst
new file mode 100644
index 00000000..fbfdf478
--- /dev/null
+++ b/doc/guides/nics/mrvl.rst
@@ -0,0 +1,253 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Marvell International Ltd.
+ Copyright(c) 2017 Semihalf.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+MRVL Poll Mode Driver
+======================
+
+The MRVL PMD (librte_pmd_mrvl) provides poll mode driver support
+for the Marvell PPv2 (Packet Processor v2) 1/10 Gbps adapter.
+
+Detailed information about SoCs that use PPv2 can be obtained here:
+
+* https://www.marvell.com/embedded-processors/armada-70xx/
+* https://www.marvell.com/embedded-processors/armada-80xx/
+
+.. Note::
+
+   Due to external dependencies, this driver is disabled by default. It must
+   be enabled manually by setting the relevant configuration option.
+   Please refer to the `Config File Options`_ section for further details.
+
+
+Features
+--------
+
+Features of the MRVL PMD are:
+
+- Speed capabilities
+- Link status
+- Queue start/stop
+- MTU update
+- Jumbo frame
+- Promiscuous mode
+- Allmulticast mode
+- Unicast MAC filter
+- Multicast MAC filter
+- RSS hash
+- VLAN filter
+- CRC offload
+- L3 checksum offload
+- L4 checksum offload
+- Packet type parsing
+- Basic stats
+- QoS
+
+
+Limitations
+-----------
+
+- The number of lcores is limited to 9 by the MUSDK internal design. If more
+  lcores need to be allocated, locking will have to be considered. The number
+  of available lcores can be changed via the ``MRVL_MUSDK_HIFS_RESERVED``
+  define in the ``mrvl_ethdev.c`` source file.
+
+- Flushing VLANs added for filtering is not possible due to missing MUSDK
+  functionality. The current workaround is to reset the board so that PPv2 has
+  a chance to start in a sane state.
+
+
+Prerequisites
+-------------
+
+- Custom Linux Kernel sources available
+ `here <https://github.com/MarvellEmbeddedProcessors/linux-marvell/tree/linux-4.4.52-armada-17.08>`__.
+
+- Out of tree `mvpp2x_sysfs` kernel module sources available
+ `here <https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell/tree/mvpp2x-armada-17.08>`__.
+
+- MUSDK (Marvell User-Space SDK) sources available
+ `here <https://github.com/MarvellEmbeddedProcessors/musdk-marvell/tree/musdk-armada-17.08>`__.
+
+  MUSDK is a light-weight library that provides direct access to Marvell's
+  PPv2 (Packet Processor v2). Alternatively, a prebuilt MUSDK library can be
+  requested from `Marvell Extranet <https://extranet.marvell.com>`_. Once
+  approval has been granted, the library can be found by typing ``musdk`` in
+  the search box.
+
+ MUSDK must be configured with the following features:
+
+ .. code-block:: console
+
+ --enable-bpool-dma=64
+
+- DPDK environment
+
+  Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up
+  the DPDK environment.
+
+
+Config File Options
+-------------------
+
+The following options can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_MRVL_PMD`` (default ``n``)
+
+ Toggle compilation of the librte_pmd_mrvl driver.
+
+- ``CONFIG_RTE_MRVL_MUSDK_DMA_MEMSIZE`` (default ``41943040``)
+
+ Size in bytes of the contiguous memory region that MUSDK will allocate
+ for run-time DMA-able data buffers.
+
+
+QoS Configuration
+-----------------
+
+QoS configuration is done through an external configuration file. The path to
+the file must be given as `cfg` in the driver's vdev parameter list.
+
+Configuration syntax
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ [port <portnum> default]
+ default_tc = <default_tc>
+ qos_mode = <qos_priority>
+
+ [port <portnum> tc <traffic_class>]
+ rxq = <rx_queue_list>
+ pcp = <pcp_list>
+ dscp = <dscp_list>
+
+ [port <portnum> tc <traffic_class>]
+ rxq = <rx_queue_list>
+ pcp = <pcp_list>
+ dscp = <dscp_list>
+
+Where:
+
+- ``<portnum>``: DPDK Port number (0..n).
+
+- ``<default_tc>``: Default traffic class (e.g. 0).
+
+- ``<qos_priority>``: QoS priority for mapping (`ip`, `vlan`, `ip/vlan` or `vlan/ip`).
+
+- ``<traffic_class>``: Traffic Class to be configured.
+
+- ``<rx_queue_list>``: List of DPDK RX queues (e.g. 0 1 3-4).
+
+- ``<pcp_list>``: List of PCP values to handle in particular TC (e.g. 0 1 3-4 7).
+
+- ``<dscp_list>``: List of DSCP values to handle in particular TC (e.g. 0-12 32-48 63).
+
+Setting PCP/DSCP values for the default TC is not required. All PCP/DSCP
+values not assigned explicitly to a particular TC will be handled by the
+default TC.
+
+Configuration file example
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ [port 0 default]
+ default_tc = 0
+ qos_mode = ip
+
+ [port 0 tc 0]
+ rxq = 0 1
+
+ [port 0 tc 1]
+ rxq = 2
+ pcp = 5 6 7
+ dscp = 26-38
+
+ [port 1 default]
+ default_tc = 0
+ qos_mode = vlan/ip
+
+ [port 1 tc 0]
+ rxq = 0
+
+ [port 1 tc 1]
+ rxq = 1 2
+ pcp = 5 6 7
+ dscp = 26-38
+
+Usage example
+^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2,cfg=/home/user/mrvl.conf \
+ -c 7 -- -i -a --disable-hw-vlan-strip --rxq=2
+
+
+Building DPDK
+-------------
+
+The driver needs a precompiled MUSDK library during compilation. Please
+consult ``doc/musdk_get_started.txt`` for detailed build instructions.
+
+Before the DPDK build process, the environment variable ``LIBMUSDK_PATH``,
+containing the path to the MUSDK installation directory, needs to be exported.
+
+
+Usage Example
+-------------
+
+The MRVL PMD requires extra out-of-tree kernel modules to function properly.
+The `musdk_uio` and `mv_pp_uio` sources are part of the MUSDK. Please consult
+``doc/musdk_get_started.txt`` for detailed build instructions.
+For `mvpp2x_sysfs` please consult ``Documentation/pp22_sysfs.txt`` for
+detailed build instructions.
+
+.. code-block:: console
+
+ insmod musdk_uio.ko
+ insmod mv_pp_uio.ko
+ insmod mvpp2x_sysfs.ko
+
+Additionally, the interfaces used by the DPDK application need to be brought up:
+
+.. code-block:: console
+
+ ip link set eth0 up
+ ip link set eth1 up
+
+In order to run the testpmd example application, the following command can be used:
+
+.. code-block:: console
+
+ ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2 -c 7 -- \
+ --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \
+ -i -a --disable-hw-vlan-strip --rss-udp
diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
index c732fb1f..99a3b76e 100644
--- a/doc/guides/nics/nfp.rst
+++ b/doc/guides/nics/nfp.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2015 Netronome Systems, Inc. All rights reserved.
+ Copyright(c) 2015-2017 Netronome Systems, Inc. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,31 +38,32 @@ up to 400 Gbps.
This document explains how to use DPDK with the Netronome Poll Mode
Driver (PMD) supporting Netronome's Network Flow Processor 6xxx
-(NFP-6xxx).
+(NFP-6xxx) and Netronome's Flow Processor 4xxx (NFP-4xxx).
-Currently the driver supports virtual functions (VFs) only.
+NFP is an SR-IOV capable device and the PMD supports the physical
+function (PF) and virtual functions (VFs).
Dependencies
------------
-Before using the Netronome's DPDK PMD some NFP-6xxx configuration,
+Before using Netronome's DPDK PMD some NFP configuration,
which is not related to DPDK, is required. The system requires
-installation of **Netronome's BSP (Board Support Package)** which includes
-Linux drivers, programs and libraries.
+installation of **Netronome's BSP (Board Support Package)** along
+with some specific NFP firmware application. Netronome's NSP ABI
+version should be 0.20 or higher.
-If you have a NFP-6xxx device you should already have the code and
-documentation for doing this configuration. Contact
+If you have an NFP device you should already have the code and
+documentation for doing all this configuration. Contact
**support@netronome.com** to obtain the latest available firmware.
-The NFP Linux kernel drivers (including the required PF driver for the
-NFP) are available on Github at
+The NFP Linux netdev kernel driver for VFs has been part of the vanilla kernel
+since kernel version 4.5, and support for the PF was added in kernel version
+4.11. Support for older kernels can be obtained on Github at
**https://github.com/Netronome/nfp-drv-kmods** along with build
instructions.
-DPDK runs in userspace and PMDs uses the Linux kernel UIO interface to
-allow access to physical devices from userspace. The NFP PMD requires
-the **igb_uio** UIO driver, available with DPDK, to perform correct
-initialization.
+The NFP PMD needs to be used along with the UIO ``igb_uio`` or VFIO
+(``vfio-pci``) Linux kernel driver.
Building the software
---------------------
@@ -71,7 +72,7 @@ Netronome's PMD code is provided in the **drivers/net/nfp** directory.
Although NFP PMD has Netronome's BSP dependencies, it is possible to
compile it along with other DPDK PMDs even if no BSP was installed before.
Of course, a DPDK app will require such a BSP installed for using the
-NFP PMD.
+NFP PMD, along with a specific NFP firmware application.
Default PMD configuration is at **common_linuxapp configuration** file:
@@ -87,14 +88,53 @@ Driver compilation and testing
Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
for details.
+Using the PF
+------------
+
+NFP PMD has support for using the NFP PF as another DPDK port, but it does not
+have any functionality for controlling VFs. In fact, it is not possible to use
+the PMD with the VFs if the PF is being used by DPDK, that is, with the NFP PF
+bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK versions will
+have a PMD able to work with the PF and VFs at the same time and with the PF
+implementing VF management along with other PF-only functionalities/offloads.
+
+The PMD PF has extra work to do which will delay the DPDK app initialization,
+like checking if a firmware is already available in the device, uploading the
+firmware if necessary, and configuring the link state properly when starting or
+stopping a PF port. Note that the firmware upload, which is the main source of
+delay during NFP PF PMD initialization, is not always necessary.
+
+Depending on the Netronome product installed in the system, firmware files
+should be available under ``/lib/firmware/netronome``. DPDK PMD supporting the
+PF requires a specific link, ``/lib/firmware/netronome/nic_dpdk_default.nffw``,
+which should be created automatically with Netronome's Agilio products
+installation.
+
+PF multiport support
+--------------------
+
+Some NFP cards support several physical ports with a single PCI device.
+DPDK core is designed with the 1:1 relationship between PCI devices and DPDK
+ports, so NFP PMD PF support requires handling the multiport case specifically.
+During NFP PF initialization, the PMD will extract the information about the
+number of PF ports from the firmware and will create as many DPDK ports as
+needed.
+
+Because of the unusual relationship between a single PCI device and several
+DPDK ports, there are some limitations when using more than one PF DPDK port:
+there is no support for RX interrupts, and it is not possible to use those PF
+ports with the device hotplug functionality.
+
System configuration
--------------------
-#. **Enable SR-IOV on the NFP-6xxx device:** The current NFP PMD works with
- Virtual Functions (VFs) on a NFP device. Make sure that one of the Physical
- Function (PF) drivers from the above Github repository is installed and
- loaded.
+#. **Enable SR-IOV on the NFP device:** The current NFP PMD supports the PF and
+   the VFs on an NFP device. However, it is not possible to work with both at
+   the same time because the VFs require the PF to be bound to the NFP PF Linux
+   netdev driver. Make sure you are working with a kernel with NFP PF support,
+   or get the drivers from the above Github repository and follow the
+   instructions for building and installing them.
Virtual Functions need to be enabled before they can be used with the PMD.
Before enabling the VFs it is useful to obtain information about the
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
new file mode 100644
index 00000000..a6631cd0
--- /dev/null
+++ b/doc/guides/nics/octeontx.rst
@@ -0,0 +1,252 @@
+.. BSD LICENSE
+ Copyright (C) Cavium, Inc. 2017.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+OCTEONTX Poll Mode driver
+=========================
+
+The OCTEONTX ETHDEV PMD (**librte_pmd_octeontx**) provides poll mode ethdev
+driver support for the inbuilt network device found in the **Cavium OCTEONTX**
+SoC family as well as their virtual functions (VF) in SR-IOV context.
+
+More information can be found at `Cavium, Inc Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Features of the OCTEONTX Ethdev PMD are:
+
+- Packet type information
+- Promiscuous mode
+- Port hardware statistics
+- Jumbo frames
+- Link state information
+- SR-IOV VF
+- Multiple queues for TX
+- Lock-free Tx queue
+- HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
+
+Supported OCTEONTX SoCs
+-----------------------
+
+- CN83xx
+
+Unsupported features
+--------------------
+
+The features supported by the device and not yet supported by this PMD include:
+
+- Receive Side Scaling (RSS)
+- Scatter and gather for TX and RX
+- Ingress classification support
+- Egress hierarchical scheduling, traffic shaping, and marking
+
+Prerequisites
+-------------
+
+There are three main prerequisites for executing the OCTEONTX PMD on an
+OCTEONTX-compatible board:
+
+1. **OCTEONTX Linux kernel PF driver for Network acceleration HW blocks**
+
+   The OCTEONTX Linux kernel drivers (including the required PF driver for
+   all the network acceleration blocks) are available on GitHub at
+   `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
+   along with build, install and DPDK usage instructions.
+
+2. **ARM64 Tool Chain**
+
+ For example, the *aarch64* Linaro Toolchain, which can be obtained from
+ `here <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+
+3. **Root file system**
+
+   Any *aarch64*-supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+   As an alternative method, the OCTEONTX PMD can also be executed using
+   images provided as part of the SDK from Cavium. The SDK includes all the
+   above prerequisites necessary to bring up an OCTEONTX board.
+
+ SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+
+Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_PMD`` (default ``y``)
+
+ Toggle compilation of the ``librte_pmd_octeontx`` driver.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER`` (default ``n``)
+
+  Toggle display of generic debugging messages.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_INIT`` (default ``n``)
+
+ Toggle display of initialization related messages.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_RX`` (default ``n``)
+
+  Toggle display of receive path messages.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_TX`` (default ``n``)
+
+  Toggle display of transmit path messages.
+
+- ``CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_MBOX`` (default ``n``)
+
+  Toggle display of mbox related messages.
+
+
+Driver compilation and testing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+To compile the OCTEONTX PMD for the Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-thunderx-linuxapp-gcc install
+
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ ./arm64-thunderx-linuxapp-gcc/app/testpmd -c 700 \
+ --base-virtaddr=0x100000000000 \
+ --mbuf-pool-ops-name="octeontx_fpavf" \
+ --vdev='event_octeontx' \
+ --vdev='eth_octeontx,nr_port=2' \
+ -- --rxq=1 --txq=1 --nb-core=2 --total-num-mbufs=16384 \
+ --disable-hw-vlan-filter -i
+ .....
+ EAL: Detected 24 lcore(s)
+ EAL: Probing VFIO support...
+ EAL: VFIO support initialized
+ .....
+ EAL: PCI device 0000:07:00.1 on NUMA socket 0
+ EAL: probe driver: 177d:a04b octeontx_ssovf
+ .....
+ EAL: PCI device 0001:02:00.7 on NUMA socket 0
+ EAL: probe driver: 177d:a0dd octeontx_pkivf
+ .....
+ EAL: PCI device 0001:03:01.0 on NUMA socket 0
+ EAL: probe driver: 177d:a049 octeontx_pkovf
+ .....
+ PMD: octeontx_probe(): created ethdev eth_octeontx for port 0
+ PMD: octeontx_probe(): created ethdev eth_octeontx for port 1
+ .....
+ Configuring Port 0 (socket 0)
+ Port 0: 00:0F:B7:11:94:46
+ Configuring Port 1 (socket 0)
+ Port 1: 00:0F:B7:11:94:47
+ .....
+ Checking link statuses...
+ Port 0 Link Up - speed 40000 Mbps - full-duplex
+ Port 1 Link Up - speed 40000 Mbps - full-duplex
+ Done
+ testpmd>
+
+
+Initialization
+--------------
+
+The octeontx ethdev PMD is exposed as a vdev device which consists of a set
+of PKI and PKO PCIe VF devices. On EAL initialization, the PKI/PKO PCIe VF
+devices will be probed, and then the vdev device can be created from the
+application code, or from the EAL command line, based on the number of
+PKI/PKO PCIe VF devices probed/bound to DPDK, by
+
+* Invoking ``rte_vdev_init("eth_octeontx")`` from the application
+
+* Using ``--vdev="eth_octeontx"`` in the EAL options, which will call
+ rte_vdev_init() internally
+
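+For illustration, a minimal sketch of the application-code path, assuming EAL
+is already initialized and two ports are requested:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <rte_vdev.h>
+
+    /* Create the octeontx ethdev on top of the probed PKI/PKO VF devices. */
+    int ret = rte_vdev_init("eth_octeontx", "nr_port=2");
+    if (ret < 0)
+        printf("eth_octeontx creation failed\n");
+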
+Device arguments
+~~~~~~~~~~~~~~~~
+Each ethdev port is mapped to a physical port (LMAC). The application can
+specify the number of ports of interest with the ``nr_port`` argument.
+
+Dependency
+~~~~~~~~~~
+The ``eth_octeontx`` PMD depends on the ``event_octeontx`` eventdev device
+and the ``octeontx_fpavf`` external mempool handler.
+
+Example:
+
+.. code-block:: console
+
+ ./your_dpdk_application --mbuf-pool-ops="octeontx_fpavf" \
+ --vdev='event_octeontx' \
+ --vdev="eth_octeontx,nr_port=2"
+
+Limitations
+-----------
+
+``octeontx_fpavf`` external mempool handler dependency
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The OCTEONTX SoC family NIC has an inbuilt HW-assisted external mempool
+manager. This driver will only work with the ``octeontx_fpavf`` external
+mempool handler, as it is the most performance-effective way for packet
+allocation and Tx buffer recycling on the OCTEONTX SoC platform.
+
+CRC striping
+~~~~~~~~~~~~
+
+The OCTEONTX SoC family NICs strip the CRC for every packet coming into the
+host interface. So, CRC will be stripped even when the
+``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``.
+
+Maximum packet length
+~~~~~~~~~~~~~~~~~~~~~
+
+The OCTEONTX SoC family NICs support a maximum jumbo frame size of 32K. The
+value is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+member of ``struct rte_eth_conf`` is set to a value lower than 32K, frames
+up to 32K bytes can still reach the host interface.
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 7761989c..ae2b54a2 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -148,6 +148,8 @@ Supported actions:
- QUEUE
+- RSS
+
Validating flow rules depends on the firmware variant.
Ethernet destination individual/group match
@@ -243,12 +245,16 @@ boolean parameters value.
features available and required by the datapath implementation.
**efx** chooses libefx-based datapath which supports VLAN insertion
(full-feature firmware variant only), TSO and multi-segment mbufs.
+ Mbuf segments may come from different mempools, and mbuf reference
+ counters are treated responsibly.
**ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
more efficient than libefx-based but has no VLAN insertion and TSO
support yet.
+ Mbuf segments may come from different mempools, and mbuf reference
+ counters are treated responsibly.
**ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which
is even faster than **ef10** but does not support multi-segment
- mbufs.
+ mbufs, disallows multiple mempools and neglects mbuf reference counters.
- ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index f3ee95d2..04086b11 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -113,7 +113,14 @@ Flow API support
----------------
The tap PMD supports major flow API pattern items and actions, when running on
-linux kernels above 4.2 ("Flower" classifier required). Supported items:
+Linux kernels above 4.2 ("Flower" classifier required).
+The kernel support can be checked with this command::
+
+ zcat /proc/config.gz | ( grep 'CLS_FLOWER=' || echo 'not supported' ) |
+ tee -a /dev/stderr | grep -q '=m' &&
+ lsmod | ( grep cls_flower || echo 'try modprobe cls_flower' )
+
+Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid. (requires kernel 4.9)
diff --git a/doc/guides/nics/vhost.rst b/doc/guides/nics/vhost.rst
index e651a166..4f7ae899 100644
--- a/doc/guides/nics/vhost.rst
+++ b/doc/guides/nics/vhost.rst
@@ -66,6 +66,11 @@ The user can specify below arguments in `--vdev` option.
It is used to specify the number of queues the virtio-net device has.
(Default: 1)
+#. ``iommu-support``:
+
+    It is used to enable IOMMU support in the vhost library.
+ (Default: 0 (disabled))
+
Vhost PMD event handling
------------------------
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 4d6a8376..af82f86e 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -291,6 +291,13 @@ interrupt, Rx interrupts, and Tx interrupts. Config interrupt is used for
notification of device configuration changes, especially link status (lsc).
Interrupt mode is translated into Rx interrupts in the context of DPDK.
+.. Note::
+
+ Virtio PMD already has support for receiving lsc from qemu when the link
+ status changes, especially when vhost user disconnects. However, it fails
+   to do that if the VM is created by qemu 2.6.2 or below, since the
+   capability to detect vhost user disconnection was introduced in qemu 2.7.0.
+
Prerequisites for Rx interrupts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index 75ae085f..2b338b92 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -539,12 +539,12 @@ chain.
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} digest; /**< Digest parameters */
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} aad;
/**< Additional authentication parameters */
} aead;
@@ -566,7 +566,7 @@ chain.
struct {
uint8_t *data;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
} digest; /**< Digest parameters */
} auth;
};
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index fff1c063..9e834fc5 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -117,17 +117,6 @@ The physical address of the reserved memory for that memory zone is also returne
Memory reservations done using the APIs provided by rte_malloc are also backed by pages from the hugetlbfs filesystem.
-Xen Dom0 support without hugetbls
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The existing memory management implementation is based on the Linux kernel hugepage mechanism.
-However, Xen Dom0 does not support hugepages, so a new Linux kernel module rte_dom0_mm is added to workaround this limitation.
-
-The EAL uses IOCTL interface to notify the Linux kernel module rte_dom0_mm to allocate memory of specified size,
-and get all memory segments information from the module,
-and the EAL uses MMAP interface to map the allocated memory.
-For each memory segment, the physical addresses are contiguous within it but actual hardware addresses are contiguous within 2MB.
-
PCI Access
~~~~~~~~~~
@@ -164,7 +153,7 @@ which can trigger the generation of a core file, readable by gdb.
CPU Feature Identification
~~~~~~~~~~~~~~~~~~~~~~~~~~
-The EAL can query the CPU at runtime (using the rte_cpu_get_feature() function) to determine which CPU features are available.
+The EAL can query the CPU at runtime (using the rte_cpu_get_features() function) to determine which CPU features are available.
User Space Interrupt Event
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
new file mode 100644
index 00000000..4e722219
--- /dev/null
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -0,0 +1,168 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Event Ethernet Rx Adapter Library
+=================================
+
+The DPDK Eventdev API allows the application to use an event driven programming
+model for packet processing. In this model, the application polls an event
+device port for receiving events that reference packets instead of polling Rx
+queues of ethdev ports. Packet transfer between ethdev and the event device can
+be supported in hardware or require a software thread to receive packets from
+the ethdev port using ethdev poll mode APIs and enqueue these as events to the
+event device using the eventdev API. Both transfer mechanisms may be present on
+the same platform depending on the particular combination of the ethdev and
+the event device.
+
+The Event Ethernet Rx Adapter library allows the application code to
+configure both transfer mechanisms using a common API. A capability API allows
+the eventdev PMD to advertise the features supported for a given ethdev and
+allows the application to perform configuration as per the supported features.
+
+API Walk-through
+----------------
+
+This section will introduce the reader to the adapter API. The
+application has to first instantiate an adapter, which is associated with
+a single eventdev. Next, the adapter instance is configured with Rx queues
+that are either polled by a SW thread or linked using hardware support.
+Finally, the adapter is started.
+
+For SW based packet transfers from ethdev to eventdev, the adapter uses a
+DPDK service function and the application is also required to assign a core to
+the service function.
+
+Creating an Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An adapter instance is created using ``rte_event_eth_rx_adapter_create()``. This
+function is passed the event device to be associated with the adapter and port
+configuration for the adapter to set up an event port if the adapter needs to use
+a service function.
+
+.. code-block:: c
+
+ int err;
+ uint8_t dev_id;
+ struct rte_event_dev_info dev_info;
+ struct rte_event_port_conf rx_p_conf;
+
+    err = rte_event_dev_info_get(dev_id, &dev_info);
+
+ rx_p_conf.new_event_threshold = dev_info.max_num_events;
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+ err = rte_event_eth_rx_adapter_create(id, dev_id, &rx_p_conf);
+
+If the application desires to have finer control of eventdev port allocation
+and setup, it can use the ``rte_event_eth_rx_adapter_create_ext()`` function.
+The ``rte_event_eth_rx_adapter_create_ext()`` function is passed a callback
+function. The callback function is invoked if the adapter needs to use a
+service function and needs to create an event port for it. The callback is
+expected to fill the ``struct rte_event_eth_rx_adapter_conf`` structure
+passed to it.
+
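+A minimal sketch of such a callback; the event port ``port_id`` is assumed to
+have been set up by the application beforehand, and the field names follow
+``struct rte_event_eth_rx_adapter_conf``:
+
+.. code-block:: c
+
+    static int
+    adapter_conf_cb(uint8_t id, uint8_t dev_id,
+                    struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+    {
+        /* Hand a pre-configured event port to the adapter's service
+         * function and bound the events enqueued per service run. */
+        conf->event_port_id = port_id;
+        conf->max_nb_rx = 128;
+        return 0;
+    }
+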
+Adding Rx Queues to the Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ethdev Rx queues are added to the instance using the
+``rte_event_eth_rx_adapter_queue_add()`` function. Configuration for the Rx
+queue is passed in using a ``struct rte_event_eth_rx_adapter_queue_conf``
+parameter. Event information for packets from this Rx queue is encoded in the
+``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The
+``servicing_weight`` member of ``struct rte_event_eth_rx_adapter_queue_conf``
+is the relative polling frequency of the Rx queue and is applicable when the
+adapter uses a service core function.
+
+.. code-block:: c
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ queue_config.rx_queue_flags = 0;
+ queue_config.ev = ev;
+ queue_config.servicing_weight = 1;
+
+ err = rte_event_eth_rx_adapter_queue_add(id,
+ eth_dev_id,
+ 0, &queue_config);
+
+Querying Adapter Capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_caps_get()`` function allows
+the application to query the adapter capabilities for an eventdev and ethdev
+combination. For example, if ``RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID``
+is set, the application can override the adapter-generated flow ID in the event
+using ``rx_queue_flags`` field in ``struct rte_event_eth_rx_adapter_queue_conf``
+which is passed as a parameter to the ``rte_event_eth_rx_adapter_queue_add()``
+function.
+
+.. code-block:: c
+
+ err = rte_event_eth_rx_adapter_caps_get(dev_id, eth_dev_id, &cap);
+
+ queue_config.rx_queue_flags = 0;
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+ ev.flow_id = 1;
+ queue_config.rx_queue_flags =
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ }
+
+Configuring the Service Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the adapter uses a service function, the application is required to assign
+a service core to the service function as shown below.
+
+.. code-block:: c
+
+ uint32_t service_id;
+
+ if (rte_event_eth_rx_adapter_service_id_get(0, &service_id) == 0)
+ rte_service_map_lcore_set(service_id, RX_CORE_ID);
+
+Starting the Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application calls ``rte_event_eth_rx_adapter_start()`` to start the adapter.
+This function calls the start callbacks of the eventdev PMDs for hardware based
+eventdev-ethdev connections and ``rte_service_run_state_set()`` to enable the
+service function if one exists.
+
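+For example:
+
+.. code-block:: c
+
+    err = rte_event_eth_rx_adapter_start(id);
+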
+Getting Adapter Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_eth_rx_adapter_stats_get()`` function reports counters defined
+in struct ``rte_event_eth_rx_adapter_stats``. The received packet and
+enqueued event counts are a sum of the counts from the eventdev PMD callbacks
+if the callback is supported, and the counts maintained by the service function,
+if one exists. The service function also maintains a count of cycles for which
+it was not able to enqueue to the event device.
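+
+A minimal sketch of a stats query, reusing the adapter ``id`` from the
+examples above:
+
+.. code-block:: c
+
+    struct rte_event_eth_rx_adapter_stats stats;
+
+    err = rte_event_eth_rx_adapter_stats_get(id, &stats);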
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index 908d123a..be9fccdd 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -217,7 +217,7 @@ calling the setup function. Repeat this step for each queue, starting from
.. code-block:: c
struct rte_event_queue_conf atomic_conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -320,7 +320,7 @@ The following code shows how those packets can be enqueued into the eventdev:
for (i = 0; i < nb_rx; i++) {
ev[i].flow_id = mbufs[i]->hash.rss;
ev[i].op = RTE_EVENT_OP_NEW;
- ev[i].sched_type = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+ ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
ev[i].queue_id = 0;
ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
ev[i].sub_event_type = 0;
diff --git a/doc/guides/prog_guide/flow_classify_lib.rst b/doc/guides/prog_guide/flow_classify_lib.rst
new file mode 100644
index 00000000..820dc72a
--- /dev/null
+++ b/doc/guides/prog_guide/flow_classify_lib.rst
@@ -0,0 +1,427 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Flow Classification Library
+===========================
+
+DPDK provides a Flow Classification library that gives the ability
+to classify an input packet by matching it against a set of Flow rules.
+
+The initial implementation supports only the counting of IPv4 5-tuple packets
+that match a particular Flow rule.
+
+Please refer to the
+:doc:`./rte_flow`
+for more information.
+
+The Flow Classification library uses the ``librte_table`` API for managing Flow
+rules and matching packets against the Flow rules.
+The library is table agnostic and can use the following tables:
+``Access Control List``, ``Hash`` and ``Longest Prefix Match (LPM)``.
+The ``Access Control List`` table is used in the initial implementation.
+
+Please refer to the
+:doc:`./packet_framework`
+for more information on ``librte_table``.
+
+DPDK provides an Access Control List library that provides the ability to
+classify an input packet based on a set of classification rules.
+
+Please refer to the
+:doc:`./packet_classif_access_ctrl`
+library for more information on ``librte_acl``.
+
+There is also a Flow Classify sample application which demonstrates the use of
+the Flow Classification Library APIs.
+
+Please refer to the
+:doc:`../sample_app_ug/flow_classify`
+for more information on the ``flow_classify`` sample application.
+
+Overview
+--------
+
+The library has the following APIs:
+
+.. code-block:: c
+
+ /**
+ * Flow classifier create
+ *
+ * @param params
+ * Parameters for flow classifier creation
+ * @return
+ * Handle to flow classifier instance on success or NULL otherwise
+ */
+ struct rte_flow_classifier *
+ rte_flow_classifier_create(struct rte_flow_classifier_params *params);
+
+ /**
+ * Flow classifier free
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+ int
+ rte_flow_classifier_free(struct rte_flow_classifier *cls);
+
+ /**
+ * Flow classify table create
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @param params
+ * Parameters for flow_classify table creation
+ * @param table_id
+ * Table ID. Valid only within the scope of table IDs of the current
+ * classifier. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+ int
+ rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params,
+ uint32_t *table_id);
+
+ /**
+ * Add a flow classify rule to the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * id of table
+ * @param[in] attr
+ * Flow rule attributes
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+     * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Structure
+ * initialised in case of error only.
+ * @return
+ * A valid handle in case of success, NULL otherwise.
+ */
+ struct rte_flow_classify_rule *
+ rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+ /**
+ * Delete a flow classify rule from the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * id of table
+ * @param[in] rule
+ * Flow classify rule
+ * @return
+ * 0 on success, error code otherwise.
+ */
+ int
+ rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_flow_classify_rule *rule);
+
+ /**
+ * Query flow classifier for given rule.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * id of table
+ * @param[in] pkts
+ * Pointer to packets to process
+ * @param[in] nb_pkts
+ * Number of packets to process
+ * @param[in] rule
+ * Flow classify rule
+ * @param[in] stats
+ * Flow classify stats
+ *
+ * @return
+ * 0 on success, error code otherwise.
+ */
+ int
+ rte_flow_classifier_query(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats);
+
+Classifier creation
+~~~~~~~~~~~~~~~~~~~
+
+The application creates the ``Classifier`` using the
+``rte_flow_classifier_create`` API.
+The ``rte_flow_classifier_params`` structure must be initialised by the
+application before calling the API.
+
+.. code-block:: c
+
+ struct rte_flow_classifier_params {
+ /** flow classifier name */
+ const char *name;
+
+ /** CPU socket ID where memory for the flow classifier and its */
+ /** elements (tables) should be allocated */
+ int socket_id;
+
+ /** Table type */
+ enum rte_flow_classify_table_type type;
+ };
+
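+As an illustrative sketch, assuming the ACL table type from the initial
+implementation and the caller's socket:
+
+.. code-block:: c
+
+    struct rte_flow_classifier *cls;
+    struct rte_flow_classifier_params cls_params = {
+        .name = "flow_classifier",
+        .socket_id = rte_socket_id(),
+        .type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,
+    };
+
+    cls = rte_flow_classifier_create(&cls_params);
+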
+The ``Classifier`` has the following internal structures:
+
+.. code-block:: c
+
+ struct rte_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ uint32_t entry_size;
+ enum rte_flow_classify_table_type type;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+ };
+
+ #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
+
+ struct rte_flow_classifier {
+ /* Input parameters */
+ char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
+ int socket_id;
+ enum rte_flow_classify_table_type type;
+
+ /* Internal tables */
+ struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
+ uint32_t num_tables;
+ uint16_t nb_pkts;
+ struct rte_flow_classify_table_entry
+ *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ } __rte_cache_aligned;
+
+Adding a table to the Classifier
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application adds a table to the ``Classifier`` using the
+``rte_flow_classify_table_create`` API.
+The ``rte_flow_classify_table_params`` structure must be initialised by the
+application before calling the API.
+
+.. code-block:: c
+
+ struct rte_flow_classify_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+
+ /** Opaque param to be passed to the table create operation */
+ void *arg_create;
+
+ /** Memory size to be reserved per classifier object entry for */
+ /** storing meta data */
+ uint32_t table_metadata_size;
+ };
+
+To create an ACL table the ``rte_table_acl_params`` structure must be
+initialised and assigned to ``arg_create`` in the
+``rte_flow_classify_table_params`` structure.
+
+.. code-block:: c
+
+ struct rte_table_acl_params {
+ /** Name */
+ const char *name;
+
+ /** Maximum number of ACL rules in the table */
+ uint32_t n_rules;
+
+ /** Number of fields in the ACL rule specification */
+ uint32_t n_rule_fields;
+
+ /** Format specification of the fields of the ACL rule */
+ struct rte_acl_field_def field_format[RTE_ACL_MAX_FIELDS];
+ };
+
+The fields for the ACL rule must also be initialised by the application.
+
+An ACL table can be added to the ``Classifier`` for each ACL rule; for example,
+another table could be added for the IPv6 5-tuple rule.
+
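+A sketch of adding an ACL table, assuming ``acl_params`` is a populated
+``struct rte_table_acl_params`` and ``cls`` is the classifier handle from the
+previous example:
+
+.. code-block:: c
+
+    int err;
+    uint32_t table_id;
+    struct rte_flow_classify_table_params table_params = {
+        .ops = &rte_table_acl_ops,
+        .arg_create = &acl_params,
+    };
+
+    err = rte_flow_classify_table_create(cls, &table_params, &table_id);
+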
+Flow Parsing
+~~~~~~~~~~~~
+
+The library currently supports three IPv4 5-tuple flow patterns, for UDP, TCP
+and SCTP.
+
+.. code-block:: c
+
+ /* Pattern for IPv4 5-tuple UDP filter */
+ static enum rte_flow_item_type pattern_ntuple_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ };
+
+ /* Pattern for IPv4 5-tuple TCP filter */
+ static enum rte_flow_item_type pattern_ntuple_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+ };
+
+ /* Pattern for IPv4 5-tuple SCTP filter */
+ static enum rte_flow_item_type pattern_ntuple_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+ };
+
+The internal function ``flow_classify_parse_flow`` parses the
+IPv4 5-tuple pattern, attributes and actions and returns the 5-tuple data in the
+``rte_eth_ntuple_filter`` structure.
+
+.. code-block:: c
+
+ static int
+ flow_classify_parse_flow(
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+
+Adding Flow Rules
+~~~~~~~~~~~~~~~~~
+
+The ``rte_flow_classify_table_entry_add`` API creates an
+``rte_flow_classify`` object which contains the flow_classify id and type, the
+action, a union of add and delete keys and a union of rules.
+It uses the ``flow_classify_parse_flow`` internal function for parsing the
+flow parameters.
+The 5-tuple ACL key data is obtained from the ``rte_eth_ntuple_filter``
+structure populated by the ``classify_parse_ntuple_filter`` function which
+parses the Flow rule.
+
+.. code-block:: c
+
+ struct acl_keys {
+ struct rte_table_acl_rule_add_params key_add; /* add key */
+ struct rte_table_acl_rule_delete_params key_del; /* delete key */
+ };
+
+ struct classify_rules {
+ enum rte_flow_classify_rule_type type;
+ union {
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+ } u;
+ };
+
+ struct rte_flow_classify {
+ uint32_t id; /* unique ID of classify object */
+ struct rte_flow_action action; /* action when match found */
+ struct classify_rules rules; /* union of rules */
+ union {
+ struct acl_keys key;
+ } u;
+ int key_found; /* rule key found in table */
+ void *entry; /* pointer to buffer to hold rule meta data */
+ void *entry_ptr; /* handle to the table entry for rule meta data */
+ };
+
+It then calls the ``table[table_id].ops.f_add`` API to add the rule to the ACL
+table.
+
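+A sketch of adding a rule, assuming ``attr``, ``pattern`` and ``actions``
+describe one of the IPv4 5-tuple flows shown above:
+
+.. code-block:: c
+
+    struct rte_flow_error error;
+    struct rte_flow_classify_rule *rule;
+
+    rule = rte_flow_classify_table_entry_add(cls, table_id, &attr,
+                                             pattern, actions, &error);
+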
+Deleting Flow Rules
+~~~~~~~~~~~~~~~~~~~
+
+The ``rte_flow_classify_table_entry_delete`` API calls the
+``table[table_id].ops.f_delete`` API to delete a rule from the ACL table.
+
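+Continuing the sketch above, the handle returned at add time identifies the
+rule to remove:
+
+.. code-block:: c
+
+    err = rte_flow_classify_table_entry_delete(cls, table_id, rule);
+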
+Packet Matching
+~~~~~~~~~~~~~~~
+
+The ``rte_flow_classifier_query`` API is used to find packets which match a
+given flow Flow rule in the table.
+This API calls the flow_classify_run internal function which calls the
+``table[table_id].ops.f_lookup`` API to see if any packets in a burst match any
+of the Flow rules in the table.
+The meta data for the highest priority rule matched for each packet is returned
+in the entries array in the ``rte_flow_classify`` object.
+The internal function ``action_apply`` implements the ``Count`` action which is
+used to return data which matches a particular Flow rule.
+
+The ``rte_flow_classifier_query`` API uses the following structures to return
+data to the application.
+
+.. code-block:: c
+
+ /** IPv4 5-tuple data */
+ struct rte_flow_classify_ipv4_5tuple {
+ uint32_t dst_ip; /**< Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /**< Mask of destination IP address. */
+ uint32_t src_ip; /**< Source IP address in big endian. */
+        uint32_t src_ip_mask; /**< Mask of source IP address. */
+ uint16_t dst_port; /**< Destination port in big endian. */
+ uint16_t dst_port_mask; /**< Mask of destination port. */
+ uint16_t src_port; /**< Source Port in big endian. */
+ uint16_t src_port_mask; /**< Mask of source port. */
+ uint8_t proto; /**< L4 protocol. */
+ uint8_t proto_mask; /**< Mask of L4 protocol. */
+ };
+
+ /**
+ * Flow stats
+ *
+ * For the count action, stats can be returned by the query API.
+ *
+ * Storage for stats is provided by the application.
+ */
+ struct rte_flow_classify_stats {
+ void *stats;
+ };
+
+ struct rte_flow_classify_5tuple_stats {
+ /** count of packets that match IPv4 5tuple pattern */
+ uint64_t counter1;
+ /** IPv4 5tuple data */
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+ };
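+
+A sketch of a query call, assuming a burst of mbufs ``pkts`` of length
+``nb_pkts`` and the ``rule`` handle from the earlier example; on success the
+match count is available in ``ntuple_stats.counter1``:
+
+.. code-block:: c
+
+    struct rte_flow_classify_5tuple_stats ntuple_stats;
+    struct rte_flow_classify_stats classify_stats = {
+        .stats = (void *)&ntuple_stats,
+    };
+
+    err = rte_flow_classifier_query(cls, table_id, pkts, nb_pkts,
+                                    rule, &classify_stats);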
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
new file mode 100644
index 00000000..4bbf01a4
--- /dev/null
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -0,0 +1,257 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Generic Segmentation Offload Library
+====================================
+
+Overview
+--------
+Generic Segmentation Offload (GSO) is a widely used software implementation of
+TCP Segmentation Offload (TSO), which reduces per-packet processing overhead.
+Much like TSO, GSO gains performance by enabling upper layer applications to
+process a smaller number of large packets (e.g. MTU size of 64KB), instead of
+processing higher numbers of small packets (e.g. MTU size of 1500B), thus
+reducing per-packet overhead.
+
+For example, GSO allows guest kernel stacks to transmit over-sized TCP segments
+that far exceed the kernel interface's MTU; this eliminates the need to segment
+packets within the guest, and improves the data-to-overhead ratio of both the
+guest-host link, and PCI bus. The expectation of the guest network stack in this
+scenario is that segmentation of egress frames will take place either in the
+NIC HW or, where that hardware capability is unavailable, in the host
+application or network stack.
+
+Bearing that in mind, the GSO library enables DPDK applications to segment
+packets in software. Note however, that GSO is implemented as a standalone
+library, and not via a 'fallback' mechanism (i.e. for when TSO is unsupported
+in the underlying hardware); that is, applications must explicitly invoke the
+GSO library to segment packets. The size of GSO segments ``(segsz)`` is
+configurable by the application.
+
+Limitations
+-----------
+
+#. The GSO library doesn't check if input packets have correct checksums.
+
+#. In addition, the GSO library doesn't re-calculate checksums for segmented
+ packets (that task is left to the application).
+
+#. IP fragments are unsupported by the GSO library.
+
+#. The egress interface's driver must support multi-segment packets.
+
+#. Currently, the GSO library supports the following IPv4 packet types:
+
+ - TCP
+ - VxLAN
+ - GRE
+
+ See `Supported GSO Packet Types`_ for further details.
+
+Packet Segmentation
+-------------------
+
+The ``rte_gso_segment()`` function is the GSO library's primary
+segmentation API.
+
+Before performing segmentation, an application must create a GSO context object
+``(struct rte_gso_ctx)``, which provides the library with some of the
+information required to understand how the packet should be segmented. Refer to
+`How to Segment a Packet`_ for additional details on same. Once the GSO context
+has been created, and populated, the application can then use the
+``rte_gso_segment()`` function to segment packets.
+
+The GSO library typically stores each segment that it creates in two parts: the
+first part contains a copy of the original packet's headers, while the second
+part contains a pointer to an offset within the original packet. This mechanism
+is explained in more detail in `GSO Output Segment Format`_.
+
+The GSO library supports both single- and multi-segment input mbufs.
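+
+For reference, a sketch of the function's prototype is shown below; consult
+``rte_gso.h`` for the authoritative declaration:
+
+.. code-block:: c
+
+   /* Segment 'pkt' according to 'gso_ctx'. The resulting GSO segments are
+    * written to 'pkts_out', which can hold at most 'nb_pkts_out' entries.
+    * Returns the number of segments stored in 'pkts_out' on success, or a
+    * negative value on failure.
+    */
+   int rte_gso_segment(struct rte_mbuf *pkt,
+           const struct rte_gso_ctx *gso_ctx,
+           struct rte_mbuf **pkts_out,
+           uint16_t nb_pkts_out);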
+
+GSO Output Segment Format
+~~~~~~~~~~~~~~~~~~~~~~~~~
+To reduce the number of expensive memcpy operations required when segmenting a
+packet, the GSO library typically stores each segment that it creates as a
+two-part mbuf (technically, this is termed a 'two-segment' mbuf; however, since
+the elements produced by the API are also called 'segments', for clarity the
+term 'part' is used here instead).
+
+The first part of each output segment is a direct mbuf that contains a copy of
+the original packet's headers, which must be prepended to each output segment.
+
+The second part of each output segment represents a section of data from the
+original packet, i.e. a data segment. Rather than copy the data directly from
+the original packet into the output segment (which would impact performance
+considerably), the second part of each output segment is an indirect mbuf,
+which contains no actual data, but simply points to an offset within the
+original packet.
+
+The combination of the 'header' segment and the 'data' segment constitutes a
+single logical output GSO segment of the original packet. This is illustrated
+in :numref:`figure_gso-output-segment-format`.
+
+.. _figure_gso-output-segment-format:
+
+.. figure:: img/gso-output-segment-format.svg
+ :align: center
+
+ Two-part GSO output segment
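+
+The sketch below illustrates, in heavily simplified form, how such a two-part
+segment could be assembled with the mbuf API; this is not the library's actual
+implementation, and the pool, packet and slice variables are hypothetical:
+
+.. code-block:: c
+
+   /* Assume direct_pool/indirect_pool, the (direct) input_pkt, and the
+    * payload_offset/slice_len describing this segment's slice all exist. */
+
+   /* Part 1: a direct mbuf that will hold a copy of the packet headers. */
+   struct rte_mbuf *hdr = rte_pktmbuf_alloc(direct_pool);
+
+   /* Part 2: an indirect mbuf sharing the input packet's data. */
+   struct rte_mbuf *part = rte_pktmbuf_alloc(indirect_pool);
+   rte_pktmbuf_attach(part, input_pkt);
+
+   /* Narrow the indirect mbuf to this segment's slice of the payload. */
+   rte_pktmbuf_adj(part, payload_offset);              /* drop leading bytes */
+   rte_pktmbuf_trim(part, part->data_len - slice_len); /* drop trailing bytes */
+
+   /* Chain the two parts into one logical output segment; the library
+    * also fixes up nb_segs, pkt_len and the copied header fields. */
+   hdr->next = part;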
+
+In one situation, an output segment may contain additional 'data' parts. This
+only occurs when:
+
+- the input packet on which GSO is to be performed is represented by a
+ multi-segment mbuf.
+
+- the output segment is required to contain data that spans the boundaries
+ between segments of the input multi-segment mbuf.
+
+The GSO library traverses each segment of the input packet, and produces
+numerous output segments; for optimal performance, the number of output
+segments is kept to a minimum. Consequently, the GSO library maximizes the
+amount of data contained within each output segment; i.e. each output segment
+contains ``segsz`` bytes of data. The only exception to this is the very final
+output segment; if ``pkt_len`` % ``segsz`` is non-zero, then the final segment
+is smaller than the rest. For example, given a ``pkt_len`` of 3000B and a
+``segsz`` of 1400B, the output is two 1400B segments followed by one 200B
+segment.
+
+In order for an output segment to meet its MSS, it may need to include data from
+multiple input segments. Due to the nature of indirect mbufs (each indirect mbuf
+can point to only one direct mbuf), the solution here is to add another indirect
+mbuf to the output segment; this additional segment then points to the next
+input segment. If necessary, this chaining process is repeated, until the sum of
+all of the data 'contained' in the output segment reaches ``segsz``. This
+ensures that the amount of data contained within each output segment is uniform,
+with the possible exception of the last segment, as previously described.
+
+:numref:`figure_gso-three-seg-mbuf` illustrates an example of a three-part
+output segment. In this example, the output segment needs to include data from
+the end of one input segment, and the beginning of another. To achieve this,
+an additional indirect mbuf is chained to the second part of the output segment,
+and is attached to the next input segment (i.e. it points to the data in the
+next input segment).
+
+.. _figure_gso-three-seg-mbuf:
+
+.. figure:: img/gso-three-seg-mbuf.svg
+ :align: center
+
+ Three-part GSO output segment
+
+Supported GSO Packet Types
+--------------------------
+
+TCP/IPv4 GSO
+~~~~~~~~~~~~
+TCP/IPv4 GSO supports segmentation of suitably large TCP/IPv4 packets, which
+may also contain an optional VLAN tag.
+
+VxLAN GSO
+~~~~~~~~~
+VxLAN GSO supports segmentation of suitably large VxLAN packets,
+which contain an outer IPv4 header, inner TCP/IPv4 headers, and optional
+inner and/or outer VLAN tag(s).
+
+GRE GSO
+~~~~~~~
+GRE GSO supports segmentation of suitably large GRE packets, which contain
+an outer IPv4 header, inner TCP/IPv4 headers, and an optional VLAN tag.
+
+How to Segment a Packet
+-----------------------
+
+To segment an outgoing packet, an application must:
+
+#. First create a GSO context (``struct rte_gso_ctx``); this contains the
+ following (a sketch of populating the context appears after this list):
+
+ - a pointer to the mbuf pool for allocating the direct buffers, which are
+ used to store the GSO segments' packet headers.
+
+ - a pointer to the mbuf pool for allocating indirect buffers, which are
+ used to locate GSO segments' packet payloads.
+
+ .. note::
+
+ An application may use the same pool for both direct and indirect
+ buffers. However, since indirect mbufs simply store a pointer, the
+ application may reduce its memory consumption by creating a separate memory
+ pool, containing smaller elements, for the indirect pool.
+
+ - the size of each output segment, including packet headers and payload,
+ measured in bytes.
+
+ - the bit mask of required GSO types. The GSO library uses the same macros
+ as those that describe a physical device's TX offloading capabilities (i.e.
+ ``DEV_TX_OFFLOAD_*_TSO``) for ``gso_types``. For example, if an application
+ wants to segment TCP/IPv4 packets, it should set ``gso_types`` to
+ ``DEV_TX_OFFLOAD_TCP_TSO``. The only other values currently supported for
+ ``gso_types`` are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO`` and
+ ``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
+ allowed.
+
+ - a flag that indicates whether the IPv4 headers of output segments should
+ contain fixed or incremental ID values.
+
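+ As noted above, here is a minimal sketch of populating the context, assuming
+ the field names declared in ``rte_gso.h`` and mempools created beforehand by
+ the application:
+
+ .. code-block:: c
+
+    struct rte_gso_ctx gso_ctx;
+
+    gso_ctx.direct_pool = direct_pool;     /* pool for header (direct) mbufs */
+    gso_ctx.indirect_pool = indirect_pool; /* pool for payload (indirect) mbufs */
+    gso_ctx.gso_size = 1400;               /* segsz: headers + payload, bytes */
+    gso_ctx.gso_types = DEV_TX_OFFLOAD_TCP_TSO;  /* segment TCP/IPv4 packets */
+    gso_ctx.flag = 0;    /* 0 for incremental IPv4 IDs; RTE_GSO_FLAG_IPID_FIXED
+                          * for fixed IDs */
+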
+#. Set the appropriate ``ol_flags`` in the mbuf.
+
+ - The GSO library uses the value of an mbuf's ``ol_flags`` attribute to
+ determine how a packet should be segmented. It is the application's
+ responsibility to ensure that these flags are set.
+
+ - For example, in order to segment TCP/IPv4 packets, the application should
+ add the ``PKT_TX_IPV4`` and ``PKT_TX_TCP_SEG`` flags to the mbuf's
+ ol_flags.
+
+ - If checksum calculation in hardware is required, the application should
+ also add the ``PKT_TX_TCP_CKSUM`` and ``PKT_TX_IP_CKSUM`` flags.
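+
+ For example, a sketch using the flags referenced above:
+
+ .. code-block:: c
+
+    pkt->ol_flags |= (PKT_TX_IPV4 | PKT_TX_TCP_SEG);
+
+    /* Optionally request L3/L4 checksum computation in hardware. */
+    pkt->ol_flags |= (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM);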
+
+#. Check if the packet should be processed. Packets with one of the
+ following properties are not processed and are returned immediately:
+
+ - Packet length is less than ``segsz`` (i.e. GSO is not required).
+
+ - Packet type is not supported by the GSO library (see
+ `Supported GSO Packet Types`_).
+
+ - Application has not enabled GSO support for the packet type.
+
+ - Packet's ``ol_flags`` have been incorrectly set.
+
+#. Allocate space in which to store the output GSO segments. If the amount of
+ space allocated by the application is insufficient, segmentation will fail.
+
+#. Invoke the GSO segmentation API, ``rte_gso_segment()``.
+
+#. If required, update the L3 and L4 checksums of the newly-created segments.
+ For tunneled packets, the outer IPv4 headers' checksums should also be
+ updated. Alternatively, the application may offload checksum calculation
+ to HW.
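+
+Putting steps 3 to 6 together, an invocation might look like the sketch below;
+``MAX_GSO_SEGS`` is a hypothetical application-chosen bound, ``gso_ctx`` is the
+context from step 1, and the return-value handling assumes the semantics
+described above:
+
+.. code-block:: c
+
+   #define MAX_GSO_SEGS 64   /* space for the output GSO segments */
+
+   struct rte_mbuf *segs[MAX_GSO_SEGS];
+   int nb_segs, i;
+
+   nb_segs = rte_gso_segment(pkt, &gso_ctx, segs, MAX_GSO_SEGS);
+   if (nb_segs < 0) {
+       /* Invalid parameters, unsupported packet type, or insufficient
+        * mbufs/output space; the input packet is still owned by us. */
+       rte_pktmbuf_free(pkt);
+   } else {
+       for (i = 0; i < nb_segs; i++) {
+           /* If checksum calculation is not offloaded to HW, update the
+            * L3/L4 (and, for tunneled packets, outer IPv4) checksums of
+            * segs[i] here before transmitting it. */
+       }
+   }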
+
diff --git a/doc/guides/prog_guide/img/gso-output-segment-format.svg b/doc/guides/prog_guide/img/gso-output-segment-format.svg
new file mode 100644
index 00000000..bdb5ec33
--- /dev/null
+++ b/doc/guides/prog_guide/img/gso-output-segment-format.svg
@@ -0,0 +1,313 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export gso-output-segment-format.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="19.3975in" height="8.21796in"
+ viewBox="0 0 1396.62 591.693" xml:space="preserve" color-interpolation-filters="sRGB" class="st21">
+ <v:documentProperties v:langID="1033" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#006fc5;stroke:#006fc5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0552552}
+ .st2 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st3 {stroke:#c3d600;stroke-linecap:round;stroke-linejoin:round;stroke-width:3.68828}
+ .st4 {fill:#c3d600;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st5 {stroke:#8f9d00;stroke-linecap:round;stroke-linejoin:round;stroke-width:3.75735}
+ .st6 {fill:#00aeef;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st7 {stroke:#007fb0;stroke-linecap:round;stroke-linejoin:round;stroke-width:3.75735}
+ .st8 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st9 {fill:#ffffff;font-family:Intel Clear;font-size:1.99999em;font-weight:bold}
+ .st10 {fill:#000000;stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0552552}
+ .st11 {fill:#ffffff;font-family:Intel Clear;font-size:2.44732em;font-weight:bold}
+ .st12 {fill:none;stroke:#ffffff;stroke-linecap:round;stroke-linejoin:round;stroke-width:5.52552}
+ .st13 {fill:#000000;font-family:Intel Clear;font-size:2.15291em}
+ .st14 {fill:#000000;font-family:Intel Clear;font-size:1.8401em}
+ .st15 {fill:#006fc5;stroke:#006fc5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0276276}
+ .st16 {fill:#c3d600;font-family:Intel Clear;font-size:2.44732em}
+ .st17 {fill:#ffc000;font-family:Intel Clear;font-size:2.44732em}
+ .st18 {fill:#ffc000;stroke:#ffc000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0276276}
+ .st19 {fill:#0070c0;font-family:Intel Clear;font-size:1.8401em}
+ .st20 {fill:#006fc5;font-family:Intel Clear;font-size:1.61927em}
+ .st21 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="8.50394"
+ v:shadowOffsetY="-8.50394"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(577.244,-560.42)">
+ <title>Sheet.3</title>
+ <path d="M9.24 585.29 L16.32 585.29 L16.32 587.06 L9.24 587.06 L9.24 585.29 L9.24 585.29 ZM21.63 585.29 L23.4 585.29
+ L23.4 587.06 L21.63 587.06 L21.63 585.29 L21.63 585.29 ZM28.7 585.29 L35.78 585.29 L35.78 587.06 L28.7 587.06
+ L28.7 585.29 L28.7 585.29 ZM41.09 585.29 L42.86 585.29 L42.86 587.06 L41.09 587.06 L41.09 585.29 L41.09
+ 585.29 ZM48.17 585.29 L55.25 585.29 L55.25 587.06 L48.17 587.06 L48.17 585.29 L48.17 585.29 ZM60.56 585.29
+ L62.33 585.29 L62.33 587.06 L60.56 587.06 L60.56 585.29 L60.56 585.29 ZM67.64 585.29 L74.72 585.29 L74.72
+ 587.06 L67.64 587.06 L67.64 585.29 L67.64 585.29 ZM80.03 585.29 L81.8 585.29 L81.8 587.06 L80.03 587.06
+ L80.03 585.29 L80.03 585.29 ZM87.11 585.29 L94.19 585.29 L94.19 587.06 L87.11 587.06 L87.11 585.29 L87.11
+ 585.29 ZM99.5 585.29 L101.27 585.29 L101.27 587.06 L99.5 587.06 L99.5 585.29 L99.5 585.29 ZM106.58 585.29
+ L113.66 585.29 L113.66 587.06 L106.58 587.06 L106.58 585.29 L106.58 585.29 ZM118.97 585.29 L120.74 585.29
+ L120.74 587.06 L118.97 587.06 L118.97 585.29 L118.97 585.29 ZM126.05 585.29 L133.13 585.29 L133.13 587.06
+ L126.05 587.06 L126.05 585.29 L126.05 585.29 ZM138.43 585.29 L140.2 585.29 L140.2 587.06 L138.43 587.06
+ L138.43 585.29 L138.43 585.29 ZM145.51 585.29 L152.59 585.29 L152.59 587.06 L145.51 587.06 L145.51 585.29
+ L145.51 585.29 ZM157.9 585.29 L159.67 585.29 L159.67 587.06 L157.9 587.06 L157.9 585.29 L157.9 585.29 ZM164.98
+ 585.29 L172.06 585.29 L172.06 587.06 L164.98 587.06 L164.98 585.29 L164.98 585.29 ZM177.37 585.29 L179.14
+ 585.29 L179.14 587.06 L177.37 587.06 L177.37 585.29 L177.37 585.29 ZM184.45 585.29 L191.53 585.29 L191.53
+ 587.06 L184.45 587.06 L184.45 585.29 L184.45 585.29 ZM196.84 585.29 L198.61 585.29 L198.61 587.06 L196.84
+ 587.06 L196.84 585.29 L196.84 585.29 ZM203.92 585.29 L211 585.29 L211 587.06 L203.92 587.06 L203.92 585.29
+ L203.92 585.29 ZM216.31 585.29 L218.08 585.29 L218.08 587.06 L216.31 587.06 L216.31 585.29 L216.31 585.29
+ ZM223.39 585.29 L230.47 585.29 L230.47 587.06 L223.39 587.06 L223.39 585.29 L223.39 585.29 ZM235.78 585.29
+ L237.55 585.29 L237.55 587.06 L235.78 587.06 L235.78 585.29 L235.78 585.29 ZM242.86 585.29 L249.93 585.29
+ L249.93 587.06 L242.86 587.06 L242.86 585.29 L242.86 585.29 ZM255.24 585.29 L257.01 585.29 L257.01 587.06
+ L255.24 587.06 L255.24 585.29 L255.24 585.29 ZM262.32 585.29 L269.4 585.29 L269.4 587.06 L262.32 587.06
+ L262.32 585.29 L262.32 585.29 ZM274.71 585.29 L276.48 585.29 L276.48 587.06 L274.71 587.06 L274.71 585.29
+ L274.71 585.29 ZM281.79 585.29 L288.87 585.29 L288.87 587.06 L281.79 587.06 L281.79 585.29 L281.79 585.29
+ ZM294.18 585.29 L295.95 585.29 L295.95 587.06 L294.18 587.06 L294.18 585.29 L294.18 585.29 ZM301.26 585.29
+ L308.34 585.29 L308.34 587.06 L301.26 587.06 L301.26 585.29 L301.26 585.29 ZM313.65 585.29 L315.42 585.29
+ L315.42 587.06 L313.65 587.06 L313.65 585.29 L313.65 585.29 ZM320.73 585.29 L324.99 585.29 L324.99 587.06
+ L320.73 587.06 L320.73 585.29 L320.73 585.29 ZM11.06 591.69 L0 586.17 L11.06 580.65 L11.06 591.69 L11.06
+ 591.69 ZM323.16 580.65 L334.22 586.17 L323.16 591.69 L323.16 580.65 L323.16 580.65 Z" class="st1"/>
+ </g>
+ <g id="shape4-3" v:mID="4" v:groupContext="shape" transform="translate(184.298,-201.906)">
+ <title>Sheet.4</title>
+ <path d="M94.04 570.43 L117.87 557.26 L0 344.58 L47.68 318.26 L165.55 530.94 L189.39 517.79 L168.08 591.69 L94.04 570.43
+ Z" class="st2"/>
+ </g>
+ <g id="shape5-5" v:mID="5" v:groupContext="shape" transform="translate(184.298,-201.906)">
+ <title>Sheet.5</title>
+ <path d="M94.04 570.43 L117.87 557.26 L0 344.58 L47.68 318.26 L165.55 530.94 L189.39 517.79 L168.08 591.69 L94.04 570.43"
+ class="st3"/>
+ </g>
+ <g id="shape6-8" v:mID="6" v:groupContext="shape" transform="translate(119.408,-447.917)">
+ <title>Sheet.6</title>
+ <path d="M0 510.21 L0 591.69 L129.86 591.69 L129.86 510.21 L0 510.21 L0 510.21 Z" class="st4"/>
+ </g>
+ <g id="shape7-10" v:mID="7" v:groupContext="shape" transform="translate(119.408,-447.917)">
+ <title>Sheet.7</title>
+ <path d="M0 510.21 L129.86 510.21 L129.86 591.69 L0 591.69 L0 510.21" class="st5"/>
+ </g>
+ <g id="shape10-13" v:mID="10" v:groupContext="shape" transform="translate(250.819,-447.917)">
+ <title>Sheet.10</title>
+ <path d="M0 510.21 L0 591.69 L822.53 591.69 L822.53 510.21 L0 510.21 L0 510.21 Z" class="st6"/>
+ </g>
+ <g id="shape11-15" v:mID="11" v:groupContext="shape" transform="translate(250.819,-447.917)">
+ <title>Sheet.11</title>
+ <path d="M0 510.21 L822.53 510.21 L822.53 591.69 L0 591.69 L0 510.21" class="st7"/>
+ </g>
+ <g id="shape12-18" v:mID="12" v:groupContext="shape" transform="translate(255.478,-470.123)">
+ <title>Sheet.12</title>
+ <desc>Payload 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="157.315" cy="574.07" width="314.63" height="35.245"/>
+ <path d="M314.63 556.45 L0 556.45 L0 591.69 L314.63 591.69 L314.63 556.45" class="st8"/>
+ <text x="102.08" y="581.27" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 0</text> </g>
+ <g id="shape13-22" v:mID="13" v:groupContext="shape" transform="translate(577.354,-470.123)">
+ <title>Sheet.13</title>
+ <desc>Payload 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="167.112" cy="574.07" width="334.23" height="35.245"/>
+ <path d="M334.22 556.45 L0 556.45 L0 591.69 L334.22 591.69 L334.22 556.45" class="st8"/>
+ <text x="111.88" y="581.27" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 1</text> </g>
+ <g id="shape14-26" v:mID="14" v:groupContext="shape" transform="translate(910.635,-470.956)">
+ <title>Sheet.14</title>
+ <desc>Payload 2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="81.8509" cy="574.07" width="163.71" height="35.245"/>
+ <path d="M163.7 556.45 L0 556.45 L0 591.69 L163.7 591.69 L163.7 556.45" class="st8"/>
+ <text x="26.61" y="581.27" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 2</text> </g>
+ <g id="shape15-30" v:mID="15" v:groupContext="shape" transform="translate(909.144,-453.824)">
+ <title>Sheet.15</title>
+ <path d="M1.16 453.85 L1.05 465.33 L3.93 465.39 L4.04 453.91 L1.16 453.85 L1.16 453.85 ZM1 473.95 L0.94 476.82 L3.82
+ 476.87 L3.87 474 L1 473.95 L1 473.95 ZM0.88 485.43 L0.77 496.91 L3.65 496.96 L3.76 485.48 L0.88 485.43 L0.88
+ 485.43 ZM0.72 505.52 L0.72 508.39 L3.59 508.45 L3.59 505.58 L0.72 505.52 L0.72 505.52 ZM0.61 517 L0.55 528.49
+ L3.43 528.54 L3.48 517.06 L0.61 517 L0.61 517 ZM0.44 537.1 L0.44 539.97 L3.32 540.02 L3.32 537.15 L0.44
+ 537.1 L0.44 537.1 ZM0.39 548.58 L0.28 560.06 L3.15 560.12 L3.26 548.63 L0.39 548.58 L0.39 548.58 ZM0.22
+ 568.67 L0.17 571.54 L3.04 571.6 L3.1 568.73 L0.22 568.67 L0.22 568.67 ZM0.11 580.16 L0 591.64 L2.88 591.69
+ L2.99 580.21 L0.11 580.16 L0.11 580.16 Z" class="st10"/>
+ </g>
+ <g id="shape16-32" v:mID="16" v:groupContext="shape" transform="translate(119.187,-447.917)">
+ <title>Sheet.16</title>
+ <path d="M0 510.21 L0 591.69 L129.86 591.69 L129.86 510.21 L0 510.21 L0 510.21 Z" class="st4"/>
+ </g>
+ <g id="shape17-34" v:mID="17" v:groupContext="shape" transform="translate(119.187,-447.917)">
+ <title>Sheet.17</title>
+ <path d="M0 510.21 L129.86 510.21 L129.86 591.69 L0 591.69 L0 510.21" class="st5"/>
+ </g>
+ <g id="shape18-37" v:mID="18" v:groupContext="shape" transform="translate(121.944,-471.034)">
+ <title>Sheet.18</title>
+ <desc>Header</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="61.0973" cy="574.07" width="122.2" height="35.245"/>
+ <path d="M122.19 556.45 L0 556.45 L0 591.69 L122.19 591.69 L122.19 556.45" class="st8"/>
+ <text x="20.61" y="581.27" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Header</text> </g>
+ <g id="shape19-41" v:mID="19" v:groupContext="shape" transform="translate(329.798,-1.87868)">
+ <title>Sheet.19</title>
+ <path d="M0 510.43 L0 591.69 L289.81 591.69 L289.81 510.43 L0 510.43 L0 510.43 Z" class="st4"/>
+ </g>
+ <g id="shape20-43" v:mID="20" v:groupContext="shape" transform="translate(329.798,-1.87868)">
+ <title>Sheet.20</title>
+ <path d="M0 510.43 L289.81 510.43 L289.81 591.69 L0 591.69 L0 510.43" class="st5"/>
+ </g>
+ <g id="shape21-46" v:mID="21" v:groupContext="shape" transform="translate(424.908,-21.567)">
+ <title>Sheet.21</title>
+ <desc>Header</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="61.0973" cy="574.07" width="122.2" height="35.245"/>
+ <path d="M122.19 556.45 L0 556.45 L0 591.69 L122.19 591.69 L122.19 556.45" class="st8"/>
+ <text x="11.55" y="582.88" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Header</text> </g>
+ <g id="shape22-50" v:mID="22" v:groupContext="shape" transform="translate(619.609,-1.87868)">
+ <title>Sheet.22</title>
+ <path d="M0 510.43 L0 591.69 L453.74 591.69 L453.74 510.43 L0 510.43 L0 510.43 Z" class="st6"/>
+ </g>
+ <g id="shape23-52" v:mID="23" v:groupContext="shape" transform="translate(619.609,-1.87868)">
+ <title>Sheet.23</title>
+ <path d="M0 510.43 L453.74 510.43 L453.74 591.69 L0 591.69 L0 510.43" class="st7"/>
+ </g>
+ <g id="shape24-55" v:mID="24" v:groupContext="shape" transform="translate(778.624,-21.5672)">
+ <title>Sheet.24</title>
+ <desc>Payload 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="81.8509" cy="574.07" width="163.71" height="35.245"/>
+ <path d="M163.7 556.45 L0 556.45 L0 591.69 L163.7 591.69 L163.7 556.45" class="st8"/>
+ <text x="14.26" y="582.88" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 1</text> </g>
+ <g id="shape25-59" v:mID="25" v:groupContext="shape" transform="translate(710.092,-113.83)">
+ <title>Sheet.25</title>
+ <path d="M0 522.69 C0 515.07 6.19 508.89 13.83 508.89 L349.43 508.89 C357.12 508.89 363.26 515.07 363.26 522.69 L363.26
+ 577.89 C363.26 585.57 357.12 591.69 349.43 591.69 L13.83 591.69 C6.19 591.69 0 585.57 0 577.89 L0 522.69
+ Z" class="st6"/>
+ </g>
+ <g id="shape26-61" v:mID="26" v:groupContext="shape" transform="translate(710.092,-113.83)">
+ <title>Sheet.26</title>
+ <path d="M0 522.69 C0 515.07 6.19 508.89 13.83 508.89 L349.43 508.89 C357.12 508.89 363.26 515.07 363.26 522.69 L363.26
+ 577.89 C363.26 585.57 357.12 591.69 349.43 591.69 L13.83 591.69 C6.19 591.69 0 585.57 0 577.89 L0 522.69
+ Z" class="st12"/>
+ </g>
+ <g id="shape27-63" v:mID="27" v:groupContext="shape" transform="translate(813.057,-150.108)">
+ <title>Sheet.27</title>
+ <desc>Indirect mbuf</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="94.1386" cy="576.19" width="188.28" height="31.0055"/>
+ <path d="M188.28 560.69 L0 560.69 L0 591.69 L188.28 591.69 L188.28 560.69" class="st8"/>
+ <text x="15.43" y="583.94" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Indirect mbuf</text> </g>
+ <g id="shape28-67" v:mID="28" v:groupContext="shape" transform="translate(810.845,-123.854)">
+ <title>Sheet.28</title>
+ <desc>(pointer to data)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="95.5065" cy="578.442" width="191.02" height="26.501"/>
+ <path d="M191.01 565.19 L0 565.19 L0 591.69 L191.01 591.69 L191.01 565.19" class="st8"/>
+ <text x="15.15" y="585.07" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(pointer to data)</text> </g>
+ <g id="shape29-71" v:mID="29" v:groupContext="shape" transform="translate(573.151,-149.601)">
+ <title>Sheet.29</title>
+ <path d="M0 584.74 L127.76 584.74 L127.76 587.61 L0 587.61 L0 584.74 L0 584.74 ZM125.91 580.65 L136.97 586.17 L125.91
+ 591.69 L125.91 580.65 L125.91 580.65 Z" class="st15"/>
+ </g>
+ <g id="shape30-73" v:mID="30" v:groupContext="shape" transform="translate(0,-309.671)">
+ <title>Sheet.30</title>
+ <desc>Memory copy</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="108.076" cy="574.07" width="216.16" height="35.245"/>
+ <path d="M216.15 556.45 L0 556.45 L0 591.69 L216.15 591.69 L216.15 556.45" class="st8"/>
+ <text x="17.68" y="582.88" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Memory copy</text> </g>
+ <g id="shape31-77" v:mID="31" v:groupContext="shape" transform="translate(680.77,-305.707)">
+ <title>Sheet.31</title>
+ <desc>No Memory Copy</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="136.547" cy="574.07" width="273.1" height="35.245"/>
+ <path d="M273.09 556.45 L0 556.45 L0 591.69 L273.09 591.69 L273.09 556.45" class="st8"/>
+ <text x="21.4" y="582.88" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>No Memory Copy</text> </g>
+ <g id="shape32-81" v:mID="32" v:groupContext="shape" transform="translate(1102.72,-26.7532)">
+ <title>Sheet.32</title>
+ <desc>Logical output segment</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="138.243" cy="578.442" width="276.49" height="26.501"/>
+ <path d="M276.49 565.19 L0 565.19 L0 591.69 L276.49 591.69 L276.49 565.19" class="st8"/>
+ <text x="20.73" y="585.07" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Logical output segment</text> </g>
+ <g id="shape36-85" v:mID="36" v:groupContext="shape" transform="translate(1106.81,-138.647)">
+ <title>Sheet.36</title>
+ <desc>Two-part output segment</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="144.906" cy="578.442" width="289.82" height="26.501"/>
+ <path d="M289.81 565.19 L0 565.19 L0 591.69 L289.81 591.69 L289.81 565.19" class="st8"/>
+ <text x="16.56" y="585.07" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Two-part output segment</text> </g>
+ <g id="shape37-89" v:mID="37" v:groupContext="shape" transform="translate(575.916,-453.879)">
+ <title>Sheet.37</title>
+ <path d="M2.88 453.91 L2.9 465.39 L0.03 465.39 L0 453.91 L2.88 453.91 L2.88 453.91 ZM2.9 474 L2.9 476.87 L0.03 476.87
+ L0.03 474 L2.9 474 L2.9 474 ZM2.9 485.48 L2.9 496.96 L0.03 496.96 L0.03 485.48 L2.9 485.48 L2.9 485.48 ZM2.9
+ 505.58 L2.9 508.45 L0.03 508.45 L0.03 505.58 L2.9 505.58 L2.9 505.58 ZM2.9 517.06 L2.9 528.54 L0.03 528.54
+ L0.03 517.06 L2.9 517.06 L2.9 517.06 ZM2.9 537.15 L2.9 540.02 L0.03 540.02 L0.03 537.15 L2.9 537.15 L2.9
+ 537.15 ZM2.9 548.63 L2.9 560.12 L0.03 560.12 L0.03 548.63 L2.9 548.63 L2.9 548.63 ZM2.9 568.73 L2.9 571.6
+ L0.03 571.6 L0.03 568.73 L2.9 568.73 L2.9 568.73 ZM2.9 580.21 L2.9 591.69 L0.03 591.69 L0.03 580.21 L2.9
+ 580.21 L2.9 580.21 Z" class="st18"/>
+ </g>
+ <g id="shape38-91" v:mID="38" v:groupContext="shape" transform="translate(577.354,-193.764)">
+ <title>Sheet.38</title>
+ <path d="M5.59 347.01 L10.92 357.16 L8.38 358.52 L3.04 348.36 L5.59 347.01 L5.59 347.01 ZM14.96 364.78 L16.29 367.32
+ L13.74 368.67 L12.42 366.13 L14.96 364.78 L14.96 364.78 ZM20.33 374.97 L25.66 385.12 L23.12 386.45 L17.78
+ 376.29 L20.33 374.97 L20.33 374.97 ZM29.7 392.74 L31.03 395.28 L28.48 396.61 L27.16 394.07 L29.7 392.74
+ L29.7 392.74 ZM35.04 402.9 L40.4 413.06 L37.86 414.38 L32.49 404.22 L35.04 402.9 L35.04 402.9 ZM44.41 420.67
+ L45.77 423.21 L43.22 424.57 L41.87 422.03 L44.41 420.67 L44.41 420.67 ZM49.78 430.83 L55.14 440.99 L52.6
+ 442.34 L47.23 432.18 L49.78 430.83 L49.78 430.83 ZM59.15 448.61 L60.51 451.15 L57.96 452.5 L56.61 449.96
+ L59.15 448.61 L59.15 448.61 ZM64.52 458.79 L69.88 468.95 L67.34 470.27 L61.97 460.12 L64.52 458.79 L64.52
+ 458.79 ZM73.89 476.57 L75.25 479.11 L72.7 480.43 L71.35 477.89 L73.89 476.57 L73.89 476.57 ZM79.26 486.72
+ L84.62 496.88 L82.08 498.21 L76.71 488.05 L79.26 486.72 L79.26 486.72 ZM88.63 504.5 L89.96 507.04 L87.41
+ 508.39 L86.09 505.85 L88.63 504.5 L88.63 504.5 ZM94 514.66 L99.33 524.81 L96.79 526.17 L91.45 516.01 L94
+ 514.66 L94 514.66 ZM103.37 532.43 L104.7 534.97 L102.15 536.32 L100.83 533.79 L103.37 532.43 L103.37 532.43
+ ZM108.73 542.62 L114.07 552.77 L111.53 554.1 L106.19 543.94 L108.73 542.62 L108.73 542.62 ZM118.11 560.39
+ L119.44 562.93 L116.89 564.26 L115.57 561.72 L118.11 560.39 L118.11 560.39 ZM123.45 570.55 L128.81 580.71
+ L126.27 582.03 L120.9 571.87 L123.45 570.55 L123.45 570.55 ZM132.82 588.33 L133.9 590.37 L131.36 591.69
+ L130.28 589.68 L132.82 588.33 L132.82 588.33 ZM0.28 351.89 L0 339.53 L10.07 346.73 L0.28 351.89 L0.28 351.89
+ Z" class="st18"/>
+ </g>
+ <g id="shape39-93" v:mID="39" v:groupContext="shape" transform="translate(329.798,-113.83)">
+ <title>Sheet.39</title>
+ <path d="M0 522.69 C0 515.07 6.19 508.89 13.83 508.89 L229.53 508.89 C237.19 508.89 243.35 515.07 243.35 522.69 L243.35
+ 577.89 C243.35 585.54 237.19 591.69 229.53 591.69 L13.83 591.69 C6.19 591.69 0 585.54 0 577.89 L0 522.69
+ Z" class="st4"/>
+ </g>
+ <g id="shape40-95" v:mID="40" v:groupContext="shape" transform="translate(329.798,-113.83)">
+ <title>Sheet.40</title>
+ <path d="M0 522.69 C0 515.07 6.19 508.89 13.83 508.89 L229.53 508.89 C237.19 508.89 243.35 515.07 243.35 522.69 L243.35
+ 577.89 C243.35 585.54 237.19 591.69 229.53 591.69 L13.83 591.69 C6.19 591.69 0 585.54 0 577.89 L0 522.69
+ Z" class="st12"/>
+ </g>
+ <g id="shape41-97" v:mID="41" v:groupContext="shape" transform="translate(368.774,-150.453)">
+ <title>Sheet.41</title>
+ <desc>Direct mbuf</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="82.7002" cy="576.19" width="165.41" height="31.0055"/>
+ <path d="M165.4 560.69 L0 560.69 L0 591.69 L165.4 591.69 L165.4 560.69" class="st8"/>
+ <text x="13.94" y="583.94" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Direct mbuf</text> </g>
+ <g id="shape42-101" v:mID="42" v:groupContext="shape" transform="translate(351.856,-123.854)">
+ <title>Sheet.42</title>
+ <desc>(copy of headers)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="102.121" cy="578.442" width="204.25" height="26.501"/>
+ <path d="M204.24 565.19 L0 565.19 L0 591.69 L204.24 591.69 L204.24 565.19" class="st8"/>
+ <text x="16.02" y="585.07" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(copy of headers)</text> </g>
+ <g id="shape43-105" v:mID="43" v:groupContext="shape" transform="translate(619.797,-155.563)">
+ <title>Sheet.43</title>
+ <desc>next</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.011" cy="578.442" width="56.03" height="26.501"/>
+ <path d="M56.02 565.19 L0 565.19 L0 591.69 L56.02 591.69 L56.02 565.19" class="st8"/>
+ <text x="6.35" y="585.07" class="st19" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>next</text> </g>
+ <g id="shape44-109" v:mID="44" v:groupContext="shape" transform="translate(700.911,-551.367)">
+ <title>Sheet.44</title>
+ <path d="M0 559.23 L0 591.69 L84.29 591.69 L84.29 559.23 L0 559.23 L0 559.23 Z" class="st2"/>
+ </g>
+ <g id="shape45-111" v:mID="45" v:groupContext="shape" transform="translate(709.883,-555.163)">
+ <title>Sheet.45</title>
+ <desc>segsz</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="30.7501" cy="580.032" width="61.51" height="23.3211"/>
+ <path d="M61.5 568.37 L0 568.37 L0 591.69 L61.5 591.69 L61.5 568.37" class="st8"/>
+ <text x="6.38" y="585.86" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>segsz</text> </g>
+ <g id="shape46-115" v:mID="46" v:groupContext="shape" transform="translate(1111.54,-477.36)">
+ <title>Sheet.46</title>
+ <desc>Input packet</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="74.9" cy="578.442" width="149.8" height="26.501"/>
+ <path d="M149.8 565.19 L0 565.19 L0 591.69 L149.8 591.69 L149.8 565.19" class="st8"/>
+ <text x="12.47" y="585.07" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Input packet</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/gso-three-seg-mbuf.svg b/doc/guides/prog_guide/img/gso-three-seg-mbuf.svg
new file mode 100644
index 00000000..0431012d
--- /dev/null
+++ b/doc/guides/prog_guide/img/gso-three-seg-mbuf.svg
@@ -0,0 +1,477 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export gso-three-seg-mbuf_new.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="25.5895in" height="9.63966in"
+ viewBox="0 0 1842.44 694.055" xml:space="preserve" color-interpolation-filters="sRGB" class="st23">
+ <title>GSO three-part output segment</title>
+ <v:documentProperties v:langID="1033" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffc000;stroke:#ffc000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0329073}
+ .st2 {fill:#006fc5;stroke:#006fc5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0329073}
+ .st3 {fill:none;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:3.42236}
+ .st4 {fill:#c3d600;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st5 {stroke:#8f9d00;stroke-linecap:round;stroke-linejoin:round;stroke-width:4.47539}
+ .st6 {fill:#00aeef;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st7 {stroke:#007fb0;stroke-linecap:round;stroke-linejoin:round;stroke-width:4.47539}
+ .st8 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st9 {fill:#ffffff;font-family:Calibri;font-size:2.08333em;font-weight:bold}
+ .st10 {fill:#ffffff;font-family:Intel Clear;font-size:2.91502em;font-weight:bold}
+ .st11 {fill:#000000;font-family:Intel Clear;font-size:2.19175em}
+ .st12 {fill:none;stroke:#ffffff;stroke-linecap:round;stroke-linejoin:round;stroke-width:6.58146}
+ .st13 {fill:#000000;font-family:Intel Clear;font-size:2.50001em}
+ .st14 {fill:#000000;font-family:Intel Clear;font-size:1.99999em}
+ .st15 {fill:#0070c0;font-family:Intel Clear;font-size:2.19175em}
+ .st16 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st17 {fill:#006fc5;font-family:Intel Clear;font-size:1.92874em}
+ .st18 {fill:#000000;stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0329073}
+ .st19 {fill:#0070c0;font-family:Intel Clear;font-size:1.5em}
+ .st20 {fill:#000000;stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0658146}
+ .st21 {fill:#000000;font-family:Intel Clear;font-size:1.81915em}
+ .st22 {fill:#000000;font-family:Intel Clear;font-size:1.49785em}
+ .st23 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="8.50394"
+ v:shadowOffsetY="-8.50394"/>
+ <v:layer v:name="top" v:index="0"/>
+ <v:layer v:name="middle" v:index="1"/>
+ <g id="shape111-1" v:mID="111" v:groupContext="shape" v:layerMember="0" transform="translate(787.208,-220.973)">
+ <title>Sheet.111</title>
+ <path d="M6.65 402.61 L13.01 414.71 L9.98 416.32 L3.62 404.22 L6.65 402.61 L6.65 402.61 ZM17.82 423.78 L19.4 426.81 L16.37
+ 428.42 L14.79 425.39 L17.82 423.78 L17.82 423.78 ZM24.21 435.91 L30.57 448.01 L27.54 449.59 L21.18 437.49
+ L24.21 435.91 L24.21 435.91 ZM35.38 457.08 L36.96 460.11 L33.93 461.69 L32.35 458.66 L35.38 457.08 L35.38
+ 457.08 ZM41.73 469.18 L48.12 481.28 L45.09 482.86 L38.7 470.76 L41.73 469.18 L41.73 469.18 ZM52.9 490.36
+ L54.51 493.38 L51.48 494.99 L49.87 491.97 L52.9 490.36 L52.9 490.36 ZM59.29 502.45 L65.68 514.55 L62.65
+ 516.16 L56.26 504.06 L59.29 502.45 L59.29 502.45 ZM70.46 523.63 L72.07 526.65 L69.04 528.26 L67.43 525.24
+ L70.46 523.63 L70.46 523.63 ZM76.85 535.76 L83.24 547.86 L80.21 549.43 L73.82 537.34 L76.85 535.76 L76.85
+ 535.76 ZM88.01 556.93 L89.63 559.95 L86.6 561.53 L84.98 558.51 L88.01 556.93 L88.01 556.93 ZM94.4 569.03
+ L100.79 581.13 L97.76 582.7 L91.37 570.61 L94.4 569.03 L94.4 569.03 ZM105.57 590.2 L107.15 593.22 L104.12
+ 594.84 L102.54 591.81 L105.57 590.2 L105.57 590.2 ZM111.96 602.3 L118.32 614.4 L115.28 616.01 L108.93 603.91
+ L111.96 602.3 L111.96 602.3 ZM123.12 623.47 L124.71 626.5 L121.67 628.11 L120.09 625.08 L123.12 623.47 L123.12
+ 623.47 ZM129.51 635.6 L135.87 647.7 L132.84 649.28 L126.48 637.18 L129.51 635.6 L129.51 635.6 ZM140.68 656.77
+ L142.26 659.8 L139.23 661.38 L137.65 658.35 L140.68 656.77 L140.68 656.77 ZM147.04 668.87 L153.43 680.97
+ L150.4 682.55 L144.01 670.45 L147.04 668.87 L147.04 668.87 ZM158.2 690.04 L159.49 692.48 L156.46 694.06
+ L155.17 691.66 L158.2 690.04 L158.2 690.04 ZM0.33 408.43 L0 393.7 L11.99 402.28 L0.33 408.43 L0.33 408.43
+ Z" class="st1"/>
+ </g>
+ <g id="shape110-3" v:mID="110" v:groupContext="shape" v:layerMember="0" transform="translate(685.078,-560.166)">
+ <title>Sheet.110</title>
+ <path d="M0 685.77 L90.67 685.77 L90.67 689.19 L0 689.19 L0 685.77 L0 685.77 ZM89.36 680.9 L97.21 687.48 L89.36 694.06
+ L89.36 680.9 L89.36 680.9 Z" class="st2"/>
+ </g>
+ <g id="shape4-5" v:mID="4" v:groupContext="shape" transform="translate(718.715,-469.955)">
+ <title>Sheet.4</title>
+ <path d="M0 655.13 L0 678.22 C0 686.97 11.69 694.06 26.05 694.06 C40.45 694.06 52.11 686.97 52.11 678.22 L52.11 673.91
+ L59.55 673.91 L44.66 664.86 L29.78 673.91 L37.22 673.91 L37.22 678.22 C37.22 681.98 32.25 685 26.05 685
+ C19.89 685 14.89 681.98 14.89 678.22 L14.89 655.13 L0 655.13 Z" class="st3"/>
+ </g>
+ <g id="shape5-7" v:mID="5" v:groupContext="shape" transform="translate(547.831,-656.823)">
+ <title>Sheet.5</title>
+ <path d="M11 686.43 L19.43 686.43 L19.43 688.53 L11 688.53 L11 686.43 L11 686.43 ZM25.76 686.43 L27.87 686.43 L27.87
+ 688.53 L25.76 688.53 L25.76 686.43 L25.76 686.43 ZM34.19 686.43 L42.62 686.43 L42.62 688.53 L34.19 688.53
+ L34.19 686.43 L34.19 686.43 ZM48.95 686.43 L51.05 686.43 L51.05 688.53 L48.95 688.53 L48.95 686.43 L48.95
+ 686.43 ZM57.38 686.43 L65.81 686.43 L65.81 688.53 L57.38 688.53 L57.38 686.43 L57.38 686.43 ZM72.14 686.43
+ L74.24 686.43 L74.24 688.53 L72.14 688.53 L72.14 686.43 L72.14 686.43 ZM80.57 686.43 L89 686.43 L89 688.53
+ L80.57 688.53 L80.57 686.43 L80.57 686.43 ZM95.32 686.43 L97.43 686.43 L97.43 688.53 L95.32 688.53 L95.32
+ 686.43 L95.32 686.43 ZM103.76 686.43 L112.19 686.43 L112.19 688.53 L103.76 688.53 L103.76 686.43 L103.76
+ 686.43 ZM118.51 686.43 L120.62 686.43 L120.62 688.53 L118.51 688.53 L118.51 686.43 L118.51 686.43 ZM126.94
+ 686.43 L135.38 686.43 L135.38 688.53 L126.94 688.53 L126.94 686.43 L126.94 686.43 ZM141.7 686.43 L143.81
+ 686.43 L143.81 688.53 L141.7 688.53 L141.7 686.43 L141.7 686.43 ZM150.13 686.43 L158.57 686.43 L158.57 688.53
+ L150.13 688.53 L150.13 686.43 L150.13 686.43 ZM164.89 686.43 L167 686.43 L167 688.53 L164.89 688.53 L164.89
+ 686.43 L164.89 686.43 ZM173.32 686.43 L181.75 686.43 L181.75 688.53 L173.32 688.53 L173.32 686.43 L173.32
+ 686.43 ZM188.08 686.43 L190.19 686.43 L190.19 688.53 L188.08 688.53 L188.08 686.43 L188.08 686.43 ZM196.51
+ 686.43 L204.94 686.43 L204.94 688.53 L196.51 688.53 L196.51 686.43 L196.51 686.43 ZM211.27 686.43 L213.38
+ 686.43 L213.38 688.53 L211.27 688.53 L211.27 686.43 L211.27 686.43 ZM219.7 686.43 L228.13 686.43 L228.13
+ 688.53 L219.7 688.53 L219.7 686.43 L219.7 686.43 ZM234.46 686.43 L236.56 686.43 L236.56 688.53 L234.46 688.53
+ L234.46 686.43 L234.46 686.43 ZM242.89 686.43 L251.32 686.43 L251.32 688.53 L242.89 688.53 L242.89 686.43
+ L242.89 686.43 ZM257.64 686.43 L259.75 686.43 L259.75 688.53 L257.64 688.53 L257.64 686.43 L257.64 686.43
+ ZM266.08 686.43 L274.51 686.43 L274.51 688.53 L266.08 688.53 L266.08 686.43 L266.08 686.43 ZM280.83 686.43
+ L282.94 686.43 L282.94 688.53 L280.83 688.53 L280.83 686.43 L280.83 686.43 ZM289.27 686.43 L297.7 686.43
+ L297.7 688.53 L289.27 688.53 L289.27 686.43 L289.27 686.43 ZM304.02 686.43 L306.13 686.43 L306.13 688.53
+ L304.02 688.53 L304.02 686.43 L304.02 686.43 ZM312.45 686.43 L320.89 686.43 L320.89 688.53 L312.45 688.53
+ L312.45 686.43 L312.45 686.43 ZM327.21 686.43 L329.32 686.43 L329.32 688.53 L327.21 688.53 L327.21 686.43
+ L327.21 686.43 ZM335.64 686.43 L344.08 686.43 L344.08 688.53 L335.64 688.53 L335.64 686.43 L335.64 686.43
+ ZM350.4 686.43 L352.51 686.43 L352.51 688.53 L350.4 688.53 L350.4 686.43 L350.4 686.43 ZM358.83 686.43 L367.26
+ 686.43 L367.26 688.53 L358.83 688.53 L358.83 686.43 L358.83 686.43 ZM373.59 686.43 L375.7 686.43 L375.7
+ 688.53 L373.59 688.53 L373.59 686.43 L373.59 686.43 ZM382.02 686.43 L387.06 686.43 L387.06 688.53 L382.02
+ 688.53 L382.02 686.43 L382.02 686.43 ZM13.18 694.06 L0 687.48 L13.18 680.9 L13.18 694.06 L13.18 694.06 ZM384.89
+ 680.9 L398.06 687.48 L384.89 694.06 L384.89 680.9 L384.89 680.9 Z" class="st2"/>
+ </g>
+ <g id="shape6-9" v:mID="6" v:groupContext="shape" transform="translate(2.5012,-522.82)">
+ <title>Sheet.6</title>
+ <path d="M0 597.01 L0 694.06 L154.68 694.06 L154.68 597.01 L0 597.01 L0 597.01 Z" class="st4"/>
+ </g>
+ <g id="shape7-11" v:mID="7" v:groupContext="shape" transform="translate(2.5012,-522.82)">
+ <title>Sheet.7</title>
+ <path d="M0 597.01 L154.68 597.01 L154.68 694.06 L0 694.06 L0 597.01" class="st5"/>
+ </g>
+ <g id="shape10-14" v:mID="10" v:groupContext="shape" transform="translate(159.025,-522.82)">
+ <title>Sheet.10</title>
+ <path d="M0 597.01 L0 694.06 L563.73 694.06 L563.73 597.01 L0 597.01 L0 597.01 Z" class="st6"/>
+ </g>
+ <g id="shape11-16" v:mID="11" v:groupContext="shape" transform="translate(159.025,-522.82)">
+ <title>Sheet.11</title>
+ <path d="M0 597.01 L563.73 597.01 L563.73 694.06 L0 694.06 L0 597.01" class="st7"/>
+ </g>
+ <g id="shape12-19" v:mID="12" v:groupContext="shape" transform="translate(262.039,-549.269)">
+ <title>Sheet.12</title>
+ <desc>Payload 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="97.4929" cy="673.065" width="194.99" height="41.9798"/>
+ <path d="M194.99 652.08 L0 652.08 L0 694.06 L194.99 694.06 L194.99 652.08" class="st8"/>
+ <text x="46.92" y="680.57" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 0</text> </g>
+ <g id="shape13-23" v:mID="13" v:groupContext="shape" transform="translate(547.615,-549.269)">
+ <title>Sheet.13</title>
+ <desc>Payload 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="87.5716" cy="673.065" width="175.15" height="41.9798"/>
+ <path d="M175.14 652.08 L0 652.08 L0 694.06 L175.14 694.06 L175.14 652.08" class="st8"/>
+ <text x="22.61" y="680.57" class="st9" v:langID="1033"><v:paragraph/><v:tabList/>Payload 1</text> </g>
+ <g id="shape15-27" v:mID="15" v:groupContext="shape" transform="translate(2.2377,-522.82)">
+ <title>Sheet.15</title>
+ <path d="M0 597.01 L0 694.06 L154.68 694.06 L154.68 597.01 L0 597.01 L0 597.01 Z" class="st4"/>
+ </g>
+ <g id="shape16-29" v:mID="16" v:groupContext="shape" transform="translate(2.2377,-522.82)">
+ <title>Sheet.16</title>
+ <path d="M0 597.01 L154.68 597.01 L154.68 694.06 L0 694.06 L0 597.01" class="st5"/>
+ </g>
+ <g id="shape17-32" v:mID="17" v:groupContext="shape" transform="translate(6.52106,-546.331)">
+ <title>Sheet.17</title>
+ <desc>Header</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="72.773" cy="673.065" width="145.55" height="41.9798"/>
+ <path d="M145.55 652.08 L0 652.08 L0 694.06 L145.55 694.06 L145.55 652.08" class="st8"/>
+ <text x="34.98" y="680.57" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Header</text> </g>
+ <g id="shape23-36" v:mID="23" v:groupContext="shape" transform="translate(286.548,-2.2377)">
+ <title>Sheet.23</title>
+ <path d="M0 597.27 L0 694.06 L345.2 694.06 L345.2 597.27 L0 597.27 L0 597.27 Z" class="st4"/>
+ </g>
+ <g id="shape24-38" v:mID="24" v:groupContext="shape" transform="translate(286.548,-2.2377)">
+ <title>Sheet.24</title>
+ <path d="M0 597.27 L345.2 597.27 L345.2 694.06 L0 694.06 L0 597.27" class="st5"/>
+ </g>
+ <g id="shape25-41" v:mID="25" v:groupContext="shape" transform="translate(399.834,-25.6887)">
+ <title>Sheet.25</title>
+ <desc>Header</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="72.773" cy="673.065" width="145.55" height="41.9798"/>
+ <path d="M145.55 652.08 L0 652.08 L0 694.06 L145.55 694.06 L145.55 652.08" class="st8"/>
+ <text x="13.76" y="683.56" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Header</text> </g>
+ <g id="shape31-45" v:mID="31" v:groupContext="shape" transform="translate(631.744,-2.2377)">
+ <title>Sheet.31</title>
+ <path d="M0 597.27 L0 694.06 L516.21 694.06 L516.21 597.27 L0 597.27 L0 597.27 Z" class="st6"/>
+ </g>
+ <g id="shape32-47" v:mID="32" v:groupContext="shape" transform="translate(631.744,-2.2377)">
+ <title>Sheet.32</title>
+ <path d="M0 597.27 L516.21 597.27 L516.21 694.06 L0 694.06 L0 597.27" class="st7"/>
+ </g>
+ <g id="shape33-50" v:mID="33" v:groupContext="shape" transform="translate(809.035,-25.6889)">
+ <title>Sheet.33</title>
+ <desc>Payload 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="97.4929" cy="673.065" width="194.99" height="41.9798"/>
+ <path d="M194.99 652.08 L0 652.08 L0 694.06 L194.99 694.06 L194.99 652.08" class="st8"/>
+ <text x="16.99" y="683.56" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 1</text> </g>
+ <g id="shape35-54" v:mID="35" v:groupContext="shape" transform="translate(1190.48,-32.2189)">
+ <title>Sheet.35</title>
+ <desc>Logical output segment</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="164.662" cy="678.273" width="329.33" height="31.5648"/>
+ <path d="M329.32 662.49 L0 662.49 L0 694.06 L329.32 694.06 L329.32 662.49" class="st8"/>
+ <text x="0" y="686.16" class="st11" v:langID="1033"><v:paragraph/><v:tabList/>Logical output segment</text> </g>
+ <g id="shape39-58" v:mID="39" v:groupContext="shape" transform="translate(546.25,-529.921)">
+ <title>Sheet.39</title>
+ <path d="M3.43 529.94 L3.46 543.61 L0.03 543.61 L0 529.94 L3.43 529.94 L3.43 529.94 ZM3.46 553.87 L3.46 557.29 L0.03
+ 557.29 L0.03 553.87 L3.46 553.87 L3.46 553.87 ZM3.46 567.55 L3.46 581.22 L0.03 581.22 L0.03 567.55 L3.46
+ 567.55 L3.46 567.55 ZM3.46 591.48 L3.46 594.9 L0.03 594.9 L0.03 591.48 L3.46 591.48 L3.46 591.48 ZM3.46
+ 605.16 L3.46 618.83 L0.03 618.83 L0.03 605.16 L3.46 605.16 L3.46 605.16 ZM3.46 629.09 L3.46 632.51 L0.03
+ 632.51 L0.03 629.09 L3.46 629.09 L3.46 629.09 ZM3.46 642.77 L3.46 656.45 L0.03 656.45 L0.03 642.77 L3.46
+ 642.77 L3.46 642.77 ZM3.46 666.7 L3.46 670.12 L0.03 670.12 L0.03 666.7 L3.46 666.7 L3.46 666.7 ZM3.46 680.38
+ L3.46 694.06 L0.03 694.06 L0.03 680.38 L3.46 680.38 L3.46 680.38 Z" class="st1"/>
+ </g>
+ <g id="shape40-60" v:mID="40" v:groupContext="shape" transform="translate(549.097,-223.749)">
+ <title>Sheet.40</title>
+ <path d="M6.65 402.61 L13.01 414.71 L9.98 416.32 L3.62 404.22 L6.65 402.61 L6.65 402.61 ZM17.82 423.78 L19.4 426.81 L16.37
+ 428.42 L14.79 425.39 L17.82 423.78 L17.82 423.78 ZM24.21 435.91 L30.57 448.01 L27.54 449.59 L21.18 437.49
+ L24.21 435.91 L24.21 435.91 ZM35.38 457.08 L36.96 460.11 L33.93 461.69 L32.35 458.66 L35.38 457.08 L35.38
+ 457.08 ZM41.73 469.18 L48.12 481.28 L45.09 482.86 L38.7 470.76 L41.73 469.18 L41.73 469.18 ZM52.9 490.36
+ L54.51 493.38 L51.48 494.99 L49.87 491.97 L52.9 490.36 L52.9 490.36 ZM59.29 502.45 L65.68 514.55 L62.65
+ 516.16 L56.26 504.06 L59.29 502.45 L59.29 502.45 ZM70.46 523.63 L72.07 526.65 L69.04 528.26 L67.43 525.24
+ L70.46 523.63 L70.46 523.63 ZM76.85 535.76 L83.24 547.86 L80.21 549.43 L73.82 537.34 L76.85 535.76 L76.85
+ 535.76 ZM88.01 556.93 L89.63 559.95 L86.6 561.53 L84.98 558.51 L88.01 556.93 L88.01 556.93 ZM94.4 569.03
+ L100.79 581.13 L97.76 582.7 L91.37 570.61 L94.4 569.03 L94.4 569.03 ZM105.57 590.2 L107.15 593.22 L104.12
+ 594.84 L102.54 591.81 L105.57 590.2 L105.57 590.2 ZM111.96 602.3 L118.32 614.4 L115.28 616.01 L108.93 603.91
+ L111.96 602.3 L111.96 602.3 ZM123.12 623.47 L124.71 626.5 L121.67 628.11 L120.09 625.08 L123.12 623.47 L123.12
+ 623.47 ZM129.51 635.6 L135.87 647.7 L132.84 649.28 L126.48 637.18 L129.51 635.6 L129.51 635.6 ZM140.68 656.77
+ L142.26 659.8 L139.23 661.38 L137.65 658.35 L140.68 656.77 L140.68 656.77 ZM147.04 668.87 L153.43 680.97
+ L150.4 682.55 L144.01 670.45 L147.04 668.87 L147.04 668.87 ZM158.2 690.04 L159.49 692.48 L156.46 694.06
+ L155.17 691.66 L158.2 690.04 L158.2 690.04 ZM0.33 408.43 L0 393.7 L11.99 402.28 L0.33 408.43 L0.33 408.43
+ Z" class="st1"/>
+ </g>
+ <g id="shape46-62" v:mID="46" v:groupContext="shape" transform="translate(66.8445,-221.499)">
+ <title>Sheet.46</title>
+ <path d="M0 611.87 C0 602.79 7.38 595.43 16.47 595.43 L273.39 595.43 C282.51 595.43 289.86 602.79 289.86 611.87 L289.86
+ 677.62 C289.86 686.72 282.51 694.06 273.39 694.06 L16.47 694.06 C7.38 694.06 -0 686.72 0 677.62 L0 611.87
+ Z" class="st4"/>
+ </g>
+ <g id="shape47-64" v:mID="47" v:groupContext="shape" transform="translate(66.8445,-221.499)">
+ <title>Sheet.47</title>
+ <path d="M0 611.87 C0 602.79 7.38 595.43 16.47 595.43 L273.39 595.43 C282.51 595.43 289.86 602.79 289.86 611.87 L289.86
+ 677.62 C289.86 686.72 282.51 694.06 273.39 694.06 L16.47 694.06 C7.38 694.06 -0 686.72 0 677.62 L0 611.87
+ Z" class="st12"/>
+ </g>
+ <g id="shape48-66" v:mID="48" v:groupContext="shape" transform="translate(113.27,-263.667)">
+ <title>Sheet.48</title>
+ <desc>Direct mbuf</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="98.5041" cy="675.59" width="197.01" height="36.9302"/>
+ <path d="M197.01 657.13 L0 657.13 L0 694.06 L197.01 694.06 L197.01 657.13" class="st8"/>
+ <text x="18.66" y="684.59" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Direct mbuf</text> </g>
+ <g id="shape51-70" v:mID="51" v:groupContext="shape" transform="translate(85.817,-233.439)">
+ <title>Sheet.51</title>
+ <desc>(copy of headers)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="127.916" cy="678.273" width="255.84" height="31.5648"/>
+ <path d="M255.83 662.49 L0 662.49 L0 694.06 L255.83 694.06 L255.83 662.49" class="st8"/>
+ <text x="34.33" y="685.47" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(copy of headers)</text> </g>
+ <g id="shape53-74" v:mID="53" v:groupContext="shape" transform="translate(371.944,-275.998)">
+ <title>Sheet.53</title>
+ <desc>next</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="33.3635" cy="678.273" width="66.73" height="31.5648"/>
+ <path d="M66.73 662.49 L0 662.49 L0 694.06 L66.73 694.06 L66.73 662.49" class="st8"/>
+ <text x="7.56" y="686.16" class="st15" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>next</text> </g>
+ <g id="shape54-78" v:mID="54" v:groupContext="shape" transform="translate(695.132,-646.04)">
+ <title>Sheet.54</title>
+ <path d="M0 655.39 L0 694.06 L100.4 694.06 L100.4 655.39 L0 655.39 L0 655.39 Z" class="st16"/>
+ </g>
+ <g id="shape55-80" v:mID="55" v:groupContext="shape" transform="translate(709.033,-648.946)">
+ <title>Sheet.55</title>
+ <desc>segsz</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="36.6265" cy="680.167" width="73.26" height="27.7775"/>
+ <path d="M73.25 666.28 L0 666.28 L0 694.06 L73.25 694.06 L73.25 666.28" class="st8"/>
+ <text x="7.6" y="687.11" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>segsz</text> </g>
+ <g id="shape56-84" v:mID="56" v:groupContext="shape" transform="translate(785.874,-521.182)">
+ <title>Sheet.56</title>
+ <path d="M0 597.27 L0 694.06 L363.41 694.06 L363.41 597.27 L0 597.27 L0 597.27 Z" class="st6"/>
+ </g>
+ <g id="shape57-86" v:mID="57" v:groupContext="shape" transform="translate(785.874,-521.182)">
+ <title>Sheet.57</title>
+ <path d="M0 597.27 L363.41 597.27 L363.41 694.06 L0 694.06 L0 597.27" class="st7"/>
+ </g>
+ <g id="shape58-89" v:mID="58" v:groupContext="shape" v:layerMember="0" transform="translate(943.158,-529.889)">
+ <title>Sheet.58</title>
+ <path d="M1.35 529.91 L1.25 543.58 L4.68 543.61 L4.78 529.94 L1.35 529.91 L1.35 529.91 ZM1.15 553.84 L1.12 557.26 L4.55
+ 557.29 L4.58 553.87 L1.15 553.84 L1.15 553.84 ZM1.05 567.52 L0.92 581.19 L4.35 581.22 L4.48 567.55 L1.05
+ 567.52 L1.05 567.52 ZM0.86 591.45 L0.82 594.87 L4.25 594.9 L4.28 591.48 L0.86 591.45 L0.86 591.45 ZM0.72
+ 605.13 L0.63 618.8 L4.05 618.83 L4.15 605.16 L0.72 605.13 L0.72 605.13 ZM0.53 629.06 L0.53 632.48 L3.95
+ 632.51 L3.95 629.09 L0.53 629.06 L0.53 629.06 ZM0.43 642.74 L0.33 656.41 L3.75 656.45 L3.85 642.77 L0.43
+ 642.74 L0.43 642.74 ZM0.23 666.67 L0.2 670.09 L3.62 670.12 L3.66 666.7 L0.23 666.67 L0.23 666.67 ZM0.13
+ 680.35 L0 694.02 L3.43 694.06 L3.56 680.38 L0.13 680.35 L0.13 680.35 Z" class="st18"/>
+ </g>
+ <g id="shape59-91" v:mID="59" v:groupContext="shape" transform="translate(785.874,-549.473)">
+ <title>Sheet.59</title>
+ <desc>Payload 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="77.3395" cy="673.065" width="154.68" height="41.9798"/>
+ <path d="M154.68 652.08 L0 652.08 L0 694.06 L154.68 694.06 L154.68 652.08" class="st8"/>
+ <text x="16.96" y="680.57" class="st9" v:langID="1033"><v:paragraph/><v:tabList/>Payload 1</text> </g>
+ <g id="shape60-95" v:mID="60" v:groupContext="shape" transform="translate(952.97,-548.822)">
+ <title>Sheet.60</title>
+ <desc>Payload 2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="97.4929" cy="673.065" width="194.99" height="41.9798"/>
+ <path d="M194.99 652.08 L0 652.08 L0 694.06 L194.99 694.06 L194.99 652.08" class="st8"/>
+ <text x="46.92" y="680.57" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload 2</text> </g>
+ <g id="shape63-99" v:mID="63" v:groupContext="shape" transform="translate(1190.48,-552.568)">
+ <title>Sheet.63</title>
+ <desc>Multi-segment input packet</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="181.707" cy="678.273" width="363.42" height="31.5648"/>
+ <path d="M363.41 662.49 L0 662.49 L0 694.06 L363.41 694.06 L363.41 662.49" class="st8"/>
+ <text x="0" y="686.16" class="st11" v:langID="1033"><v:paragraph/><v:tabList/>Multi-segment input packet</text> </g>
+ <g id="shape70-103" v:mID="70" v:groupContext="shape" v:layerMember="1" transform="translate(455.049,-221.499)">
+ <title>Sheet.70</title>
+ <path d="M0 611.87 C0 602.79 5.33 595.43 11.89 595.43 L282.92 595.43 C289.53 595.43 294.8 602.79 294.8 611.87 L294.8
+ 677.62 C294.8 686.76 289.53 694.06 282.92 694.06 L11.89 694.06 C5.33 694.06 0 686.76 0 677.62 L0 611.87
+ Z" class="st6"/>
+ </g>
+ <g id="shape71-105" v:mID="71" v:groupContext="shape" transform="translate(455.049,-221.499)">
+ <title>Sheet.71</title>
+ <path d="M0 611.87 C0 602.79 5.33 595.43 11.89 595.43 L282.92 595.43 C289.53 595.43 294.8 602.79 294.8 611.87 L294.8
+ 677.62 C294.8 686.76 289.53 694.06 282.92 694.06 L11.89 694.06 C5.33 694.06 0 686.76 0 677.62 L0 611.87
+ Z" class="st12"/>
+ </g>
+ <g id="shape72-107" v:mID="72" v:groupContext="shape" transform="translate(489.065,-263.434)">
+ <title>Sheet.72</title>
+ <desc>Indirect mbuf</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="112.128" cy="675.59" width="224.26" height="36.9302"/>
+ <path d="M224.26 657.13 L0 657.13 L0 694.06 L224.26 694.06 L224.26 657.13" class="st8"/>
+ <text x="20.73" y="684.59" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Indirect mbuf</text> </g>
+ <g id="shape75-111" v:mID="75" v:groupContext="shape" transform="translate(849.065,-281.435)">
+ <title>Sheet.75</title>
+ <desc>(pointer to data)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="100.199" cy="678.273" width="200.4" height="31.5648"/>
+ <path d="M200.4 662.49 L0 662.49 L0 694.06 L200.4 694.06 L200.4 662.49" class="st8"/>
+ <text x="4.49" y="686.16" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(pointer to data)</text> </g>
+ <g id="shape77-115" v:mID="77" v:groupContext="shape" transform="translate(717.742,-563.523)">
+ <title>Sheet.77</title>
+ <desc>next</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="33.3635" cy="678.273" width="66.73" height="31.5648"/>
+ <path d="M66.73 662.49 L0 662.49 L0 694.06 L66.73 694.06 L66.73 662.49" class="st8"/>
+ <text x="15.71" y="683.67" class="st19" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>next</text> </g>
+ <g id="shape78-119" v:mID="78" v:groupContext="shape" transform="translate(1148.17,-529.067)">
+ <title>Sheet.78</title>
+ <path d="M1.38 529.87 L1.25 543.55 L4.68 543.61 L4.81 529.94 L1.38 529.87 L1.38 529.87 ZM1.19 553.81 L1.12 557.23 L4.55
+ 557.29 L4.61 553.87 L1.19 553.81 L1.19 553.81 ZM1.05 567.48 L0.92 581.16 L4.35 581.22 L4.48 567.55 L1.05
+ 567.48 L1.05 567.48 ZM0.86 591.42 L0.86 594.84 L4.28 594.9 L4.28 591.48 L0.86 591.42 L0.86 591.42 ZM0.72
+ 605.09 L0.66 618.77 L4.08 618.83 L4.15 605.16 L0.72 605.09 L0.72 605.09 ZM0.53 629.03 L0.53 632.45 L3.95
+ 632.51 L3.95 629.09 L0.53 629.03 L0.53 629.03 ZM0.46 642.7 L0.33 656.38 L3.75 656.45 L3.89 642.77 L0.46
+ 642.7 L0.46 642.7 ZM0.26 666.64 L0.2 670.06 L3.62 670.12 L3.69 666.7 L0.26 666.64 L0.26 666.64 ZM0.13 680.31
+ L0 693.99 L3.43 694.06 L3.56 680.38 L0.13 680.31 L0.13 680.31 Z" class="st20"/>
+ </g>
+ <g id="shape79-121" v:mID="79" v:groupContext="shape" transform="translate(946.254,-657.81)">
+ <title>Sheet.79</title>
+ <path d="M11 686.69 L17.33 686.69 L17.33 688.27 L11 688.27 L11 686.69 L11 686.69 ZM22.07 686.69 L23.65 686.69 L23.65
+ 688.27 L22.07 688.27 L22.07 686.69 L22.07 686.69 ZM28.39 686.69 L34.72 686.69 L34.72 688.27 L28.39 688.27
+ L28.39 686.69 L28.39 686.69 ZM39.46 686.69 L41.04 686.69 L41.04 688.27 L39.46 688.27 L39.46 686.69 L39.46
+ 686.69 ZM45.78 686.69 L52.11 686.69 L52.11 688.27 L45.78 688.27 L45.78 686.69 L45.78 686.69 ZM56.85 686.69
+ L58.43 686.69 L58.43 688.27 L56.85 688.27 L56.85 686.69 L56.85 686.69 ZM63.18 686.69 L69.5 686.69 L69.5
+ 688.27 L63.18 688.27 L63.18 686.69 L63.18 686.69 ZM74.24 686.69 L75.82 686.69 L75.82 688.27 L74.24 688.27
+ L74.24 686.69 L74.24 686.69 ZM80.57 686.69 L86.89 686.69 L86.89 688.27 L80.57 688.27 L80.57 686.69 L80.57
+ 686.69 ZM91.63 686.69 L93.22 686.69 L93.22 688.27 L91.63 688.27 L91.63 686.69 L91.63 686.69 ZM97.96 686.69
+ L104.28 686.69 L104.28 688.27 L97.96 688.27 L97.96 686.69 L97.96 686.69 ZM109.03 686.69 L110.61 686.69 L110.61
+ 688.27 L109.03 688.27 L109.03 686.69 L109.03 686.69 ZM115.35 686.69 L121.67 686.69 L121.67 688.27 L115.35
+ 688.27 L115.35 686.69 L115.35 686.69 ZM126.42 686.69 L128 686.69 L128 688.27 L126.42 688.27 L126.42 686.69
+ L126.42 686.69 ZM132.74 686.69 L139.07 686.69 L139.07 688.27 L132.74 688.27 L132.74 686.69 L132.74 686.69
+ ZM143.81 686.69 L145.39 686.69 L145.39 688.27 L143.81 688.27 L143.81 686.69 L143.81 686.69 ZM150.13 686.69
+ L156.46 686.69 L156.46 688.27 L150.13 688.27 L150.13 686.69 L150.13 686.69 ZM161.2 686.69 L162.78 686.69
+ L162.78 688.27 L161.2 688.27 L161.2 686.69 L161.2 686.69 ZM167.53 686.69 L173.85 686.69 L173.85 688.27 L167.53
+ 688.27 L167.53 686.69 L167.53 686.69 ZM178.59 686.69 L180.17 686.69 L180.17 688.27 L178.59 688.27 L178.59
+ 686.69 L178.59 686.69 ZM184.92 686.69 L189.4 686.69 L189.4 688.27 L184.92 688.27 L184.92 686.69 L184.92
+ 686.69 ZM13.18 694.06 L0 687.41 L13.18 680.9 L13.18 694.06 L13.18 694.06 ZM187.22 680.9 L200.4 687.48 L187.22
+ 694.06 L187.22 680.9 L187.22 680.9 Z" class="st20"/>
+ </g>
+ <g id="shape80-123" v:mID="80" v:groupContext="shape" transform="translate(982.882,-643.673)">
+ <title>Sheet.80</title>
+ <path d="M0 655.13 L0 694.06 L127.01 694.06 L127.01 655.13 L0 655.13 L0 655.13 Z" class="st16"/>
+ </g>
+ <g id="shape81-125" v:mID="81" v:groupContext="shape" transform="translate(1003.39,-660.621)">
+ <title>Sheet.81</title>
+ <desc>pkt_len</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="48.6041" cy="680.956" width="97.21" height="26.1994"/>
+ <path d="M97.21 667.86 L0 667.86 L0 694.06 L97.21 694.06 L97.21 667.86" class="st8"/>
+ <text x="11.67" y="687.5" class="st21" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>pkt_len </text> </g>
+ <g id="shape82-129" v:mID="82" v:groupContext="shape" transform="translate(1001.18,-634.321)">
+ <title>Sheet.82</title>
+ <desc>% segsz</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="49.2945" cy="680.956" width="98.59" height="26.1994"/>
+ <path d="M98.59 667.86 L0 667.86 L0 694.06 L98.59 694.06 L98.59 667.86" class="st8"/>
+ <text x="9.09" y="687.5" class="st21" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>% segsz</text> </g>
+ <g id="shape34-133" v:mID="34" v:groupContext="shape" v:layerMember="0" transform="translate(356.703,-264.106)">
+ <title>Sheet.34</title>
+ <path d="M0 685.77 L90.67 685.77 L90.67 689.19 L0 689.19 L0 685.77 L0 685.77 ZM89.36 680.9 L97.21 687.48 L89.36 694.06
+ L89.36 680.9 L89.36 680.9 Z" class="st2"/>
+ </g>
+ <g id="shape85-135" v:mID="85" v:groupContext="shape" v:layerMember="0" transform="translate(78.5359,-282.66)">
+ <title>Sheet.85</title>
+ <path d="M0 680.87 C-0 673.59 6.88 667.69 15.37 667.69 C23.86 667.69 30.73 673.59 30.73 680.87 C30.73 688.15 23.86 694.06
+ 15.37 694.06 C6.88 694.06 0 688.15 0 680.87 Z" class="st16"/>
+ </g>
+ <g id="shape87-137" v:mID="87" v:groupContext="shape" v:layerMember="0" transform="translate(85.4791,-284.062)">
+ <title>Sheet.87</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="8.66303" cy="683.269" width="17.33" height="21.5726"/>
+ <path d="M17.33 672.48 L0 672.48 L0 694.06 L17.33 694.06 L17.33 672.48" class="st8"/>
+ <text x="3.32" y="688.66" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape88-141" v:mID="88" v:groupContext="shape" v:layerMember="0" transform="translate(468.906,-282.66)">
+ <title>Sheet.88</title>
+ <path d="M0 680.87 C-0 673.59 6.89 667.69 15.37 667.69 C23.86 667.69 30.73 673.59 30.73 680.87 C30.73 688.15 23.86 694.06
+ 15.37 694.06 C6.89 694.06 -0 688.15 0 680.87 Z" class="st16"/>
+ </g>
+ <g id="shape90-143" v:mID="90" v:groupContext="shape" v:layerMember="0" transform="translate(474.575,-284.062)">
+ <title>Sheet.90</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="8.66303" cy="683.269" width="17.33" height="21.5726"/>
+ <path d="M17.33 672.48 L0 672.48 L0 694.06 L17.33 694.06 L17.33 672.48" class="st8"/>
+ <text x="3.32" y="688.66" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape95-147" v:mID="95" v:groupContext="shape" v:layerMember="0" transform="translate(764.026,-275.998)">
+ <title>Sheet.95</title>
+ <desc>next</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="33.3635" cy="678.273" width="66.73" height="31.5648"/>
+ <path d="M66.73 662.49 L0 662.49 L0 694.06 L66.73 694.06 L66.73 662.49" class="st8"/>
+ <text x="7.56" y="686.16" class="st15" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>next</text> </g>
+ <g id="shape97-151" v:mID="97" v:groupContext="shape" v:layerMember="0" transform="translate(844.524,-219.79)">
+ <title>Sheet.97</title>
+ <path d="M0 611.87 C0 602.79 5.45 595.43 12.17 595.43 L289.54 595.43 C296.31 595.43 301.71 602.79 301.71 611.87 L301.71
+ 677.62 C301.71 686.76 296.31 694.06 289.54 694.06 L12.17 694.06 C5.45 694.06 0 686.76 0 677.62 L0 611.87
+ Z" class="st12"/>
+ </g>
+ <g id="shape100-153" v:mID="100" v:groupContext="shape" v:layerMember="0" transform="translate(751.857,-262.528)">
+ <title>Sheet.100</title>
+ <path d="M0 685.77 L90.67 685.77 L90.67 689.19 L0 689.19 L0 685.77 L0 685.77 ZM89.36 680.9 L97.21 687.48 L89.36 694.06
+ L89.36 680.9 L89.36 680.9 Z" class="st2"/>
+ </g>
+ <g id="shape104-155" v:mID="104" v:groupContext="shape" v:layerMember="1" transform="translate(851.429,-218.08)">
+ <title>Sheet.104</title>
+ <path d="M0 611.87 C0 602.79 5.33 595.43 11.89 595.43 L282.92 595.43 C289.53 595.43 294.8 602.79 294.8 611.87 L294.8
+ 677.62 C294.8 686.76 289.53 694.06 282.92 694.06 L11.89 694.06 C5.33 694.06 0 686.76 0 677.62 L0 611.87
+ Z" class="st6"/>
+ </g>
+ <g id="shape105-157" v:mID="105" v:groupContext="shape" v:layerMember="0" transform="translate(885.444,-260.015)">
+ <title>Sheet.105</title>
+ <desc>Indirect mbuf</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="112.128" cy="675.59" width="224.26" height="36.9302"/>
+ <path d="M224.26 657.13 L0 657.13 L0 694.06 L224.26 694.06 L224.26 657.13" class="st8"/>
+ <text x="20.73" y="684.59" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Indirect mbuf</text> </g>
+ <g id="shape106-161" v:mID="106" v:groupContext="shape" v:layerMember="0" transform="translate(895.672,-229.419)">
+ <title>Sheet.106</title>
+ <desc>(pointer to data)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="100.199" cy="678.273" width="200.4" height="31.5648"/>
+ <path d="M200.4 662.49 L0 662.49 L0 694.06 L200.4 694.06 L200.4 662.49" class="st8"/>
+ <text x="12.86" y="685.47" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(pointer to data)</text> </g>
+ <g id="shape107-165" v:mID="107" v:groupContext="shape" v:layerMember="0" transform="translate(863.297,-280.442)">
+ <title>Sheet.107</title>
+ <path d="M0 680.87 C-0 673.59 6.89 667.69 15.37 667.69 C23.86 667.69 30.73 673.59 30.73 680.87 C30.73 688.15 23.86 694.06
+ 15.37 694.06 C6.89 694.06 -0 688.15 0 680.87 Z" class="st16"/>
+ </g>
+ <g id="shape108-167" v:mID="108" v:groupContext="shape" v:layerMember="0" transform="translate(870.001,-281.547)">
+ <title>Sheet.108</title>
+ <desc>3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="8.66303" cy="683.269" width="17.33" height="21.5726"/>
+ <path d="M17.33 672.48 L0 672.48 L0 694.06 L17.33 694.06 L17.33 672.48" class="st8"/>
+ <text x="3.32" y="688.66" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>3</text> </g>
+ <g id="shape109-171" v:mID="109" v:groupContext="shape" v:layerMember="0" transform="translate(500.959,-231.87)">
+ <title>Sheet.109</title>
+ <desc>(pointer to data)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="100.199" cy="678.273" width="200.4" height="31.5648"/>
+ <path d="M200.4 662.49 L0 662.49 L0 694.06 L200.4 694.06 L200.4 662.49" class="st8"/>
+ <text x="12.86" y="685.47" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(pointer to data)</text> </g>
+ <g id="shape113-175" v:mID="113" v:groupContext="shape" v:layerMember="0" transform="translate(1187.64,-251.712)">
+ <title>Sheet.113</title>
+ <desc>Three-part output segment</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)" v:tabSpace="42.5197"/>
+ <v:textRect cx="327.402" cy="678.273" width="654.81" height="31.5648"/>
+ <path d="M654.8 662.49 L0 662.49 L0 694.06 L654.8 694.06 L654.8 662.49" class="st8"/>
+ <text x="0" y="686.16" class="st11" v:langID="1033"><v:paragraph/><v:tabList/>Three-part output segment</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/mbuf1.svg b/doc/guides/prog_guide/img/mbuf1.svg
index 5bd84d1b..8750424e 100644
--- a/doc/guides/prog_guide/img/mbuf1.svg
+++ b/doc/guides/prog_guide/img/mbuf1.svg
@@ -482,7 +482,7 @@
sodipodi:role="line"
x="187.85715"
y="347.7193"
- id="tspan5240">(m-&gt;buf_physaddr is the</tspan><tspan
+ id="tspan5240">(m-&gt;buf_iova is the</tspan><tspan
sodipodi:role="line"
x="187.85715"
y="360.2193"
diff --git a/doc/guides/prog_guide/img/member_i1.svg b/doc/guides/prog_guide/img/member_i1.svg
new file mode 100644
index 00000000..fc5f56ac
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i1.svg
@@ -0,0 +1,1613 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Generated by Microsoft Visio 11.0, SVG Export, v1.0 memship_i1.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="7.18709in" height="4.75757in"
+ viewBox="0 0 517.471 342.545" xml:space="preserve" color-interpolation-filters="sRGB" class="st61">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:none;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:3}
+ .st3 {fill:#5b9bd5;font-family:Calibri;font-size:0.666664em;opacity:0.219608}
+ .st4 {font-size:1em}
+ .st5 {fill:none;stroke:#41719c;stroke-width:3}
+ .st6 {fill:#5b9bd5;font-family:Calibri;font-size:0.666664em}
+ .st7 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em;opacity:0.219608}
+ .st8 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em}
+ .st9 {fill:#5b9bd5;fill-opacity:0.22;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st10 {fill:#5b9bd5;stroke:#c8c8c8;stroke-width:0.25}
+ .st11 {fill:none;stroke:none;stroke-width:0.25}
+ .st12 {fill:#ffffff;font-family:Calibri;font-size:0.499992em;font-weight:bold}
+ .st13 {fill:#ffffff;font-family:Calibri;font-size:0.75em;font-weight:bold}
+ .st14 {marker-end:url(#mrkr5-63);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st15 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st16 {fill:#5b9bd5;font-family:Calibri;font-size:0.499992em;font-weight:bold}
+ .st17 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st18 {fill:#feffff;font-family:Calibri;font-size:0.499992em}
+ .st19 {fill:#deebf6;stroke:#c8c8c8;stroke-width:0.25}
+ .st20 {fill:#000000;font-family:Calibri;font-size:0.499992em}
+ .st21 {marker-end:url(#mrkr5-178);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st22 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st23 {fill:#ff0000;font-family:Calibri;font-size:0.666664em}
+ .st24 {fill:#5b9bd5;fill-opacity:0.22}
+ .st25 {stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st26 {fill:#ffffff}
+ .st27 {stroke:#0070c0;stroke-width:0.25}
+ .st28 {fill:#5b9bd5;stroke:#0070c0;stroke-width:0.25}
+ .st29 {fill:#5b9bd5;stroke:#ffffff;stroke-width:0.25}
+ .st30 {fill:#5b9bd5}
+ .st31 {stroke:#c8c8c8;stroke-width:0.25}
+ .st32 {fill:#acccea;stroke:#c8c8c8;stroke-width:0.25}
+ .st33 {fill:#5b9bd5;fill-opacity:0.22;stroke:none;stroke-linecap:butt;stroke-width:0.75}
+ .st34 {fill:#000000;fill-opacity:0;stroke:none;stroke-linecap:butt;stroke-width:0.75}
+ .st35 {fill:url(#grad30-309);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st36 {fill:url(#grad25-313);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st37 {fill:url(#grad35-317);stroke:#308dda;stroke-linecap:butt;stroke-width:0.130208}
+ .st38 {fill:url(#grad36-325);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st39 {fill:url(#grad40-335);stroke:#000000;stroke-linecap:butt;stroke-width:0.130208}
+ .st40 {fill:url(#grad39-342);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st41 {fill:url(#grad40-355);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st42 {fill:none}
+ .st43 {stroke:#308dda;stroke-linecap:butt;stroke-width:0.130208}
+ .st44 {stroke:#ffffff;stroke-linecap:butt;stroke-width:0.130208}
+ .st45 {fill:url(#grad30-383);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st46 {fill:url(#grad36-396);stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st47 {fill:none;stroke:#c8c8c8;stroke-width:0.75}
+ .st48 {fill:#9a9a9a;stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0833333}
+ .st49 {fill:url(#grad40-415);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0833333}
+ .st50 {fill:url(#grad40-419);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0833333}
+ .st51 {stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.25}
+ .st52 {fill:url(#grad35-430);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0833333}
+ .st53 {stroke:#c8c8c8;stroke-width:0.75}
+ .st54 {stroke:#4f88bb;stroke-width:0.75}
+ .st55 {fill:#feffff;font-family:Calibri;font-size:0.416656em}
+ .st56 {fill:#5b9bd5;fill-opacity:0.25;stroke:#5b9bd5;stroke-opacity:0.25;stroke-width:0.75}
+ .st57 {fill:#4f88bb;stroke:#41719c;stroke-width:0.75}
+ .st58 {fill:none;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:2.25}
+ .st59 {fill:none;stroke:#0070c0;stroke-width:2.25}
+ .st60 {fill:#595959;font-family:Arial;font-size:0.666664em}
+ .st61 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Patterns_And_Gradients">
+ <linearGradient id="grad30-309" v:fillPattern="30" v:foreground="#97c2e6" v:background="#4274a2" x1="0" y1="1" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#97c2e6;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#4274a2;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad25-313" v:fillPattern="25" v:foreground="#5491d3" v:background="#246ba6" x1="0" y1="0" x2="1"
+ y2="0">
+ <stop offset="0" style="stop-color:#5491d3;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#246ba6;stop-opacity:1"/>
+ </linearGradient>
+ <pattern id="grad35-317" v:fillPattern="35" v:foreground="#569bd3" v:background="#aed0ec" x="0" y="0" width="1" height="1"
+ patternContentUnits="objectBoundingBox">
+ <path d="M 0.5 0.5 L 0 0 L 0 1 z" style="fill:url(#grad27-318)"/>
+ <path d="M 0.5 0.5 L 1 0 L 1 1 z" style="fill:url(#grad25-319)"/>
+ <path d="M 0.5 0.5 L 0 0 L 1 0 z" style="fill:url(#grad30-320)"/>
+ <path d="M 0.5 0.5 L 0 1 L 1 1 z" style="fill:url(#grad28-321)"/>
+ </pattern>
+ <linearGradient id="grad27-318" v:fillPattern="35" v:foreground="#569bd3" v:background="#aed0ec" x1="1" y1="0" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#569bd3;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#aed0ec;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad25-319" v:fillPattern="35" v:foreground="#569bd3" v:background="#aed0ec" x1="0" y1="0" x2="1"
+ y2="0">
+ <stop offset="0" style="stop-color:#569bd3;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#aed0ec;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad30-320" v:fillPattern="35" v:foreground="#569bd3" v:background="#aed0ec" x1="0" y1="1" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#569bd3;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#aed0ec;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad28-321" v:fillPattern="35" v:foreground="#569bd3" v:background="#aed0ec" x1="0" y1="0" x2="0"
+ y2="1">
+ <stop offset="0" style="stop-color:#569bd3;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#aed0ec;stop-opacity:1"/>
+ </linearGradient>
+ <radialGradient id="grad36-325" v:fillPattern="36" v:foreground="#c0dff1" v:background="#246ba6" cx="0" cy="0" r="1">
+ <stop offset="0" style="stop-color:#c0dff1;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#246ba6;stop-opacity:1"/>
+ </radialGradient>
+ <radialGradient id="grad40-335" v:fillPattern="40" v:foreground="#c8e5c8" v:background="#19bf19" cx="0.5" cy="0.5" r="0.5">
+ <stop offset="0" style="stop-color:#c8e5c8;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#19bf19;stop-opacity:1"/>
+ </radialGradient>
+ <radialGradient id="grad39-342" v:fillPattern="39" v:foreground="#5599d7" v:background="#b9daf2" cx="1" cy="1" r="1">
+ <stop offset="0" style="stop-color:#5599d7;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#b9daf2;stop-opacity:1"/>
+ </radialGradient>
+ <radialGradient id="grad40-355" v:fillPattern="40" v:foreground="#5599d7" v:background="#214383" cx="0.5" cy="0.5" r="0.5">
+ <stop offset="0" style="stop-color:#5599d7;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#214383;stop-opacity:1"/>
+ </radialGradient>
+ <linearGradient id="grad30-383" v:fillPattern="30" v:foreground="#97c2e6" v:background="#6ba4dc" x1="0" y1="1" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#97c2e6;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#6ba4dc;stop-opacity:1"/>
+ </linearGradient>
+ <radialGradient id="grad36-396" v:fillPattern="36" v:foreground="#89bee9" v:background="#b9daf2" cx="0" cy="0" r="1">
+ <stop offset="0" style="stop-color:#89bee9;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#b9daf2;stop-opacity:1"/>
+ </radialGradient>
+ <radialGradient id="grad40-415" v:fillPattern="40" v:foreground="#000000" v:background="#ffffff" cx="0.5" cy="0.5" r="0.5">
+ <stop offset="0" style="stop-color:#000000;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#ffffff;stop-opacity:1"/>
+ </radialGradient>
+ <radialGradient id="grad40-419" v:fillPattern="40" v:foreground="#ffffff" v:background="#9a9a9a" cx="0.5" cy="0.5" r="0.5">
+ <stop offset="0" style="stop-color:#ffffff;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#9a9a9a;stop-opacity:1"/>
+ </radialGradient>
+ <pattern id="grad35-430" v:fillPattern="35" v:foreground="#ffffff" v:background="#ffcc00" x="0" y="0" width="1" height="1"
+ patternContentUnits="objectBoundingBox">
+ <path d="M 0.5 0.5 L 0 0 L 0 1 z" style="fill:url(#grad27-431)"/>
+ <path d="M 0.5 0.5 L 1 0 L 1 1 z" style="fill:url(#grad25-432)"/>
+ <path d="M 0.5 0.5 L 0 0 L 1 0 z" style="fill:url(#grad30-433)"/>
+ <path d="M 0.5 0.5 L 0 1 L 1 1 z" style="fill:url(#grad28-434)"/>
+ </pattern>
+ <linearGradient id="grad27-431" v:fillPattern="35" v:foreground="#ffffff" v:background="#ffcc00" x1="1" y1="0" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#ffffff;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#ffcc00;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad25-432" v:fillPattern="35" v:foreground="#ffffff" v:background="#ffcc00" x1="0" y1="0" x2="1"
+ y2="0">
+ <stop offset="0" style="stop-color:#ffffff;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#ffcc00;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad30-433" v:fillPattern="35" v:foreground="#ffffff" v:background="#ffcc00" x1="0" y1="1" x2="0"
+ y2="0">
+ <stop offset="0" style="stop-color:#ffffff;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#ffcc00;stop-opacity:1"/>
+ </linearGradient>
+ <linearGradient id="grad28-434" v:fillPattern="35" v:foreground="#ffffff" v:background="#ffcc00" x1="0" y1="0" x2="0"
+ y2="1">
+ <stop offset="0" style="stop-color:#ffffff;stop-opacity:1"/>
+ <stop offset="1" style="stop-color:#ffcc00;stop-opacity:1"/>
+ </linearGradient>
+ </defs>
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-63" class="st15" v:arrowType="5" v:arrowSize="2" v:setback="6.16" refX="-6.16" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-178" class="st22" v:arrowType="5" v:arrowSize="2" v:setback="5.8" refX="-5.8" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <v:layer v:name="Flowchart" v:index="0"/>
+ <g id="group165-1" transform="translate(21.7794,-24.0978)" v:mID="165" v:groupContext="group">
+ <title>Sheet.165</title>
+ <g id="group1-2" transform="translate(308.647,-25.7109)" v:mID="1" v:groupContext="group">
+ <title>Sheet.1</title>
+ <g id="shape2-3" v:mID="2" v:groupContext="shape" transform="translate(11.5732,-58.1913)">
+ <title>Circle</title>
+ <desc>List 1 matching Criteria 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.2233" cy="315.322" width="47.65" height="40.835"/>
+ <g id="shadow2-4" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st2"/>
+ <text x="18.79" y="308.12" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 1 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>1</text> </g>
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st5"/>
+ <text x="18.79" y="308.12" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 1 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>1</text> </g>
+ <g id="shape3-13" v:mID="3" v:groupContext="shape" transform="translate(58.9839,-58.9839)">
+ <title>Circle.23</title>
+ <desc>List 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.2233" cy="315.322" width="47.65" height="40.835"/>
+ <g id="shadow3-14" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st2"/>
+ <text x="17.73" y="318.02" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 2</text> </g>
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st5"/>
+ <text x="17.73" y="318.02" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 2</text> </g>
+ <g id="shape4-19" v:mID="4" v:groupContext="shape">
+ <title>Circle.24</title>
+ <desc>List 1 matching Criteria 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.2233" cy="315.322" width="47.65" height="40.835"/>
+ <g id="shadow4-20" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st2"/>
+ <text x="18.79" y="308.12" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 1 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>1</text> </g>
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st5"/>
+ <text x="18.79" y="308.12" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 1 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>1</text> </g>
+ <g id="group5-29" transform="translate(50.7413,-4.53722)" v:mID="5" v:groupContext="group">
+ <title>Sheet.5</title>
+ <g id="shape6-30" v:mID="6" v:groupContext="shape" transform="translate(344.2,300.5) rotate(90)">
+ <title>Triangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow6-31" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,1.9728,-0.3456)" class="st1">
+ <path d="M42.04 342.55 L21.02 318.64 L0 342.55 L42.04 342.55 Z" class="st9"/>
+ </g>
+ <path d="M42.04 342.55 L21.02 318.64 L0 342.55 L42.04 342.55 Z" class="st10"/>
+ </g>
+ <g id="shape7-34" v:mID="7" v:groupContext="shape" transform="translate(-0.884982,-14.7157)">
+ <title>Sheet.7</title>
+ <desc>setsum</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="12.9268" cy="336.238" width="25.86" height="12.6135"/>
+ <rect x="0" y="329.932" width="25.8535" height="12.6135" class="st11"/>
+ <text x="6.37" y="334.44" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>setsu<tspan
+ x="10.49" dy="1.2em" class="st4">m</tspan></text> </g>
+ </g>
+ <g id="shape8-38" v:mID="8" v:groupContext="shape" transform="translate(72.5955,0)">
+ <title>Circle.29</title>
+ <desc>List 2 matching Criteria 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.2233" cy="315.322" width="47.65" height="40.835"/>
+ <g id="shadow8-39" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st2"/>
+ <text x="18.79" y="308.12" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 2 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>2</text> </g>
+ <path d="M0 315.32 A27.2233 27.2233 0 1 1 54.45 315.32 A27.2233 27.2233 0 1 1 0 315.32 Z" class="st5"/>
+ <text x="18.79" y="308.12" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>List 2 <tspan
+ x="12.08" dy="1.2em" class="st4">matching </tspan><tspan x="12.29" dy="1.2em" class="st4">Criteria </tspan>2</text> </g>
+ </g>
+ <g id="group9-48" transform="translate(31.6515,-49.9094)" v:mID="9" v:groupContext="group">
+ <title>Sheet.9</title>
+ <g id="group10-49" transform="translate(99.5691,0)" v:mID="10" v:groupContext="group">
+ <title>Sheet.10</title>
+ <g id="shape11-50" v:mID="11" v:groupContext="shape" transform="translate(346.175,275.999) rotate(90)">
+ <title>Triangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-51" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,1.9728,-0.3456)" class="st1">
+ <path d="M66.55 342.55 L33.27 290.12 L0 342.55 L66.55 342.55 Z" class="st9"/>
+ </g>
+ <path d="M66.55 342.55 L33.27 290.12 L0 342.55 L66.55 342.55 Z" class="st10"/>
+ </g>
+ <g id="shape12-54" v:mID="12" v:groupContext="shape" transform="translate(355.063,285.074) rotate(90)">
+ <title>Sheet.12</title>
+ <desc>Set Summary</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="24.1985" cy="332.563" width="48.4" height="19.9638"/>
+ <rect x="0" y="322.581" width="48.397" height="19.9638" class="st11"/>
+ <text x="18.25" y="329.86" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Set <tspan
+ x="6.38" dy="1.2em" class="st4">Summary</tspan></text> </g>
+ </g>
+ <g id="shape13-58" v:mID="13" v:groupContext="shape" transform="translate(57.5835,-54.4467)">
+ <title>Sheet.13</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L38.9 342.55" class="st14"/>
+ </g>
+ <g id="shape14-64" v:mID="14" v:groupContext="shape" transform="translate(20.2363,-51.8439)">
+ <title>Sheet.14</title>
+ <desc>Flow Key</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="25.3328" cy="333.471" width="50.67" height="18.1489"/>
+ <rect x="0" y="324.396" width="50.6656" height="18.1489" class="st11"/>
+ <text x="14.12" y="335.27" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Key</text> </g>
+ <g id="shape15-67" v:mID="15" v:groupContext="shape" transform="translate(5.02911,1.60865) rotate(-26.0815)">
+ <title>Sheet.15</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L39.25 342.55" class="st14"/>
+ </g>
+ <g id="shape16-72" v:mID="16" v:groupContext="shape" transform="translate(155.629,-33.273)">
+ <title>Sheet.16</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L38.34 342.55" class="st14"/>
+ </g>
+ <g id="shape17-77" v:mID="17" v:groupContext="shape" transform="translate(304.141,0.595416) rotate(25.6934)">
+ <title>Sheet.17</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L42.68 342.55" class="st14"/>
+ </g>
+ <g id="shape18-82" v:mID="18" v:groupContext="shape" transform="translate(102.642,654.842) rotate(180)">
+ <title>Sheet.18</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L30.14 342.55" class="st14"/>
+ </g>
+ <g id="shape19-87" v:mID="19" v:groupContext="shape" transform="translate(-15.1809,-33.9928)">
+ <title>Sheet.19</title>
+ <desc>New Flow =&#62; New Assignment</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="42.75" cy="338.045" width="85.5" height="9"/>
+ <rect x="0" y="333.545" width="85.5" height="9" class="st11"/>
+ <text x="5.06" y="339.85" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>New Flow =&#62; New Assignment</text> </g>
+ <g id="shape20-90" v:mID="20" v:groupContext="shape" transform="translate(102.844,679.041) rotate(180)">
+ <title>Sheet.20</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L30.14 342.55" class="st14"/>
+ </g>
+ <g id="shape21-95" v:mID="21" v:groupContext="shape" transform="translate(-35.4309,-11.4928)">
+ <title>Sheet.21</title>
+ <desc>Old Flow =&#62; forward to specific thread</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="337.971" width="108" height="9.14889"/>
+ <rect x="0" y="333.396" width="108" height="9.14889" class="st11"/>
+ <text x="6.36" y="339.77" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Old Flow =&#62; forward to specific thread</text> </g>
+ <g id="shape22-98" v:mID="22" v:groupContext="shape" transform="translate(541.496,275.999) rotate(90)">
+ <title>Sheet.22</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 335.81 C2.14 344.21 5.09 343.6 7.56 340.31 C10.62 336.25 12.94 328.1 18.15 335.81" class="st17"/>
+ </g>
+ <g id="shape23-101" v:mID="23" v:groupContext="shape" transform="translate(541.496,300.198) rotate(90)">
+ <title>Sheet.23</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 335.81 C2.14 344.21 5.09 343.6 7.56 340.31 C10.62 336.25 12.94 328.1 18.15 335.81" class="st17"/>
+ </g>
+ <g id="shape24-104" v:mID="24" v:groupContext="shape" transform="translate(541.496,324.396) rotate(90)">
+ <title>Sheet.24</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 335.81 C2.14 344.21 5.09 343.6 7.56 340.31 C10.62 336.25 12.94 328.1 18.15 335.81" class="st17"/>
+ </g>
+ </g>
+ <g id="group25-107" transform="translate(285.961,-178.628)" v:mID="25" v:groupContext="group">
+ <title>Sheet.25</title>
+ <g id="shape26-108" v:mID="26" v:groupContext="shape" transform="translate(51.2583,-51.2583)">
+ <title>Circle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow26-109" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st9"/>
+ </g>
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st10"/>
+ </g>
+ <g id="shape27-112" v:mID="27" v:groupContext="shape" transform="translate(107.177,-55.9182)">
+ <title>Circle.156</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow27-113" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st9"/>
+ </g>
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st10"/>
+ </g>
+ <g id="shape28-116" v:mID="28" v:groupContext="shape" transform="translate(79.2174,-83.8773)">
+ <title>Circle.157</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow28-117" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st9"/>
+ </g>
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st10"/>
+ </g>
+ <g id="shape29-120" v:mID="29" v:groupContext="shape" transform="translate(153.775,-51.2583)">
+ <title>Circle.158</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow29-121" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st9"/>
+ </g>
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st10"/>
+ </g>
+ <g id="shape30-124" v:mID="30" v:groupContext="shape" transform="translate(93.197,-18.6394)">
+ <title>Circle.159</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow30-125" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st9"/>
+ </g>
+ <path d="M0 333.23 A9.3197 9.3197 0 0 1 18.64 333.23 A9.3197 9.3197 0 0 1 0 333.23 Z" class="st10"/>
+ </g>
+ <g id="shape31-128" v:mID="31" v:groupContext="shape" transform="translate(27.4102,-57.9329) rotate(-7.12502)">
+ <title>Sheet.31</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L31.41 342.55" class="st14"/>
+ </g>
+ <g id="shape32-133" v:mID="32" v:groupContext="shape" transform="translate(182.13,-60.5772) rotate(9.46232)">
+ <title>Sheet.32</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L22.18 342.55" class="st14"/>
+ </g>
+ <g id="shape33-138" v:mID="33" v:groupContext="shape" transform="translate(47.8843,595.237) rotate(-160.346)">
+ <title>Sheet.33</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L63.11 342.55" class="st14"/>
+ </g>
+ <g id="shape34-143" v:mID="34" v:groupContext="shape" transform="translate(292.945,525.785) rotate(141.977)">
+ <title>Sheet.34</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L20.97 342.55" class="st14"/>
+ </g>
+ <g id="shape35-148" v:mID="35" v:groupContext="shape" transform="translate(-95.8971,591.793) rotate(-145.945)">
+ <title>Sheet.35</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L28.55 342.55" class="st14"/>
+ </g>
+ <g id="shape36-153" v:mID="36" v:groupContext="shape" transform="translate(37.2788,2.27374E-013)">
+ <title>Rectangle.167</title>
+ <desc>SUM</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="10.8652" cy="335.555" width="21.74" height="13.9795"/>
+ <g id="shadow36-154" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="328.566" width="21.7305" height="13.9795" class="st9"/>
+ </g>
+ <rect x="0" y="328.566" width="21.7305" height="13.9795" class="st10"/>
+ <text x="5" y="337.36" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>SUM</text> </g>
+ <g id="shape37-158" v:mID="37" v:groupContext="shape" transform="translate(55.9182,2.27374E-013)">
+ <title>Rectangle.168</title>
+ <desc>Packet</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="23.2992" cy="335.555" width="46.6" height="13.9795"/>
+ <g id="shadow37-159" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="328.566" width="46.5985" height="13.9795" class="st9"/>
+ </g>
+ <rect x="0" y="328.566" width="46.5985" height="13.9795" class="st19"/>
+ <text x="15.18" y="337.36" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Packet</text> </g>
+ <g id="shape38-163" v:mID="38" v:groupContext="shape" transform="translate(-1.65867E-013,-32.6189)">
+ <title>Rectangle.169</title>
+ <desc>SUM</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="10.3796" cy="335.555" width="20.76" height="13.9795"/>
+ <g id="shadow38-164" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="328.566" width="20.7593" height="13.9795" class="st9"/>
+ </g>
+ <rect x="0" y="328.566" width="20.7593" height="13.9795" class="st10"/>
+ <text x="4.51" y="337.36" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>SUM</text> </g>
+ <g id="shape39-168" v:mID="39" v:groupContext="shape" transform="translate(18.6394,-32.6189)">
+ <title>Rectangle.170</title>
+ <desc>Packet</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="23.2992" cy="335.555" width="46.6" height="13.9795"/>
+ <g id="shadow39-169" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="328.566" width="46.5985" height="13.9795" class="st9"/>
+ </g>
+ <rect x="0" y="328.566" width="46.5985" height="13.9795" class="st19"/>
+ <text x="15.18" y="337.36" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Packet</text> </g>
+ <g id="shape40-173" v:mID="40" v:groupContext="shape" transform="translate(197.019,626.053) rotate(161.565)">
+ <title>Sheet.40</title>
+ <path d="M0 328.31 A55.7483 27.2427 -124.2 0 0 42.37 334.19 L42.47 333.85" class="st21"/>
+ </g>
+ <g id="shape41-179" v:mID="41" v:groupContext="shape" transform="translate(154.607,584.177) rotate(161.121)">
+ <title>Sheet.41</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 319.39 A80.5593 29.9756 -101.99 0 0 41.7 325.37 L41.79 325.02" class="st21"/>
+ </g>
+ <g id="shape42-184" v:mID="42" v:groupContext="shape" transform="translate(3.02481,-66.7025)">
+ <title>Sheet.42</title>
+ <desc>Encode ID</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="19.4569" cy="335.555" width="38.92" height="13.9795"/>
+ <rect x="0" y="328.566" width="38.9138" height="13.9795" class="st11"/>
+ <text x="7.51" y="333.16" class="st23" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Encode <tspan
+ x="15.99" dy="1.2em" class="st4">ID</tspan></text> </g>
+ </g>
+ <g id="group43-188" transform="translate(12.0993,-165.858)" v:mID="43" v:groupContext="group">
+ <title>Sheet.43</title>
+ <g id="group44-189" transform="translate(7.21495,-75.757)" v:mID="44" v:groupContext="group" v:layerMember="0">
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ConnGap" v:prompt="" v:val="VT0(0.083333333333333):0"/>
+ </v:userDefs>
+ <title>User</title>
+ <g id="shape45-190" v:mID="45" v:groupContext="shape" v:layerMember="0"
+ transform="translate(13.3353,-1.13687E-013)">
+ <title>Sheet.45</title>
+ <g id="shadow45-191" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z"
+ class="st24"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st25"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77"
+ class="st25"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st25"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st25"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st25"/>
+ <path d="M0 337.32 L13.47 337.32" class="st25"/>
+ </g>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z" class="st26"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st27"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77" class="st27"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st27"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st27"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st27"/>
+ <path d="M0 337.32 L13.47 337.32" class="st27"/>
+ </g>
+ <g id="shape46-206" v:mID="46" v:groupContext="shape" v:layerMember="0" transform="translate(0,-8.39743)">
+ <title>Sheet.46</title>
+ <g id="shadow46-207" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06
+ C19.97 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23
+ C2.35 324.38 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09
+ 336.04 L21.09 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06
+ 340.51 L21.18 337.33 Z" class="st9"/>
+ </g>
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06 C19.97
+ 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23 C2.35 324.38
+ 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09 336.04 L21.09
+ 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06 340.51 L21.18 337.33
+ Z" class="st28"/>
+ </g>
+ <g id="shape47-210" v:mID="47" v:groupContext="shape" v:layerMember="0" transform="translate(3.19243,-16.175)">
+ <title>Sheet.47</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M16.62 342.55 L16.62 333.29 C16.62 333.23 16.61 333.18 16.58 333.13 C16.55 333.07 16.5 333.02 16.44
+ 332.98 C16.39 332.95 16.33 332.94 16.27 332.94 L0.35 332.94 C0.29 332.94 0.24 332.95 0.19 332.98
+ C0.13 333.01 0.08 333.07 0.04 333.12 C0.02 333.17 0 333.23 0 333.29 L0 342.55 L16.62 342.55
+ Z" class="st29"/>
+ </g>
+ <g id="shape48-212" v:mID="48" v:groupContext="shape" v:layerMember="0" transform="translate(1.97942,-10.81)">
+ <title>Sheet.48</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M0.96 340.83 L0 342.55 L19.06 342.55 L18.1 340.83 L0.96 340.83 Z" class="st26"/>
+ </g>
+ </g>
+ <g id="group49-215" transform="translate(7.21495,-47.1858)" v:mID="49" v:groupContext="group" v:layerMember="0">
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ConnGap" v:prompt="" v:val="VT0(0.083333333333333):0"/>
+ </v:userDefs>
+ <title>User.7</title>
+ <g id="shape50-216" v:mID="50" v:groupContext="shape" v:layerMember="0"
+ transform="translate(13.3353,-1.13687E-013)">
+ <title>Sheet.50</title>
+ <g id="shadow50-217" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z"
+ class="st24"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st25"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77"
+ class="st25"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st25"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st25"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st25"/>
+ <path d="M0 337.32 L13.47 337.32" class="st25"/>
+ </g>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z" class="st26"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st27"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77" class="st27"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st27"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st27"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st27"/>
+ <path d="M0 337.32 L13.47 337.32" class="st27"/>
+ </g>
+ <g id="shape51-232" v:mID="51" v:groupContext="shape" v:layerMember="0" transform="translate(0,-8.39743)">
+ <title>Sheet.51</title>
+ <g id="shadow51-233" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06
+ C19.97 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23
+ C2.35 324.38 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09
+ 336.04 L21.09 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06
+ 340.51 L21.18 337.33 Z" class="st9"/>
+ </g>
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06 C19.97
+ 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23 C2.35 324.38
+ 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09 336.04 L21.09
+ 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06 340.51 L21.18 337.33
+ Z" class="st28"/>
+ </g>
+ <g id="shape52-236" v:mID="52" v:groupContext="shape" v:layerMember="0" transform="translate(3.19243,-16.175)">
+ <title>Sheet.52</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M16.62 342.55 L16.62 333.29 C16.62 333.23 16.61 333.18 16.58 333.13 C16.55 333.07 16.5 333.02 16.44
+ 332.98 C16.39 332.95 16.33 332.94 16.27 332.94 L0.35 332.94 C0.29 332.94 0.24 332.95 0.19 332.98
+ C0.13 333.01 0.08 333.07 0.04 333.12 C0.02 333.17 0 333.23 0 333.29 L0 342.55 L16.62 342.55
+ Z" class="st29"/>
+ </g>
+ <g id="shape53-238" v:mID="53" v:groupContext="shape" v:layerMember="0" transform="translate(1.97942,-10.81)">
+ <title>Sheet.53</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M0.96 340.83 L0 342.55 L19.06 342.55 L18.1 340.83 L0.96 340.83 Z" class="st26"/>
+ </g>
+ </g>
+ <g id="group54-241" transform="translate(7.21495,-18.6146)" v:mID="54" v:groupContext="group" v:layerMember="0">
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ConnGap" v:prompt="" v:val="VT0(0.083333333333333):0"/>
+ </v:userDefs>
+ <title>User.12</title>
+ <g id="shape55-242" v:mID="55" v:groupContext="shape" v:layerMember="0"
+ transform="translate(13.3353,-1.13687E-013)">
+ <title>Sheet.55</title>
+ <g id="shadow55-243" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z"
+ class="st24"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st25"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52
+ 13.66 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55
+ L21.12 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36
+ 329.46 C22.2 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77"
+ class="st25"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st25"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st25"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st25"/>
+ <path d="M0 337.32 L13.47 337.32" class="st25"/>
+ </g>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42 ZM20.96
+ 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77 Z" class="st26"/>
+ <path d="M20.77 325.42 A2.63551 2.63601 -180 1 0 15.5 325.42 A2.63551 2.63601 -180 1 0 20.77 325.42"
+ class="st27"/>
+ <path d="M20.96 328.77 L15.25 328.77 C14.84 328.77 14.46 328.91 14.16 329.14 C13.95 329.31 13.78 329.52 13.66
+ 329.76 C13.54 330 13.47 330.27 13.47 330.55 L13.47 337.32 L15.03 337.32 L15.03 342.55 L21.12
+ 342.55 L21.12 337.32 L22.74 337.32 L22.74 330.55 C22.74 330.14 22.6 329.76 22.36 329.46 C22.2
+ 329.25 21.99 329.08 21.75 328.96 C21.51 328.84 21.24 328.77 20.96 328.77" class="st27"/>
+ <path d="M18.1 342.55 L18.1 338.37" class="st27"/>
+ <path d="M15.03 337.32 L15.03 333.71" class="st27"/>
+ <path d="M21.12 337.32 L21.12 333.71" class="st27"/>
+ <path d="M0 337.32 L13.47 337.32" class="st27"/>
+ </g>
+ <g id="shape56-258" v:mID="56" v:groupContext="shape" v:layerMember="0" transform="translate(0,-8.39743)">
+ <title>Sheet.56</title>
+ <g id="shadow56-259" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06
+ C19.97 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23
+ C2.35 324.38 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09
+ 336.04 L21.09 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06
+ 340.51 L21.18 337.33 Z" class="st9"/>
+ </g>
+ <path d="M21.09 325.52 C21.09 325.13 20.96 324.79 20.74 324.51 C20.59 324.32 20.4 324.16 20.19 324.06 C19.97
+ 323.95 19.72 323.89 19.46 323.89 L3.55 323.89 C3.16 323.89 2.82 324.02 2.54 324.23 C2.35 324.38
+ 2.19 324.57 2.09 324.79 C1.98 325.01 1.92 325.25 1.92 325.52 L1.92 336.04 L21.09 336.04 L21.09
+ 325.52 ZM21.18 337.33 L1.77 337.33 L0 340.51 L0 342.55 L23.06 342.55 L23.06 340.51 L21.18 337.33
+ Z" class="st28"/>
+ </g>
+ <g id="shape57-262" v:mID="57" v:groupContext="shape" v:layerMember="0" transform="translate(3.19243,-16.175)">
+ <title>Sheet.57</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M16.62 342.55 L16.62 333.29 C16.62 333.23 16.61 333.18 16.58 333.13 C16.55 333.07 16.5 333.02 16.44
+ 332.98 C16.39 332.95 16.33 332.94 16.27 332.94 L0.35 332.94 C0.29 332.94 0.24 332.95 0.19 332.98
+ C0.13 333.01 0.08 333.07 0.04 333.12 C0.02 333.17 0 333.23 0 333.29 L0 342.55 L16.62 342.55
+ Z" class="st29"/>
+ </g>
+ <g id="shape58-264" v:mID="58" v:groupContext="shape" v:layerMember="0" transform="translate(1.97942,-10.81)">
+ <title>Sheet.58</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <path d="M0.96 340.83 L0 342.55 L19.06 342.55 L18.1 340.83 L0.96 340.83 Z" class="st26"/>
+ </g>
+ </g>
+ <g id="group59-267" transform="translate(171.161,-45.6707)" v:mID="59" v:groupContext="group" v:layerMember="0">
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ConnGap" v:prompt="" v:val="VT0(0.083333333333333):0"/>
+ </v:userDefs>
+ <title>Data Center</title>
+ <g id="shape60-268" v:mID="60" v:groupContext="shape" v:layerMember="0">
+ <title>Sheet.60</title>
+ <g id="shadow60-269" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <ellipse cx="37.8785" cy="331.299" rx="37.8785" ry="11.246" class="st9"/>
+ </g>
+ <ellipse cx="37.8785" cy="331.299" rx="37.8785" ry="11.246" class="st10"/>
+ </g>
+ <g id="shape61-272" v:mID="61" v:groupContext="shape" v:layerMember="0" transform="translate(6.86487,-7.30475)">
+ <title>Sheet.61</title>
+ <g id="shadow61-273" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M54.1 311.79 L43.28 311.79 L43.28 342.55 L62.03 342.55 L62.03 311.79 L54.1 311.79 ZM43.28 332.44
+ L43.28 311.79 L51.21 311.79 L51.21 301.69 L32.33 301.69 L32.33 311.79 L40.39 311.79 L40.39
+ 332.44 L43.28 332.44 ZM40.39 301.69 L40.39 293.03 L21.64 293.03 L21.64 301.69 L29.57 301.69
+ L29.57 311.79 L32.46 311.79 L32.46 301.69 L40.39 301.69 ZM32.46 311.79 L21.64 311.79 L21.64
+ 342.55 L40.39 342.55 L40.39 311.79 L32.46 311.79 ZM10.82 311.79 L0 311.79 L0 342.55 L18.75
+ 342.55 L18.75 311.79 L10.82 311.79 ZM21.64 311.79 L29.57 311.79 L29.57 301.69 L10.82 301.69
+ L10.82 311.79 L18.75 311.79 L18.75 332.44 L21.64 332.44 L21.64 311.79 Z" class="st9"/>
+ </g>
+ <path d="M54.1 311.79 L43.28 311.79 L43.28 342.55 L62.03 342.55 L62.03 311.79 L54.1 311.79 ZM43.28 332.44
+ L43.28 311.79 L51.21 311.79 L51.21 301.69 L32.33 301.69 L32.33 311.79 L40.39 311.79 L40.39 332.44
+ L43.28 332.44 ZM40.39 301.69 L40.39 293.03 L21.64 293.03 L21.64 301.69 L29.57 301.69 L29.57
+ 311.79 L32.46 311.79 L32.46 301.69 L40.39 301.69 ZM32.46 311.79 L21.64 311.79 L21.64 342.55
+ L40.39 342.55 L40.39 311.79 L32.46 311.79 ZM10.82 311.79 L0 311.79 L0 342.55 L18.75 342.55 L18.75
+ 311.79 L10.82 311.79 ZM21.64 311.79 L29.57 311.79 L29.57 301.69 L10.82 301.69 L10.82 311.79
+ L18.75 311.79 L18.75 332.44 L21.64 332.44 L21.64 311.79 Z" class="st10"/>
+ </g>
+ <g id="shape62-276" v:mID="62" v:groupContext="shape" v:layerMember="0" transform="translate(20.0835,-20.5174)">
+ <title>Sheet.62</title>
+ <g id="shadow62-277" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M45.36 341.36 A1.13296 1.18615 -180 1 0 43.09 341.36 A1.13296 1.18615 -180 1 0 45.36 341.36
+ ZM23.46 341.36 A1.13296 1.18615 -180 1 0 21.2 341.36 A1.13296 1.18615 -180 1 0 23.46 341.36
+ ZM2.27 341.36 A1.13296 1.18615 -180 1 0 0 341.36 A1.13296 1.18615 -180 1 0 2.27 341.36 Z"
+ class="st24"/>
+ </g>
+ <path d="M45.36 341.36 A1.13296 1.18615 -180 1 0 43.09 341.36 A1.13296 1.18615 -180 1 0 45.36 341.36 ZM23.46
+ 341.36 A1.13296 1.18615 -180 1 0 21.2 341.36 A1.13296 1.18615 -180 1 0 23.46 341.36 ZM2.27 341.36
+ A1.13296 1.18615 -180 1 0 0 341.36 A1.13296 1.18615 -180 1 0 2.27 341.36 Z" class="st30"/>
+ </g>
+ <g id="shape63-282" v:mID="63" v:groupContext="shape" v:layerMember="0" transform="translate(14.2717,-12.5134)">
+ <title>Sheet.63</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow63-283" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M43.09 342.55 L51.17 342.55 L51.17 341.74 L43.09 341.74 L43.09 342.55 ZM43.09 340.12 L51.17
+ 340.12 L51.17 339.32 L43.09 339.32 L43.09 340.12 ZM43.09 337.69 L51.17 337.69 L51.17 336.89
+ L43.09 336.89 L43.09 337.69 ZM21.2 342.55 L29.27 342.55 L29.27 341.74 L21.2 341.74 L21.2
+ 342.55 ZM21.2 340.12 L29.27 340.12 L29.27 339.32 L21.2 339.32 L21.2 340.12 ZM21.2 337.69
+ L29.27 337.69 L29.27 336.89 L21.2 336.89 L21.2 337.69 ZM-0 342.55 L8.08 342.55 L8.08 341.74
+ L-0 341.74 L-0 342.55 ZM-0 340.12 L8.08 340.12 L8.08 339.32 L-0 339.32 L-0 340.12 ZM-0 337.69
+ L8.08 337.69 L8.08 336.89 L-0 336.89 L-0 337.69 Z" class="st24"/>
+ </g>
+ <path d="M43.09 342.55 L51.17 342.55 L51.17 341.74 L43.09 341.74 L43.09 342.55 ZM43.09 340.12 L51.17 340.12
+ L51.17 339.32 L43.09 339.32 L43.09 340.12 ZM43.09 337.69 L51.17 337.69 L51.17 336.89 L43.09
+ 336.89 L43.09 337.69 ZM21.2 342.55 L29.27 342.55 L29.27 341.74 L21.2 341.74 L21.2 342.55 ZM21.2
+ 340.12 L29.27 340.12 L29.27 339.32 L21.2 339.32 L21.2 340.12 ZM21.2 337.69 L29.27 337.69 L29.27
+ 336.89 L21.2 336.89 L21.2 337.69 ZM-0 342.55 L8.08 342.55 L8.08 341.74 L-0 341.74 L-0 342.55
+ ZM-0 340.12 L8.08 340.12 L8.08 339.32 L-0 339.32 L-0 340.12 ZM-0 337.69 L8.08 337.69 L8.08 336.89
+ L-0 336.89 L-0 337.69 Z" class="st26"/>
+ </g>
+ </g>
+ <g id="group64-288" transform="translate(59.5234,-47.1858)" v:mID="64" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey=""
+ v:invis="true" v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Device)"/>
+ <v:cp v:nameU="SubShapeType" v:lbl="SubShapeType" v:prompt="" v:type="0" v:format="" v:sortKey=""
+ v:invis="true" v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Load balancer)"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT14({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Load balancer</title>
+ <g id="shape65-289" v:mID="65" v:groupContext="shape" transform="translate(0,-1.653)">
+ <title>Sheet.65</title>
+ <g id="shadow65-290" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M12.18 329.62 L4.06 329.62 L0 332.02 L0 342.55 L16.23 342.55 L16.23 332.02 L12.18 329.62 Z"
+ class="st24"/>
+ <path d="M0 332.02 L16.23 332.02" class="st25"/>
+ <path d="M12.18 329.62 L4.06 329.62 L0 332.02 L0 342.55 L16.23 342.55 L16.23 332.02 L12.18 329.62"
+ class="st25"/>
+ </g>
+ <path d="M12.18 329.62 L4.06 329.62 L0 332.02 L0 342.55 L16.23 342.55 L16.23 332.02 L12.18 329.62 Z"
+ class="st30"/>
+ <path d="M0 332.02 L16.23 332.02" class="st31"/>
+ <path d="M12.18 329.62 L4.06 329.62 L0 332.02 L0 342.55 L16.23 342.55 L16.23 332.02 L12.18 329.62"
+ class="st31"/>
+ </g>
+ <g id="shape66-297" v:mID="66" v:groupContext="shape" transform="translate(1.81062,-2.91583)">
+ <title>Sheet.66</title>
+ <g id="shadow66-298" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M10.22 341.92 L9.29 342.12 L9.95 342.55 L11.2 342.23 L10.99 340.96 L10.33 340.52 L10.53 341.44
+ L8.34 340.01 L8.03 340.49 L10.22 341.92 ZM11.46 338.22 L8.84 338.22 L8.84 338.78 L11.45
+ 338.78 L10.78 339.45 L11.57 339.45 L12.45 338.5 L11.57 337.55 L10.78 337.55 L11.46 338.22
+ ZM10.48 335.2 L8.29 336.64 L8.6 337.12 L10.79 335.68 L10.59 336.61 L11.25 336.17 L11.46
+ 334.9 L10.21 334.58 L9.55 335.01 L10.48 335.2 ZM6.25 336.37 C5.11 336.37 4.19 337.29 4.19
+ 338.43 C4.19 339.56 5.11 340.48 6.25 340.48 C7.38 340.48 8.31 339.56 8.31 338.43 C8.31 337.29
+ 7.38 336.37 6.25 336.37 ZM6.25 337.02 C7.02 337.02 7.66 337.65 7.66 338.43 C7.66 339.2 7.02
+ 339.83 6.25 339.83 C5.47 339.83 4.84 339.2 4.84 338.43 C4.84 337.65 5.47 337.02 6.25 337.02
+ ZM2.62 338.14 L0 338.14 L0 338.71 L2.62 338.71 L1.94 339.38 L2.74 339.38 L3.61 338.43 L2.73
+ 337.47 L1.95 337.47 L2.62 338.14 Z" class="st9"/>
+ </g>
+ <path d="M10.22 341.92 L9.29 342.12 L9.95 342.55 L11.2 342.23 L10.99 340.96 L10.33 340.52 L10.53 341.44 L8.34
+ 340.01 L8.03 340.49 L10.22 341.92 ZM11.46 338.22 L8.84 338.22 L8.84 338.78 L11.45 338.78 L10.78
+ 339.45 L11.57 339.45 L12.45 338.5 L11.57 337.55 L10.78 337.55 L11.46 338.22 ZM10.48 335.2 L8.29
+ 336.64 L8.6 337.12 L10.79 335.68 L10.59 336.61 L11.25 336.17 L11.46 334.9 L10.21 334.58 L9.55
+ 335.01 L10.48 335.2 ZM6.25 336.37 C5.11 336.37 4.19 337.29 4.19 338.43 C4.19 339.56 5.11 340.48
+ 6.25 340.48 C7.38 340.48 8.31 339.56 8.31 338.43 C8.31 337.29 7.38 336.37 6.25 336.37 ZM6.25
+ 337.02 C7.02 337.02 7.66 337.65 7.66 338.43 C7.66 339.2 7.02 339.83 6.25 339.83 C5.47 339.83
+ 4.84 339.2 4.84 338.43 C4.84 337.65 5.47 337.02 6.25 337.02 ZM2.62 338.14 L0 338.14 L0 338.71
+ L2.62 338.71 L1.94 339.38 L2.74 339.38 L3.61 338.43 L2.73 337.47 L1.95 337.47 L2.62 338.14 Z"
+ class="st32"/>
+ </g>
+ </g>
+ <g id="group67-301" transform="translate(104.617,-86.5795)" v:mID="67" v:groupContext="group">
+ <v:userDefs>
+ <v:ud v:nameU="SkinColor" v:prompt="" v:val="VT5(#da8c36)"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <title>Directory server</title>
+ <g id="shape68-302" v:mID="68" v:groupContext="shape" transform="translate(0,-0.451005)">
+ <title>Sheet.68</title>
+ <g id="shadow68-303" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0.47 329.86 L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24
+ L18.24 340.22 L22.24 342.55 L22.24 339.27 L36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16
+ 321.13 L3.16 321.5 L0 319.68 L0 329.58 L0.47 329.86 Z" class="st33"/>
+ </g>
+ <path d="M0.47 329.86 L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24 L18.24
+ 340.22 L22.24 342.55 L22.24 339.27 L36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16 321.13 L3.16
+ 321.5 L0 319.68 L0 329.58 L0.47 329.86 Z" class="st34"/>
+ </g>
+ <g id="shape69-306" v:mID="69" v:groupContext="shape" transform="translate(3.1636,-11.8063)">
+ <title>Sheet.69</title>
+ <path d="M16.48 323.24 L32.91 332.66 L16.31 342.55 L0 333.26 L0 332.52 L16.48 323.24 Z" class="st35"/>
+ </g>
+ <g id="shape70-310" v:mID="70" v:groupContext="shape" transform="translate(19.06,-3.68954)">
+ <title>Sheet.70</title>
+ <path d="M17.01 324.55 L0 334.19 L3.18 342.55 L17.01 334.56 L17.01 324.55 Z" class="st36"/>
+ </g>
+ <g id="shape71-314" v:mID="71" v:groupContext="shape" transform="translate(0,-0.415652)">
+ <title>Sheet.71</title>
+ <path d="M22.24 342.55 L0 329.58 L0 319.68 L22.24 332.43 L22.24 342.55 Z" class="st37"/>
+ </g>
+ <g id="shape72-322" v:mID="72" v:groupContext="shape" transform="translate(0.82443,-19.8334)">
+ <title>Sheet.72</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape73-326" v:mID="73" v:groupContext="shape" transform="translate(3.62283,-15.1638)">
+ <title>Sheet.73</title>
+ <path d="M3.22 339.78 A1.86495 1.86495 -180 0 0 1.11 338.38 A1.8709 1.8709 -180 0 0 0.38 340.85 A1.86532
+ 1.86532 -180 0 0 2.5 342.25 A1.87264 1.87264 -180 0 0 3.22 339.78 Z" class="st38"/>
+ </g>
+ <g id="shape74-329" v:mID="74" v:groupContext="shape" transform="translate(3.62283,-10.4867)">
+ <title>Sheet.74</title>
+ <path d="M3.22 339.78 A1.86495 1.86495 -180 0 0 1.11 338.38 A1.8709 1.8709 -180 0 0 0.38 340.85 A1.86532
+ 1.86532 -180 0 0 2.5 342.25 A1.87264 1.87264 -180 0 0 3.22 339.78 Z" class="st38"/>
+ </g>
+ <g id="shape75-332" v:mID="75" v:groupContext="shape" transform="translate(4.52404,-16.3668)">
+ <title>Sheet.75</title>
+ <path d="M1.61 341.16 a0.931952 0.931952 -180 0 0 -1.05741 -0.702645 a0.935408 0.935408 -180 0 0 -0.361118
+ 1.23585 a0.932139 0.932139 -180 0 0 1.05794 0.702645 a0.935822 0.935822 -180 0 0 0.360589 -1.23585
+ Z" class="st39"/>
+ </g>
+ <g id="shape76-336" v:mID="76" v:groupContext="shape" transform="translate(4.52404,-11.6897)">
+ <title>Sheet.76</title>
+ <path d="M1.61 341.16 a0.931952 0.931952 -180 0 0 -1.05741 -0.702645 a0.935875 0.935875 -180 0 0 -0.361118
+ 1.23585 a0.932139 0.932139 -180 0 0 1.05794 0.702645 a0.935822 0.935822 -180 0 0 0.360589 -1.23585
+ Z" class="st39"/>
+ </g>
+ <g id="shape77-339" v:mID="77" v:groupContext="shape" transform="translate(7.78787,-8.83469)">
+ <title>Sheet.77</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape78-343" v:mID="78" v:groupContext="shape" transform="translate(10.204,-7.4008)">
+ <title>Sheet.78</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape79-346" v:mID="79" v:groupContext="shape" transform="translate(12.6196,-5.96639)">
+ <title>Sheet.79</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape80-349" v:mID="80" v:groupContext="shape" transform="translate(15.0357,-4.53251)">
+ <title>Sheet.80</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape81-352" v:mID="81" v:groupContext="shape" transform="translate(8.24006,-10.0631)">
+ <title>Sheet.81</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape82-356" v:mID="82" v:groupContext="shape" transform="translate(10.6556,-8.62924)">
+ <title>Sheet.82</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape83-359" v:mID="83" v:groupContext="shape" transform="translate(13.0717,-7.19483)">
+ <title>Sheet.83</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape84-362" v:mID="84" v:groupContext="shape" transform="translate(15.4873,-5.76095)">
+ <title>Sheet.84</title>
+ <path d="M0.85 342.24 a0.388502 0.388502 0 0 1 -0.425717 0.308698 a0.638367 0.638367 0 0 1 -0.424129 -0.573447
+ L0 336.5 a0.387272 0.387272 0 0 1 0.424129 -0.308698 a0.638235 0.638235 0 0 1 0.425717 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape85-365" v:mID="85" v:groupContext="shape" transform="translate(7.78787,-9.81214)">
+ <title>Sheet.85</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape86-368" v:mID="86" v:groupContext="shape" transform="translate(10.204,-8.37826)">
+ <title>Sheet.86</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape87-371" v:mID="87" v:groupContext="shape" transform="translate(12.6196,-6.94385)">
+ <title>Sheet.87</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape88-374" v:mID="88" v:groupContext="shape" transform="translate(15.0357,-5.50996)">
+ <title>Sheet.88</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape89-377" v:mID="89" v:groupContext="shape" transform="translate(7.78787,-4.53251)">
+ <title>Sheet.89</title>
+ <path d="M9.08 334.57 L9.03 342.55 L7.25 341.57 L9.08 334.57 ZM6.66 333.14 L6.61 341.11 L4.83 340.13 L6.66
+ 333.14 ZM4.25 331.7 L4.2 339.68 L2.42 338.7 L4.25 331.7 ZM1.83 330.27 L1.78 338.24 L0 337.27
+ L1.83 330.27 Z" class="st42"/>
+ <path d="M9.08 334.57 L9.03 342.55 L7.25 341.57M6.66 333.14 L6.61 341.11 L4.83 340.13M4.25 331.7 L4.2 339.68
+ L2.42 338.7M1.83 330.27 L1.78 338.24 L0 337.27" class="st44"/>
+ </g>
+ <g id="shape90-380" v:mID="90" v:groupContext="shape" transform="translate(2.22125,-11.8454)">
+ <title>Sheet.90</title>
+ <path d="M0 341.85 L0.63 341.42 L1.42 341.78 L0.03 342.55 L0 341.85 Z" class="st45"/>
+ </g>
+ <g id="shape91-384" v:mID="91" v:groupContext="shape" transform="translate(17.1796,-3.17487)">
+ <title>Sheet.91</title>
+ <path d="M0 341.85 L0.63 341.42 L1.42 341.78 L0.03 342.55 L0 341.85 Z" class="st45"/>
+ </g>
+ <g id="shape92-387" v:mID="92" v:groupContext="shape" transform="translate(1.46036,-10.3893)">
+ <title>Sheet.92</title>
+ <path d="M2.12 341.35 L0 342.55 L0 333.54 L2.12 332.29 L2.12 333.41 L0.79 334.15 L0.79 341.09 L2.18 340.33
+ L2.12 341.35 Z" class="st36"/>
+ </g>
+ <g id="shape93-390" v:mID="93" v:groupContext="shape" transform="translate(16.4187,-1.71875)">
+ <title>Sheet.93</title>
+ <path d="M2.12 341.35 L0 342.55 L0 333.54 L2.12 332.29 L2.12 333.41 L0.79 334.15 L0.79 341.09 L2.18 340.33
+ L2.12 341.35 Z" class="st36"/>
+ </g>
+ <g id="shape94-393" v:mID="94" v:groupContext="shape" transform="translate(0.467548,-10.3893)">
+ <title>Sheet.94</title>
+ <path d="M0.99 333.54 L3.11 332.29 L2.12 331.66 L0 332.91 L0 341.92 L0.99 342.55 L0.99 333.54 Z"
+ class="st46"/>
+ </g>
+ <g id="shape95-397" v:mID="95" v:groupContext="shape" transform="translate(15.4259,-1.71875)">
+ <title>Sheet.95</title>
+ <path d="M0.99 333.54 L3.11 332.29 L2.12 331.66 L0 332.91 L0 341.92 L0.99 342.55 L0.99 333.54 Z"
+ class="st46"/>
+ </g>
+ <g id="shape96-400" v:mID="96" v:groupContext="shape" transform="translate(0.467548,-1.71928)">
+ <title>Sheet.96</title>
+ <path d="M17.34 339.96 L16.75 340.37 L16.75 334.15 L18.07 333.41 L18.07 332.29 L17.08 331.66 L14.96 332.91
+ L14.96 341.92 L15.95 342.55 L18.07 341.35 L18.14 340.33 L17.34 339.96 ZM2.38 331.29 L1.79 331.7
+ L1.79 325.48 L3.11 324.74 L3.11 323.62 L2.12 322.99 L0 324.24 L0 333.25 L0.99 333.87 L3.11 332.68
+ L3.18 331.66 L2.38 331.29 Z" class="st47"/>
+ </g>
+ <g id="shape97-402" v:mID="97" v:groupContext="shape" transform="translate(19.9526,-8.71396)">
+ <title>Sheet.97</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape98-405" v:mID="98" v:groupContext="shape" transform="translate(19.9526,-2.35997)">
+ <title>Sheet.98</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape99-408" v:mID="99" v:groupContext="shape" transform="translate(0,-0.415652)">
+ <title>Sheet.99</title>
+ <path d="M36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16 321.13 L3.16 321.52 L0 319.68 L0 329.58 L0.47 329.86
+ L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24 L18.24 340.22
+ L22.24 342.55 L22.24 339.27 L36.07 331.28 Z" class="st47"/>
+ </g>
+ <g id="shape100-410" v:mID="100" v:groupContext="shape" transform="translate(27.8077,-2.86477)">
+ <title>Sheet.100</title>
+ <path d="M0.29 342.55 L6.62 338.89 A1.82805 1.82805 0 0 1 6.62 336.9 L0.29 340.55 A1.82805 1.82805 -180 0
+ 0 0.29 342.55 Z" class="st48"/>
+ </g>
+ <g id="shape101-412" v:mID="101" v:groupContext="shape" transform="translate(23.5035,-4.85627)">
+ <title>Sheet.101</title>
+ <path d="M4.6 342.55 L10.92 338.89 L6.32 336.24 L0 339.89 L4.6 342.55 Z" class="st49"/>
+ </g>
+ <g id="shape102-416" v:mID="102" v:groupContext="shape" transform="translate(23.3588,-2.86477)">
+ <title>Sheet.102</title>
+ <path d="M0.14 339.89 L4.74 342.55 A1.82805 1.82805 0 0 1 4.74 340.55 L0.14 337.9 A3.49826 3.49826 -180 0
+ 0 0.14 339.89 Z" class="st50"/>
+ </g>
+ <g id="shape103-420" v:mID="103" v:groupContext="shape" transform="translate(25.8933,-5.98478)">
+ <title>Sheet.103</title>
+ <path d="M2.87 342.55 L0 340.89" class="st51"/>
+ <path d="M0.94 340.34 L3.82 342" class="st51"/>
+ <path d="M1.88 339.8 L4.76 341.46" class="st51"/>
+ <path d="M2.82 339.26 L5.7 340.92" class="st51"/>
+ <path d="M3.76 338.71 L6.64 340.37" class="st51"/>
+ </g>
+ <g id="shape104-427" v:mID="104" v:groupContext="shape" transform="translate(23.5035,-7.51159)">
+ <title>Sheet.104</title>
+ <path d="M5.13 341.17 L11.45 337.52 A11.9345 11.9345 0 0 1 6.32 338.89 L0 342.55 A11.9345 11.9345 -180 0
+ 0 5.13 341.17 Z" class="st52"/>
+ </g>
+ <g id="shape105-435" v:mID="105" v:groupContext="shape" transform="translate(30.2106,-4.74563)">
+ <title>Sheet.105</title>
+ <path d="M0.98 341.98 L0 342.55" class="st51"/>
+ <path d="M1.26 341.48 L2.24 340.92" class="st51"/>
+ <path d="M2.53 340.42 L3.51 339.86" class="st51"/>
+ </g>
+ <g id="shape106-440" v:mID="106" v:groupContext="shape" transform="translate(23.3588,-2.86477)">
+ <title>Sheet.106</title>
+ <path d="M0.14 339.89 L4.74 342.55 L11.07 338.89 A1.82805 1.82805 0 0 1 11.07 336.9 L7.85 335.04 L11.6 332.87
+ A11.9345 11.9345 0 0 1 6.47 334.25 L0.14 337.9 A3.49826 3.49826 -180 0 0 0.14 339.89"
+ class="st53"/>
+ </g>
+ </g>
+ <g id="group107-443" transform="translate(104.617,-33.8201)" v:mID="107" v:groupContext="group">
+ <v:userDefs>
+ <v:ud v:nameU="SkinColor" v:prompt="" v:val="VT5(#da8c36)"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <title>Directory server.104</title>
+ <g id="shape108-444" v:mID="108" v:groupContext="shape" transform="translate(0,-0.451005)">
+ <title>Sheet.108</title>
+ <g id="shadow108-445" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M0.47 329.86 L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24
+ L18.24 340.22 L22.24 342.55 L22.24 339.27 L36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16
+ 321.13 L3.16 321.5 L0 319.68 L0 329.58 L0.47 329.86 Z" class="st33"/>
+ </g>
+ <path d="M0.47 329.86 L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24 L18.24
+ 340.22 L22.24 342.55 L22.24 339.27 L36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16 321.13 L3.16
+ 321.5 L0 319.68 L0 329.58 L0.47 329.86 Z" class="st34"/>
+ </g>
+ <g id="shape109-448" v:mID="109" v:groupContext="shape" transform="translate(3.1636,-11.8063)">
+ <title>Sheet.109</title>
+ <path d="M16.48 323.24 L32.91 332.66 L16.31 342.55 L0 333.26 L0 332.52 L16.48 323.24 Z" class="st35"/>
+ </g>
+ <g id="shape110-451" v:mID="110" v:groupContext="shape" transform="translate(19.06,-3.68954)">
+ <title>Sheet.110</title>
+ <path d="M17.01 324.55 L0 334.19 L3.18 342.55 L17.01 334.56 L17.01 324.55 Z" class="st36"/>
+ </g>
+ <g id="shape111-454" v:mID="111" v:groupContext="shape" transform="translate(0,-0.415652)">
+ <title>Sheet.111</title>
+ <path d="M22.24 342.55 L0 329.58 L0 319.68 L22.24 332.43 L22.24 342.55 Z" class="st37"/>
+ </g>
+ <g id="shape112-457" v:mID="112" v:groupContext="shape" transform="translate(0.82443,-19.8334)">
+ <title>Sheet.112</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape113-460" v:mID="113" v:groupContext="shape" transform="translate(3.62283,-15.1638)">
+ <title>Sheet.113</title>
+ <path d="M3.22 339.78 A1.86495 1.86495 -180 0 0 1.11 338.38 A1.8709 1.8709 -180 0 0 0.38 340.85 A1.86532
+ 1.86532 -180 0 0 2.5 342.25 A1.87264 1.87264 -180 0 0 3.22 339.78 Z" class="st38"/>
+ </g>
+ <g id="shape114-463" v:mID="114" v:groupContext="shape" transform="translate(3.62283,-10.4867)">
+ <title>Sheet.114</title>
+ <path d="M3.22 339.78 A1.86495 1.86495 -180 0 0 1.11 338.38 A1.8709 1.8709 -180 0 0 0.38 340.85 A1.86532
+ 1.86532 -180 0 0 2.5 342.25 A1.87264 1.87264 -180 0 0 3.22 339.78 Z" class="st38"/>
+ </g>
+ <g id="shape115-466" v:mID="115" v:groupContext="shape" transform="translate(4.52404,-16.3668)">
+ <title>Sheet.115</title>
+ <path d="M1.61 341.16 a0.931952 0.931952 -180 0 0 -1.05741 -0.702645 a0.935408 0.935408 -180 0 0 -0.361118
+ 1.23585 a0.932139 0.932139 -180 0 0 1.05794 0.702645 a0.935822 0.935822 -180 0 0 0.360589 -1.23585
+ Z" class="st39"/>
+ </g>
+ <g id="shape116-469" v:mID="116" v:groupContext="shape" transform="translate(4.52404,-11.6897)">
+ <title>Sheet.116</title>
+ <path d="M1.61 341.16 a0.931952 0.931952 -180 0 0 -1.05741 -0.702645 a0.935875 0.935875 -180 0 0 -0.361118
+ 1.23585 a0.932139 0.932139 -180 0 0 1.05794 0.702645 a0.935822 0.935822 -180 0 0 0.360589 -1.23585
+ Z" class="st39"/>
+ </g>
+ <g id="shape117-472" v:mID="117" v:groupContext="shape" transform="translate(7.78787,-8.83469)">
+ <title>Sheet.117</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape118-475" v:mID="118" v:groupContext="shape" transform="translate(10.204,-7.4008)">
+ <title>Sheet.118</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape119-478" v:mID="119" v:groupContext="shape" transform="translate(12.6196,-5.96639)">
+ <title>Sheet.119</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape120-481" v:mID="120" v:groupContext="shape" transform="translate(15.0357,-4.53251)">
+ <title>Sheet.120</title>
+ <path d="M0 341.57 L0.05 333.6 L1.83 334.57 L1.78 342.55 L0 341.57 Z" class="st40"/>
+ </g>
+ <g id="shape121-484" v:mID="121" v:groupContext="shape" transform="translate(8.24006,-10.0631)">
+ <title>Sheet.121</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape122-487" v:mID="122" v:groupContext="shape" transform="translate(10.6556,-8.62924)">
+ <title>Sheet.122</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape123-490" v:mID="123" v:groupContext="shape" transform="translate(13.0717,-7.19483)">
+ <title>Sheet.123</title>
+ <path d="M0.85 342.24 a0.388199 0.388199 0 0 1 -0.425188 0.308698 a0.638045 0.638045 0 0 1 -0.424658 -0.573447
+ L0 336.5 a0.387575 0.387575 0 0 1 0.424658 -0.308698 a0.637725 0.637725 0 0 1 0.425188 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape124-493" v:mID="124" v:groupContext="shape" transform="translate(15.4873,-5.76095)">
+ <title>Sheet.124</title>
+ <path d="M0.85 342.24 a0.388502 0.388502 0 0 1 -0.425717 0.308698 a0.638367 0.638367 0 0 1 -0.424129 -0.573447
+ L0 336.5 a0.387272 0.387272 0 0 1 0.424129 -0.308698 a0.638235 0.638235 0 0 1 0.425717 0.573447
+ L0.85 342.24 Z" class="st41"/>
+ </g>
+ <g id="shape125-496" v:mID="125" v:groupContext="shape" transform="translate(7.78787,-9.81214)">
+ <title>Sheet.125</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape126-499" v:mID="126" v:groupContext="shape" transform="translate(10.204,-8.37826)">
+ <title>Sheet.126</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape127-502" v:mID="127" v:groupContext="shape" transform="translate(12.6196,-6.94385)">
+ <title>Sheet.127</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape128-505" v:mID="128" v:groupContext="shape" transform="translate(15.0357,-5.50996)">
+ <title>Sheet.128</title>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55 L0 342.55 Z" class="st42"/>
+ <path d="M0 342.55 L0.05 334.57 L1.83 335.55" class="st43"/>
+ </g>
+ <g id="shape129-508" v:mID="129" v:groupContext="shape" transform="translate(7.78787,-4.53251)">
+ <title>Sheet.129</title>
+ <path d="M9.08 334.57 L9.03 342.55 L7.25 341.57 L9.08 334.57 ZM6.66 333.14 L6.61 341.11 L4.83 340.13 L6.66
+ 333.14 ZM4.25 331.7 L4.2 339.68 L2.42 338.7 L4.25 331.7 ZM1.83 330.27 L1.78 338.24 L0 337.27
+ L1.83 330.27 Z" class="st42"/>
+ <path d="M9.08 334.57 L9.03 342.55 L7.25 341.57M6.66 333.14 L6.61 341.11 L4.83 340.13M4.25 331.7 L4.2 339.68
+ L2.42 338.7M1.83 330.27 L1.78 338.24 L0 337.27" class="st44"/>
+ </g>
+ <g id="shape130-511" v:mID="130" v:groupContext="shape" transform="translate(2.22125,-11.8454)">
+ <title>Sheet.130</title>
+ <path d="M0 341.85 L0.63 341.42 L1.42 341.78 L0.03 342.55 L0 341.85 Z" class="st45"/>
+ </g>
+ <g id="shape131-514" v:mID="131" v:groupContext="shape" transform="translate(17.1796,-3.17487)">
+ <title>Sheet.131</title>
+ <path d="M0 341.85 L0.63 341.42 L1.42 341.78 L0.03 342.55 L0 341.85 Z" class="st45"/>
+ </g>
+ <g id="shape132-517" v:mID="132" v:groupContext="shape" transform="translate(1.46036,-10.3893)">
+ <title>Sheet.132</title>
+ <path d="M2.12 341.35 L0 342.55 L0 333.54 L2.12 332.29 L2.12 333.41 L0.79 334.15 L0.79 341.09 L2.18 340.33
+ L2.12 341.35 Z" class="st36"/>
+ </g>
+ <g id="shape133-520" v:mID="133" v:groupContext="shape" transform="translate(16.4187,-1.71875)">
+ <title>Sheet.133</title>
+ <path d="M2.12 341.35 L0 342.55 L0 333.54 L2.12 332.29 L2.12 333.41 L0.79 334.15 L0.79 341.09 L2.18 340.33
+ L2.12 341.35 Z" class="st36"/>
+ </g>
+ <g id="shape134-523" v:mID="134" v:groupContext="shape" transform="translate(0.467548,-10.3893)">
+ <title>Sheet.134</title>
+ <path d="M0.99 333.54 L3.11 332.29 L2.12 331.66 L0 332.91 L0 341.92 L0.99 342.55 L0.99 333.54 Z"
+ class="st46"/>
+ </g>
+ <g id="shape135-526" v:mID="135" v:groupContext="shape" transform="translate(15.4259,-1.71875)">
+ <title>Sheet.135</title>
+ <path d="M0.99 333.54 L3.11 332.29 L2.12 331.66 L0 332.91 L0 341.92 L0.99 342.55 L0.99 333.54 Z"
+ class="st46"/>
+ </g>
+ <g id="shape136-529" v:mID="136" v:groupContext="shape" transform="translate(0.467548,-1.71928)">
+ <title>Sheet.136</title>
+ <path d="M17.34 339.96 L16.75 340.37 L16.75 334.15 L18.07 333.41 L18.07 332.29 L17.08 331.66 L14.96 332.91
+ L14.96 341.92 L15.95 342.55 L18.07 341.35 L18.14 340.33 L17.34 339.96 ZM2.38 331.29 L1.79 331.7
+ L1.79 325.48 L3.11 324.74 L3.11 323.62 L2.12 322.99 L0 324.24 L0 333.25 L0.99 333.87 L3.11 332.68
+ L3.18 331.66 L2.38 331.29 Z" class="st47"/>
+ </g>
+ <g id="shape137-531" v:mID="137" v:groupContext="shape" transform="translate(19.9526,-8.71396)">
+ <title>Sheet.137</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape138-534" v:mID="138" v:groupContext="shape" transform="translate(19.9526,-2.35997)">
+ <title>Sheet.138</title>
+ <path d="M1.13 341.58 a0.653986 0.653986 -180 0 0 -0.73971 -0.492434 a0.656072 0.656072 -180 0 0 -0.253101
+ 0.865731 a0.653066 0.653066 -180 0 0 0.740769 0.491375 a0.655459 0.655459 -180 0 0 0.252042
+ -0.864672 Z" class="st38"/>
+ </g>
+ <g id="shape139-537" v:mID="139" v:groupContext="shape" transform="translate(0,-0.415652)">
+ <title>Sheet.139</title>
+ <path d="M36.07 331.28 L36.07 321.27 L19.64 311.85 L3.16 321.13 L3.16 321.52 L0 319.68 L0 329.58 L0.47 329.86
+ L0.47 331.94 L1.46 332.57 L3.33 331.52 L15.43 338.57 L15.43 340.61 L16.42 341.24 L18.24 340.22
+ L22.24 342.55 L22.24 339.27 L36.07 331.28 Z" class="st47"/>
+ </g>
+ <g id="shape140-539" v:mID="140" v:groupContext="shape" transform="translate(27.8077,-2.86477)">
+ <title>Sheet.140</title>
+ <path d="M0.29 342.55 L6.62 338.89 A1.82805 1.82805 0 0 1 6.62 336.9 L0.29 340.55 A1.82805 1.82805 -180 0
+ 0 0.29 342.55 Z" class="st48"/>
+ </g>
+ <g id="shape141-541" v:mID="141" v:groupContext="shape" transform="translate(23.5035,-4.85627)">
+ <title>Sheet.141</title>
+ <path d="M4.6 342.55 L10.92 338.89 L6.32 336.24 L0 339.89 L4.6 342.55 Z" class="st49"/>
+ </g>
+ <g id="shape142-544" v:mID="142" v:groupContext="shape" transform="translate(23.3588,-2.86477)">
+ <title>Sheet.142</title>
+ <path d="M0.14 339.89 L4.74 342.55 A1.82805 1.82805 0 0 1 4.74 340.55 L0.14 337.9 A3.49826 3.49826 -180 0
+ 0 0.14 339.89 Z" class="st50"/>
+ </g>
+ <g id="shape143-547" v:mID="143" v:groupContext="shape" transform="translate(25.8933,-5.98478)">
+ <title>Sheet.143</title>
+ <path d="M2.87 342.55 L0 340.89" class="st51"/>
+ <path d="M0.94 340.34 L3.82 342" class="st51"/>
+ <path d="M1.88 339.8 L4.76 341.46" class="st51"/>
+ <path d="M2.82 339.26 L5.7 340.92" class="st51"/>
+ <path d="M3.76 338.71 L6.64 340.37" class="st51"/>
+ </g>
+ <g id="shape144-554" v:mID="144" v:groupContext="shape" transform="translate(23.5035,-7.51159)">
+ <title>Sheet.144</title>
+ <path d="M5.13 341.17 L11.45 337.52 A11.9345 11.9345 0 0 1 6.32 338.89 L0 342.55 A11.9345 11.9345 -180 0
+ 0 5.13 341.17 Z" class="st52"/>
+ </g>
+ <g id="shape145-557" v:mID="145" v:groupContext="shape" transform="translate(30.2106,-4.74563)">
+ <title>Sheet.145</title>
+ <path d="M0.98 341.98 L0 342.55" class="st51"/>
+ <path d="M1.26 341.48 L2.24 340.92" class="st51"/>
+ <path d="M2.53 340.42 L3.51 339.86" class="st51"/>
+ </g>
+ <g id="shape146-562" v:mID="146" v:groupContext="shape" transform="translate(23.3588,-2.86477)">
+ <title>Sheet.146</title>
+ <path d="M0.14 339.89 L4.74 342.55 L11.07 338.89 A1.82805 1.82805 0 0 1 11.07 336.9 L7.85 335.04 L11.6 332.87
+ A11.9345 11.9345 0 0 1 6.47 334.25 L0.14 337.9 A3.49826 3.49826 -180 0 0 0.14 339.89"
+ class="st53"/>
+ </g>
+ </g>
+ <g id="shape147-565" v:mID="147" v:groupContext="shape" transform="translate(427.321,214.49) rotate(90)">
+ <title>Cloud</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <path d="M5.37 311.54 A8.61618 10.0654 0 0 1 9.5 292.2 A17.4727 20.4114 0 0 1 34.86 275.89 A20.0634 23.4379 0
+ 0 1 56.58 272.26 A12.5816 14.6977 0 0 1 75.21 271.05 A14.3244 16.7336 0 0 1 97.98 277.09 A10.2423
+ 11.9646 0 0 1 106.25 294.02 A12.6864 14.8197 0 0 1 95.9 318.19 A16.0049 18.6962 0 0 1 73.14 330.27
+ A18.8712 22.0444 0 0 1 42.1 335.11 A23.9217 27.9441 0 0 1 15.2 330.27 A9.43759 11.0249 0 0 1 5.37
+ 311.54 Z" class="st42"/>
+ <path d="M5.37 311.54 A8.61618 10.0654 0 0 1 9.5 292.2 A17.4727 20.4114 0 0 1 34.86 275.89 A20.0634 23.4379 0
+ 0 1 56.58 272.26 A12.5816 14.6977 0 0 1 75.21 271.05 A14.3244 16.7336 0 0 1 97.98 277.09 A10.2423
+ 11.9646 0 0 1 106.25 294.02 A12.6864 14.8197 0 0 1 95.9 318.19 A16.0049 18.6962 0 0 1 73.14 330.27
+ A18.8712 22.0444 0 0 1 42.1 335.11 A23.9217 27.9441 0 0 1 15.2 330.27 A9.43759 11.0249 0 0 1 5.37
+ 311.54" class="st54"/>
+ <path d="M11.05 312.14 A8.59237 10.0375 0 0 1 5.37 311.54" class="st54"/>
+ <path d="M40.54 332.09 A8.62978 10.0812 -180 0 0 42.1 335.11" class="st54"/>
+ <path d="M73.92 326.65 A6.96633 8.13801 0 0 1 73.14 330.27" class="st54"/>
+ <path d="M89.7 308.52 A7.30994 8.5394 0 0 1 95.9 318.19" class="st54"/>
+ <path d="M103.15 297.64 A6.67364 7.79609 -180 0 0 106.25 294.02" class="st54"/>
+ <path d="M37.96 278.3 A10.2914 12.0219 -180 0 0 34.86 275.89" class="st54"/>
+ </g>
+ <g id="shape148-574" v:mID="148" v:groupContext="shape" transform="translate(110.222,-64.9346)">
+ <title>Triangle</title>
+ <desc>setsum</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.6995" cy="336.449" width="31.4" height="12.1933"/>
+ <g id="shadow148-575" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M31.4 342.55 L14.67 324.26 L0 342.55 L31.4 342.55 Z" class="st9"/>
+ </g>
+ <path d="M31.4 342.55 L14.67 324.26 L0 342.55 L31.4 342.55 Z" class="st10"/>
+ <text x="8.35" y="337.95" class="st55" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>setsum</text> </g>
+ <g id="shape149-579" v:mID="149" v:groupContext="shape" transform="translate(292.639,20.8827) rotate(45)">
+ <title>Sheet.149</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L14.71 342.55" class="st14"/>
+ </g>
+ <g id="shape150-584" v:mID="150" v:groupContext="shape" transform="translate(43.2897,-54.1122)">
+ <title>Sheet.150</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L10.07 342.55" class="st14"/>
+ </g>
+ <g id="shape151-589" v:mID="151" v:groupContext="shape" transform="translate(-112.261,8.34531) rotate(-28.1394)">
+ <title>Sheet.151</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 342.55 L18 342.55" class="st14"/>
+ </g>
+ <g id="shape152-594" v:mID="152" v:groupContext="shape">
+ <title>Sheet.152</title>
+ <desc>Clients</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="32.4673" cy="337.134" width="64.94" height="10.8224"/>
+ <rect x="0" y="331.723" width="64.9346" height="10.8224" class="st11"/>
+ <text x="21.5" y="339.53" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Clients</text> </g>
+ <g id="shape153-597" v:mID="153" v:groupContext="shape" transform="translate(83.578,-9.58078)">
+ <title>Sheet.153</title>
+ <desc>Distributed Cache</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="42.0677" cy="337.134" width="84.14" height="10.8224"/>
+ <rect x="0" y="331.723" width="84.1355" height="10.8224" class="st11"/>
+ <text x="13.1" y="339.53" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Distributed Cache</text> </g>
+ <g id="shape154-600" v:mID="154" v:groupContext="shape" transform="translate(181.983,-18.6146)">
+ <title>Sheet.154</title>
+ <desc>Web Servers</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="32.4673" cy="337.134" width="64.94" height="10.8224"/>
+ <rect x="0" y="331.723" width="64.9346" height="10.8224" class="st11"/>
+ <text x="11.93" y="339.53" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Web Servers</text> </g>
+ <g id="shape155-603" v:mID="155" v:groupContext="shape" transform="translate(96.6068,630.978) rotate(180)">
+ <title>Simple Arrow</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <g id="shadow155-604" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,-0.3456,-1.9728)" class="st1">
+ <path d="M0 342.55 L12 330.55 L12 336.55 L16.23 336.55 L16.23 342.55 L16.23 348.54 L12 348.54 L12 354.54
+ L0 342.55 Z" class="st56"/>
+ </g>
+ <path d="M0 342.55 L12 330.55 L12 336.55 L16.23 336.55 L16.23 342.55 L16.23 348.54 L12 348.54 L12 354.54 L0 342.55
+ Z" class="st57"/>
+ </g>
+ <g id="shape156-607" v:mID="156" v:groupContext="shape" transform="translate(173.159,625.567) rotate(180)">
+ <title>Simple Arrow.153</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <g id="shadow156-608" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,-0.3456,-1.9728)" class="st1">
+ <path d="M0 342.55 L12 330.55 L12 336.55 L16.23 336.55 L16.23 342.55 L16.23 348.54 L12 348.54 L12 354.54
+ L0 342.55 Z" class="st56"/>
+ </g>
+ <path d="M0 342.55 L12 330.55 L12 336.55 L16.23 336.55 L16.23 342.55 L16.23 348.54 L12 348.54 L12 354.54 L0 342.55
+ Z" class="st57"/>
+ </g>
+ </g>
+ <g id="shape157-611" v:mID="157" v:groupContext="shape" transform="translate(0,-149.475)">
+ <title>Rectangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow157-612" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="193.823" width="271.116" height="148.722" class="st58"/>
+ </g>
+ <rect x="0" y="193.823" width="271.116" height="148.722" class="st59"/>
+ </g>
+ <g id="shape158-615" v:mID="158" v:groupContext="shape" transform="translate(271.116,-149.475)">
+ <title>Rectangle.158</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow158-616" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="193.823" width="202.104" height="148.722" class="st58"/>
+ </g>
+ <rect x="0" y="193.823" width="202.104" height="148.722" class="st59"/>
+ </g>
+ <g id="shape159-619" v:mID="159" v:groupContext="shape">
+ <title>Rectangle.159</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow159-620" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="193.823" width="271.116" height="148.722" class="st58"/>
+ </g>
+ <rect x="0" y="193.823" width="271.116" height="148.722" class="st59"/>
+ </g>
+ <g id="shape160-623" v:mID="160" v:groupContext="shape" transform="translate(271.116,0)">
+ <title>Rectangle.160</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow160-624" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="193.823" width="202.104" height="148.722" class="st58"/>
+ </g>
+ <rect x="0" y="193.823" width="202.104" height="148.722" class="st59"/>
+ </g>
+ <g id="shape161-627" v:mID="161" v:groupContext="shape" transform="translate(83.578,-151.241)">
+ <title>Sheet.161</title>
+ <desc>(a) Distributed Web Cache</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54.3546" cy="333.806" width="108.71" height="17.4792"/>
+ <g id="shadow161-628" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="325.066" width="108.709" height="17.4792" class="st9"/>
+ </g>
+ <rect x="0" y="325.066" width="108.709" height="17.4792" class="st10"/>
+ <text x="4" y="336.81" class="st60" v:langID="1033"><v:paragraph v:spLine="-1.5" v:spBefore="8" v:spAfter="16"
+ v:bulletSize="0.166667"/><v:tabList/>(a) Distributed Web Cache</text> </g>
+ <g id="shape162-632" v:mID="162" v:groupContext="shape" transform="translate(319.513,-151.241)">
+ <title>Sheet.162</title>
+ <desc>(b) Detecting Routing Loops</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54.3546" cy="333.806" width="108.71" height="17.4792"/>
+ <g id="shadow162-633" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="325.066" width="108.709" height="17.4792" class="st9"/>
+ </g>
+ <rect x="0" y="325.066" width="108.709" height="17.4792" class="st10"/>
+ <text x="4" y="336.81" class="st60" v:langID="1033"><v:paragraph v:spLine="-1.5" v:spBefore="8" v:spAfter="16"
+ v:bulletSize="0.166667"/><v:tabList/>(b) Detecting Routing Loops</text> </g>
+ <g id="shape163-637" v:mID="163" v:groupContext="shape" transform="translate(77.5283,-3.35965)">
+ <title>Sheet.163</title>
+ <desc>(c) In-order Workload Scheduler</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="63.5211" cy="333.806" width="127.05" height="17.4792"/>
+ <g id="shadow163-638" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="325.066" width="127.042" height="17.4792" class="st9"/>
+ </g>
+ <rect x="0" y="325.066" width="127.042" height="17.4792" class="st10"/>
+ <text x="4" y="336.81" class="st60" v:langID="1033"><v:paragraph v:spLine="-1.5" v:spBefore="8" v:spAfter="16"
+ v:bulletSize="0.166667"/><v:tabList/>(c) In-order Workload Scheduler</text> </g>
+ <g id="shape164-642" v:mID="164" v:groupContext="shape" transform="translate(307.414,-3.35965)">
+ <title>Sheet.164</title>
+ <desc>(d) Database Semi-join Operations</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="66.2253" cy="333.806" width="132.46" height="17.4792"/>
+ <g id="shadow164-643" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="325.066" width="132.451" height="17.4792" class="st9"/>
+ </g>
+ <rect x="0" y="325.066" width="132.451" height="17.4792" class="st10"/>
+ <text x="4" y="336.81" class="st60" v:langID="1033"><v:paragraph v:spLine="-1.5" v:spBefore="8" v:spAfter="16"
+ v:bulletSize="0.166667"/><v:tabList/>(d) Database Semi-join Operations</text> </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i2.svg b/doc/guides/prog_guide/img/member_i2.svg
new file mode 100644
index 00000000..759c6545
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i2.svg
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export memship_i2.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="4.38194in" height="1.25694in"
+ viewBox="0 0 315.5 90.5" xml:space="preserve" color-interpolation-filters="sRGB" class="st6">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:none;stroke:none;stroke-width:0.25}
+ .st2 {fill:#5b9bd5;font-family:Calibri;font-size:1.16666em}
+ .st3 {baseline-shift:32.4943%;font-size:0.649886em}
+ .st4 {font-size:1em}
+ .st5 {font-family:Cambria Math;font-size:1em}
+ .st6 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(0.25,-0.25)">
+ <title>Sheet.3</title>
+ <desc>False Positive Probability = (1-(1-1/m)kn)k ≃ (1-ekn/m)k</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="157.5" cy="45.5" width="315" height="90"/>
+ <rect x="0" y="0.5" width="315" height="90" class="st1"/>
+ <text x="8.28" y="49.82" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>False Positive Probability = (1-(1-1/m)<tspan
+ dy="-0.234em" class="st3" v:baseFontSize="14">kn</tspan><tspan dy="0.152em" class="st4">)</tspan><tspan
+ dy="-0.234em" class="st3" v:baseFontSize="14">k</tspan><tspan dy="0.152em" class="st4"> </tspan><tspan
+ class="st5">≃</tspan> (1-e<tspan dy="-0.234em" class="st3" v:baseFontSize="14">kn</tspan><tspan class="st3"
+ v:baseFontSize="14">/</tspan><tspan class="st3" v:baseFontSize="14">m</tspan><tspan dy="0.152em"
+ class="st4">)</tspan><tspan dy="-0.234em" class="st3" v:baseFontSize="14">k</tspan></text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i3.svg b/doc/guides/prog_guide/img/member_i3.svg
new file mode 100644
index 00000000..41e92cb8
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i3.svg
@@ -0,0 +1,148 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export memship_i3.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ width="4.71875in" height="2.84375in" viewBox="0 0 339.75 204.75" xml:space="preserve" color-interpolation-filters="sRGB"
+ class="st14">
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {marker-end:url(#mrkr5-32);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st5 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st6 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st7 {font-size:1em}
+ .st8 {fill:#deebf6;stroke:#c7c8c8;stroke-width:0.25}
+ .st9 {fill:#000000;font-family:Calibri;font-size:0.833336em}
+ .st10 {marker-end:url(#mrkr5-84);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st11 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st12 {fill:none;stroke:none;stroke-width:0.25}
+ .st13 {fill:#ff0000;font-family:Calibri;font-size:1.00001em}
+ .st14 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-32" class="st5" refX="-6.16" orient="auto" markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-84" class="st11" refX="-5.8" orient="auto" markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g>
+ <title>Page-1</title>
+ <g id="group174-1" transform="translate(3.0294,-5.3478)">
+ <title>Sheet.174</title>
+ <g id="shape155-2" transform="translate(99,-99)">
+ <title>Circle</title>
+ <g id="shadow155-3" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st2"/>
+ </g>
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st3"/>
+ </g>
+ <g id="shape156-7" transform="translate(207,-108)">
+ <title>Circle.156</title>
+ <g id="shadow156-8" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st2"/>
+ </g>
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st3"/>
+ </g>
+ <g id="shape157-12" transform="translate(153,-162)">
+ <title>Circle.157</title>
+ <g id="shadow157-13" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st2"/>
+ </g>
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st3"/>
+ </g>
+ <g id="shape158-17" transform="translate(297,-99)">
+ <title>Circle.158</title>
+ <g id="shadow158-18" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st2"/>
+ </g>
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st3"/>
+ </g>
+ <g id="shape159-22" transform="translate(180,-36)">
+ <title>Circle.159</title>
+ <g id="shadow159-23" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st2"/>
+ </g>
+ <path d="M0 186.75 A18 18 0 0 1 36 186.75 A18 18 0 1 1 0 186.75 Z" class="st3"/>
+ </g>
+ <g id="shape160-27" transform="translate(109.604,-115.419) rotate(-7.12502)">
+ <title>Sheet.160</title>
+ <path d="M0 204.75 L66.4 204.75" class="st4"/>
+ </g>
+ <g id="shape161-33" transform="translate(276.661,-123.214) rotate(9.46232)">
+ <title>Sheet.161</title>
+ <path d="M0 204.75 L48.58 204.75" class="st4"/>
+ </g>
+ <g id="shape162-38" transform="translate(246.135,262.572) rotate(-160.346)">
+ <title>Sheet.162</title>
+ <path d="M0 204.75 L127.63 204.75" class="st4"/>
+ </g>
+ <g id="shape163-43" transform="translate(284.391,198.775) rotate(141.977)">
+ <title>Sheet.163</title>
+ <path d="M0 204.75 L46.23 204.75" class="st4"/>
+ </g>
+ <g id="shape164-48" transform="translate(70.6118,307.655) rotate(-145.945)">
+ <title>Sheet.164</title>
+ <path d="M0 204.75 L60.88 204.75" class="st4"/>
+ </g>
+ <g id="shape167-53" transform="translate(72,0)">
+ <title>Rectangle.167</title>
+ <desc>BF of IDs</desc>
+ <g id="shadow167-54" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="177.75" width="36" height="27" class="st2"/>
+ </g>
+ <rect x="0" y="177.75" width="36" height="27" class="st3"/>
+ <text x="7.69" y="188.25" class="st6">BF of <tspan x="11.71" dy="1.2em" class="st7">IDs</tspan></text> </g>
+ <g id="shape168-60" transform="translate(108,0)">
+ <title>Rectangle.168</title>
+ <desc>Packet</desc>
+ <g id="shadow168-61" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="177.75" width="90" height="27" class="st2"/>
+ </g>
+ <rect x="0" y="177.75" width="90" height="27" class="st8"/>
+ <text x="31.47" y="194.25" class="st9">Packet</text> </g>
+ <g id="shape169-66" transform="translate(0,-63)">
+ <title>Rectangle.169</title>
+ <desc>BF of IDs</desc>
+ <g id="shadow169-67" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="177.75" width="36" height="27" class="st2"/>
+ </g>
+ <rect x="0" y="177.75" width="36" height="27" class="st3"/>
+ <text x="7.69" y="188.25" class="st6">BF of <tspan x="11.71" dy="1.2em" class="st7">IDs</tspan></text> </g>
+ <g id="shape170-73" transform="translate(36,-63)">
+ <title>Rectangle.170</title>
+ <desc>Packet</desc>
+ <g id="shadow170-74" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="177.75" width="90" height="27" class="st2"/>
+ </g>
+ <rect x="0" y="177.75" width="90" height="27" class="st8"/>
+ <text x="31.47" y="194.25" class="st9">Packet</text> </g>
+ <g id="shape171-79" transform="translate(240.248,331.493) rotate(161.565)">
+ <title>Sheet.171</title>
+ <path d="M-0 190.52 A81.3416 36.0611 -153.48 0 0 82.31 195.86 L82.49 195.55" class="st10"/>
+ </g>
+ <g id="shape172-85" transform="translate(156.426,260.029) rotate(161.565)">
+ <title>Sheet.172</title>
+ <path d="M-0 181.6 A88.1422 54.1439 -124.1 0 0 82.68 187.13 L82.83 186.81" class="st10"/>
+ </g>
+ <g id="shape173-90" transform="translate(18,-121.5)">
+ <title>Sheet.173</title>
+ <desc>Encode ID</desc>
+ <rect x="0" y="177.75" width="63" height="27" class="st12"/>
+ <text x="7.02" y="194.85" class="st13">Encode ID</text> </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i4.svg b/doc/guides/prog_guide/img/member_i4.svg
new file mode 100644
index 00000000..a2b6f2f5
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i4.svg
@@ -0,0 +1,450 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Generated by Microsoft Visio 11.0, SVG Export, v1.0 memship_i4.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="7.625in" height="3.125in" viewBox="0 0 549 225"
+ xml:space="preserve" color-interpolation-filters="sRGB" class="st18">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:none;stroke:#ff0000;stroke-width:0.25}
+ .st2 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em}
+ .st3 {marker-end:url(#mrkr5-10);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.25}
+ .st4 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.47169811320755}
+ .st5 {visibility:visible}
+ .st6 {fill:#5b9bd5;fill-opacity:0.22;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st7 {fill:#5b9bd5;stroke:#c8c8c8;stroke-width:0.25}
+ .st8 {fill:none;stroke:none;stroke-width:0.25}
+ .st9 {fill:#5b9bd5;font-family:Calibri;font-size:1.16666em}
+ .st10 {stroke:#5b9bd5;stroke-dasharray:1.5,3;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st11 {fill:#5b9bd5;fill-opacity:0.25;stroke:#5b9bd5;stroke-opacity:0.25;stroke-width:0.75}
+ .st12 {fill:#4f88bb;stroke:#41719c;stroke-width:0.75}
+ .st13 {fill:#ffffff;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st14 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st15 {font-size:1em}
+ .st16 {marker-end:url(#mrkr5-162);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st17 {fill:#000000;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st18 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-10" class="st4" v:arrowType="5" v:arrowSize="2" v:setback="3.71" refX="-3.71" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.12,-2.12) "/>
+ </marker>
+ <marker id="mrkr5-162" class="st17" v:arrowType="5" v:arrowSize="2" v:setback="5.8" refX="-5.8" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="group47-1" transform="translate(3.0294,-0.25)" v:mID="47" v:groupContext="group">
+ <title>Sheet.47</title>
+ <g id="shape1-2" v:mID="1" v:groupContext="shape" transform="translate(177.75,-191.922)">
+ <title>Sheet.1</title>
+ <desc>Element</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="216" width="108" height="18"/>
+ <rect x="0" y="207" width="108" height="18" class="st1"/>
+ <text x="33.77" y="219.6" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Element</text> </g>
+ <g id="shape2-5" v:mID="2" v:groupContext="shape" transform="translate(456.75,33.0781) rotate(90)">
+ <title>Sheet.2</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L18.65 225" class="st3"/>
+ </g>
+ <g id="shape3-11" v:mID="3" v:groupContext="shape" transform="translate(0,-67.0469)">
+ <title>Rectangle.54</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow3-12" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape4-15" v:mID="4" v:groupContext="shape" transform="translate(27,-67.0469)">
+ <title>Rectangle.55</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow4-16" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape5-19" v:mID="5" v:groupContext="shape" transform="translate(54,-67.0469)">
+ <title>Rectangle.56</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow5-20" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape6-23" v:mID="6" v:groupContext="shape" transform="translate(0,-53.5469)">
+ <title>Rectangle.57</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow6-24" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape7-27" v:mID="7" v:groupContext="shape" transform="translate(27,-53.5469)">
+ <title>Rectangle.58</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow7-28" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape8-31" v:mID="8" v:groupContext="shape" transform="translate(54,-53.5469)">
+ <title>Rectangle.59</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow8-32" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape9-35" v:mID="9" v:groupContext="shape" transform="translate(5.625,-72.6719)">
+ <title>Sheet.9</title>
+ <desc>BF-1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36" cy="211.5" width="72" height="27"/>
+ <rect x="0" y="198" width="72" height="27" class="st8"/>
+ <text x="23.29" y="215.7" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>BF-1</text> </g>
+ <g id="shape10-38" v:mID="10" v:groupContext="shape" transform="translate(128.25,-65.0781)">
+ <title>Rectangle.74</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow10-39" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape11-42" v:mID="11" v:groupContext="shape" transform="translate(155.25,-65.0781)">
+ <title>Rectangle.75</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-43" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape12-46" v:mID="12" v:groupContext="shape" transform="translate(182.25,-65.0781)">
+ <title>Rectangle.76</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow12-47" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape13-50" v:mID="13" v:groupContext="shape" transform="translate(128.25,-51.5781)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow13-51" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape14-54" v:mID="14" v:groupContext="shape" transform="translate(155.25,-51.5781)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow14-55" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape15-58" v:mID="15" v:groupContext="shape" transform="translate(182.25,-51.5781)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow15-59" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape16-62" v:mID="16" v:groupContext="shape" transform="translate(301.5,-65.0781)">
+ <title>Rectangle.81</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow16-63" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape17-66" v:mID="17" v:groupContext="shape" transform="translate(328.5,-65.0781)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow17-67" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape18-70" v:mID="18" v:groupContext="shape" transform="translate(355.5,-65.0781)">
+ <title>Rectangle.83</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow18-71" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape19-74" v:mID="19" v:groupContext="shape" transform="translate(301.5,-51.5781)">
+ <title>Rectangle.84</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow19-75" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape20-78" v:mID="20" v:groupContext="shape" transform="translate(328.5,-51.5781)">
+ <title>Rectangle.85</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow20-79" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape21-82" v:mID="21" v:groupContext="shape" transform="translate(355.5,-51.5781)">
+ <title>Rectangle.86</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow21-83" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape22-86" v:mID="22" v:groupContext="shape" transform="translate(447.75,-65.6406)">
+ <title>Rectangle.88</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow22-87" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape23-90" v:mID="23" v:groupContext="shape" transform="translate(474.75,-65.6406)">
+ <title>Rectangle.89</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow23-91" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape24-94" v:mID="24" v:groupContext="shape" transform="translate(501.75,-65.6406)">
+ <title>Rectangle.90</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow24-95" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape25-98" v:mID="25" v:groupContext="shape" transform="translate(447.75,-52.1406)">
+ <title>Rectangle.91</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow25-99" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape26-102" v:mID="26" v:groupContext="shape" transform="translate(474.75,-52.1406)">
+ <title>Rectangle.92</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow26-103" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape27-106" v:mID="27" v:groupContext="shape" transform="translate(501.75,-52.1406)">
+ <title>Rectangle.93</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow27-107" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st5">
+ <rect x="0" y="211.5" width="27" height="13.5" class="st6"/>
+ </g>
+ <rect x="0" y="211.5" width="27" height="13.5" class="st7"/>
+ </g>
+ <g id="shape28-110" v:mID="28" v:groupContext="shape" transform="translate(213.75,-63.9531)">
+ <title>Sheet.28</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L83.25 225" class="st10"/>
+ </g>
+ <g id="shape29-113" v:mID="29" v:groupContext="shape" transform="translate(387,-63.9531)">
+ <title>Sheet.29</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L54 225" class="st10"/>
+ </g>
+ <g id="group31-116" transform="translate(184.5,-113.172)" v:mID="31" v:groupContext="group">
+ <title>Sheet.31</title>
+ <g id="shape32-117" v:mID="32" v:groupContext="shape" transform="translate(225,173.25) rotate(90)">
+ <title>Block Arrow</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow32-118" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,1.9728,-0.3456)" class="st5">
+ <path d="M0 225 L25.87 225 L51.75 177.75 L25.87 130.5 L0 130.5 L0 225 Z" class="st11"/>
+ </g>
+ <path d="M0 225 L25.87 225 L51.75 177.75 L25.87 130.5 L0 130.5 L0 225 Z" class="st12"/>
+ </g>
+ <g id="shape33-121" v:mID="33" v:groupContext="shape" transform="translate(2.25,-24.3529)">
+ <title>Sheet.33</title>
+ <desc>h1, h2 .. hk</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="45" cy="215.868" width="90" height="18.2647"/>
+ <rect x="0" y="206.735" width="90" height="18.2647" class="st8"/>
+ <text x="17.56" y="219.47" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>h1, h2 .. hk</text> </g>
+ </g>
+ <g id="shape34-124" v:mID="34" v:groupContext="shape" transform="translate(307.011,286.73) rotate(152.323)">
+ <title>Sheet.34</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L128.85 225" class="st3"/>
+ </g>
+ <g id="shape35-129" v:mID="35" v:groupContext="shape" transform="translate(433.272,125.452) rotate(99.7172)">
+ <title>Sheet.35</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L58.31 225" class="st3"/>
+ </g>
+ <g id="shape36-134" v:mID="36" v:groupContext="shape" transform="translate(407.724,-64.1459) rotate(45)">
+ <title>Sheet.36</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L79.16 225" class="st3"/>
+ </g>
+ <g id="shape37-139" v:mID="37" v:groupContext="shape" transform="translate(320.441,-127.12) rotate(15.6155)">
+ <title>Sheet.37</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 225 L200.75 225" class="st3"/>
+ </g>
+ <g id="shape38-144" v:mID="38" v:groupContext="shape" transform="translate(132.75,-75.2588)">
+ <title>Sheet.38</title>
+ <desc>BF-2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36" cy="211.5" width="72" height="27"/>
+ <rect x="0" y="198" width="72" height="27" class="st8"/>
+ <text x="23.29" y="215.7" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>BF-2</text> </g>
+ <g id="shape39-147" v:mID="39" v:groupContext="shape" transform="translate(303.75,-70.7588)">
+ <title>Sheet.39</title>
+ <desc>BF-X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36" cy="211.5" width="72" height="27"/>
+ <rect x="0" y="198" width="72" height="27" class="st8"/>
+ <text x="23.2" y="215.7" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>BF-X</text> </g>
+ <g id="shape40-150" v:mID="40" v:groupContext="shape" transform="translate(447.75,-75.2588)">
+ <title>Sheet.40</title>
+ <desc>BF-L</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36" cy="211.5" width="72" height="27"/>
+ <rect x="0" y="198" width="72" height="27" class="st8"/>
+ <text x="23.89" y="215.7" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>BF-L</text> </g>
+ <g id="shape41-153" v:mID="41" v:groupContext="shape" transform="translate(300.375,-117)">
+ <title>Sheet.41</title>
+ <desc>Hashing for lookup/Insertion into a vector of BFs happens once</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="90" cy="202.5" width="180" height="45"/>
+ <rect x="0" y="180" width="180" height="45" class="st8"/>
+ <text x="4.6" y="198.9" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Hashing for lookup/Insertion into a <tspan
+ x="23.06" dy="1.2em" class="st15">vector of BFs happens once</tspan></text> </g>
+ <g id="shape44-157" v:mID="44" v:groupContext="shape" transform="translate(249.698,-151.505) rotate(-3.74012)">
+ <title>Sheet.44</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M-0 225 A93.4958 45.6256 42.23 0 1 79.38 221.66 L79.68 221.85" class="st16"/>
+ </g>
+ <g id="shape45-163" v:mID="45" v:groupContext="shape" transform="translate(30.375,0.25)">
+ <title>Sheet.45</title>
+ <desc>Lookup/Insertion is done in the series of BFs, one by one or ...</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="233.048" cy="202.5" width="466.1" height="45"/>
+ <rect x="0" y="180" width="466.096" height="45" class="st8"/>
+ <text x="4.34" y="206.1" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Lookup/Insertion is done in the series of BFs, one by one or can be optimized to do in parallel. </text> </g>
+ <g id="shape46-166" v:mID="46" v:groupContext="shape" transform="translate(123.252,-43.6868) rotate(17.0249)">
+ <title>Sheet.46</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M-0 225 A88.2185 43.0621 47.63 0 1 70.31 221.39 L70.6 221.6" class="st16"/>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i5.svg b/doc/guides/prog_guide/img/member_i5.svg
new file mode 100644
index 00000000..c1728cf2
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i5.svg
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Generated by Microsoft Visio 11.0, SVG Export, v1.0 memship_i5.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="5.30481in" height="1.96146in"
+ viewBox="0 0 381.946 141.225" xml:space="preserve" color-interpolation-filters="sRGB" class="st15">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st3 {fill:#5b9bd5;stroke:#c8c8c8;stroke-width:0.25}
+ .st4 {fill:none;stroke:none;stroke-width:0.25}
+ .st5 {fill:#ffffff;font-family:Calibri;font-size:1.16666em;font-weight:bold}
+ .st6 {marker-end:url(#mrkr5-14);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st7 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st8 {fill:#5b9bd5;font-family:Calibri;font-size:0.833336em}
+ .st9 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em}
+ .st10 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st11 {marker-end:url(#mrkr5-63);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st12 {fill:#000000;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.22935779816514}
+ .st13 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st14 {font-size:1em}
+ .st15 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-14" class="st7" v:arrowType="5" v:arrowSize="2" v:setback="6.16" refX="-6.16" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-63" class="st12" v:arrowType="5" v:arrowSize="2" v:setback="7.15" refX="-7.15" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-4.36,-4.36) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="group1-1" transform="translate(191.995,-19.4751)" v:mID="1" v:groupContext="group">
+ <title>Sheet.1</title>
+ <g id="shape2-2" v:mID="2" v:groupContext="shape" transform="translate(146.944,42.2251) rotate(90)">
+ <title>Triangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow2-3" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,1.9728,-0.3456)" class="st1">
+ <path d="M99 141.23 L49.5 58.62 L0 141.23 L99 141.23 Z" class="st2"/>
+ </g>
+ <path d="M99 141.23 L49.5 58.62 L0 141.23 L99 141.23 Z" class="st3"/>
+ </g>
+ <g id="shape3-6" v:mID="3" v:groupContext="shape" transform="translate(0,-34.65)">
+ <title>Sheet.3</title>
+ <desc>vBF</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="38.1251" cy="126.375" width="76.26" height="29.7"/>
+ <rect x="0" y="111.525" width="76.2502" height="29.7" class="st4"/>
+ <text x="27.68" y="130.58" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>vBF </text> </g>
+ </g>
+ <g id="shape4-9" v:mID="4" v:groupContext="shape" transform="translate(126.724,-100.475)">
+ <title>Sheet.4</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L64.83 141.23" class="st6"/>
+ </g>
+ <g id="shape5-15" v:mID="5" v:groupContext="shape" transform="translate(103.5,-101.775)">
+ <title>Sheet.5</title>
+ <desc>Flow Key</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="39.9122" cy="135.863" width="79.83" height="10.7251"/>
+ <rect x="0" y="130.5" width="79.8244" height="10.7251" class="st4"/>
+ <text x="21.78" y="138.86" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Key</text> </g>
+ <g id="shape6-18" v:mID="6" v:groupContext="shape" transform="translate(221.726,-56.2468) rotate(-24.5123)">
+ <title>Sheet.6</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L65.42 141.23" class="st6"/>
+ </g>
+ <g id="shape7-23" v:mID="7" v:groupContext="shape" transform="translate(280.318,-68.9751)">
+ <title>Sheet.7</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L64.83 141.23" class="st6"/>
+ </g>
+ <g id="shape8-28" v:mID="8" v:groupContext="shape" transform="translate(338.125,-56.6022) rotate(24.1625)">
+ <title>Sheet.8</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L70.8 141.23" class="st6"/>
+ </g>
+ <g id="shape9-33" v:mID="9" v:groupContext="shape" transform="translate(197.714,217.975) rotate(180)">
+ <title>Sheet.9</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L51.03 141.23" class="st6"/>
+ </g>
+ <g id="shape10-38" v:mID="10" v:groupContext="shape" transform="translate(18,-67.5)">
+ <title>Sheet.10</title>
+ <desc>New Flow =&#62; New Assignment</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="80.4201" cy="134.475" width="160.85" height="13.5"/>
+ <rect x="0" y="127.725" width="160.84" height="13.5" class="st4"/>
+ <text x="25.11" y="137.18" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>New Flow =&#62; New Assignment</text> </g>
+ <g id="shape11-41" v:mID="11" v:groupContext="shape" transform="translate(198.032,253.975) rotate(180)">
+ <title>Sheet.11</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 141.23 L51.03 141.23" class="st6"/>
+ </g>
+ <g id="shape12-46" v:mID="12" v:groupContext="shape" transform="translate(0,-31.5)">
+ <title>Sheet.12</title>
+ <desc>Old Flow =&#62; forward to specific thread</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="81" cy="136.725" width="162.01" height="9"/>
+ <rect x="0" y="132.225" width="162" height="9" class="st4"/>
+ <text x="11.04" y="139.43" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Old Flow =&#62; forward to specific thread</text> </g>
+ <g id="shape13-49" v:mID="13" v:groupContext="shape" transform="translate(494.552,22.75) rotate(90)">
+ <title>Sheet.13</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 134.49 C3.18 142.89 7.57 142.28 11.25 138.99 C15.79 134.93 19.26 126.78 27 134.49" class="st10"/>
+ </g>
+ <g id="shape14-52" v:mID="14" v:groupContext="shape" transform="translate(494.552,58.75) rotate(90)">
+ <title>Sheet.14</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 134.49 C3.18 142.89 7.57 142.28 11.25 138.99 C15.79 134.93 19.26 126.78 27 134.49" class="st10"/>
+ </g>
+ <g id="shape15-55" v:mID="15" v:groupContext="shape" transform="translate(494.552,94.75) rotate(90)">
+ <title>Sheet.15</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 134.49 C3.18 142.89 7.57 142.28 11.25 138.99 C15.79 134.93 19.26 126.78 27 134.49" class="st10"/>
+ </g>
+ <g id="shape17-58" v:mID="17" v:groupContext="shape" transform="translate(348.769,-25.0593) rotate(44.5185)">
+ <title>Sheet.17</title>
+ <path d="M-0 141.23 A35.1884 19.2595 167.75 0 1 42.43 138.27 L42.74 138.46" class="st11"/>
+ </g>
+ <g id="shape18-64" v:mID="18" v:groupContext="shape" transform="translate(222.188,-5.40005)">
+ <title>Sheet.18</title>
+ <desc>A BF corresponding to each worker thread</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="59.0625" cy="127.725" width="118.13" height="27"/>
+ <rect x="0" y="114.225" width="118.125" height="27" class="st4"/>
+ <text x="5.14" y="124.13" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>A BF corresponding to <tspan
+ x="11.19" dy="1.2em" class="st14">each worker thread</tspan></text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i6.svg b/doc/guides/prog_guide/img/member_i6.svg
new file mode 100644
index 00000000..265179f6
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i6.svg
@@ -0,0 +1,332 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Generated by Microsoft Visio 11.0, SVG Export, v1.0 memship_i6.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="8in" height="3.625in" viewBox="0 0 576 261"
+ xml:space="preserve" color-interpolation-filters="sRGB" class="st16">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st3 {fill:#5b9bd5;stroke:#c8c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.666664em;font-weight:bold}
+ .st5 {font-size:1em}
+ .st6 {fill:#70ad47;fill-opacity:0.5;stroke:#00b050;stroke-width:1.5}
+ .st7 {fill:none;stroke:none;stroke-width:0.25}
+ .st8 {fill:#00b050;font-family:Calibri;font-size:1.00001em}
+ .st9 {fill:none;stroke:#ff0000;stroke-width:0.25}
+ .st10 {fill:#5b9bd5;font-family:Calibri;font-size:0.833336em}
+ .st11 {marker-end:url(#mrkr5-29);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.25}
+ .st12 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.47169811320755}
+ .st13 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em}
+ .st14 {fill:#92d050;stroke:#c8c8c8;stroke-width:0.25}
+ .st15 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st16 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-29" class="st12" v:arrowType="5" v:arrowSize="2" v:setback="3.71" refX="-3.71" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.12,-2.12) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="group121-1" transform="translate(21.0294,-9.8478)" v:mID="121" v:groupContext="group">
+ <title>Sheet.121</title>
+ <g id="shape49-2" v:mID="49" v:groupContext="shape" transform="translate(396.989,-54.9268)">
+ <title>Rectangle.2</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow49-3" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="219.549" width="99.4817" height="41.4507" class="st2"/>
+ </g>
+ <rect x="0" y="219.549" width="99.4817" height="41.4507" class="st3"/>
+ </g>
+ <g id="shape50-6" v:mID="50" v:groupContext="shape" transform="translate(248.261,-12.1936)">
+ <title>Rectangle.4</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow50-7" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="178.099" width="99.4817" height="82.9014" class="st2"/>
+ </g>
+ <rect x="0" y="178.099" width="99.4817" height="82.9014" class="st3"/>
+ </g>
+ <g id="shape52-10" v:mID="52" v:groupContext="shape" transform="translate(6.07514E-013,-29.0155)">
+ <title>Rectangle.10</title>
+ <desc>Signatures for target 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="49.7409" cy="225.767" width="99.49" height="70.4662"/>
+ <g id="shadow52-11" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="190.534" width="99.4817" height="70.4662" class="st2"/>
+ </g>
+ <rect x="0" y="190.534" width="99.4817" height="70.4662" class="st3"/>
+ <text x="26.54" y="223.37" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Signatures for<v:newlineChar/><tspan
+ x="36.73" dy="1.2em" class="st5">target </tspan>1</text> </g>
+ <g id="shape53-16" v:mID="53" v:groupContext="shape" transform="translate(239.971,-20.4837)">
+ <title>Sheet.53</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <rect x="0" y="248.565" width="116.062" height="12.4352" class="st6"/>
+ </g>
+ <g id="shape54-18" v:mID="54" v:groupContext="shape" transform="translate(353.649,-19.9346)">
+ <title>Sheet.54</title>
+ <desc>Match 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="33.1606" cy="254.782" width="66.33" height="12.4352"/>
+ <rect x="0" y="248.565" width="66.3211" height="12.4352" class="st7"/>
+ <text x="13.06" y="258.38" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Match 1</text> </g>
+ <g id="shape55-21" v:mID="55" v:groupContext="shape" transform="translate(216.989,-210.652)">
+ <title>Sheet.55</title>
+ <desc>Packet Payload</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="49.7409" cy="252.71" width="99.49" height="16.5803"/>
+ <rect x="0" y="244.42" width="99.4817" height="16.5803" class="st9"/>
+ <text x="19.04" y="255.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Packet Payload</text> </g>
+ <g id="shape56-24" v:mID="56" v:groupContext="shape" transform="translate(526.665,52.2365) rotate(90)">
+ <title>Sheet.56</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 261 L16.52 261" class="st11"/>
+ </g>
+ <g id="shape96-30" v:mID="96" v:groupContext="shape" transform="translate(-3.0294,-95.7818)">
+ <title>Sheet.96</title>
+ <desc>Attack Signature Length 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="51.75" cy="248.565" width="103.5" height="24.8704"/>
+ <rect x="0" y="236.13" width="103.5" height="24.8704" class="st7"/>
+ <text x="4.79" y="251.26" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Attack Signature Length 1</text> </g>
+ <g id="group114-33" transform="translate(228.359,-134.152)" v:mID="114" v:groupContext="group">
+ <title>Sheet.114</title>
+ <g id="group106-34" transform="translate(0,-24.8704)" v:mID="106" v:groupContext="group">
+ <title>Sheet.106</title>
+ <g id="shape100-35" v:mID="100" v:groupContext="shape" transform="translate(3.65707E-013,-12.4352)">
+ <title>Rectangle.100</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow100-36" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape101-39" v:mID="101" v:groupContext="shape" transform="translate(24.8704,-12.4352)">
+ <title>Rectangle.101</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow101-40" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape102-43" v:mID="102" v:groupContext="shape" transform="translate(49.7409,-12.4352)">
+ <title>Rectangle.102</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow102-44" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape103-47" v:mID="103" v:groupContext="shape">
+ <title>Rectangle.103</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow103-48" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape104-51" v:mID="104" v:groupContext="shape" transform="translate(24.8704,1.13687E-013)">
+ <title>Rectangle.104</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow104-52" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape105-55" v:mID="105" v:groupContext="shape" transform="translate(49.7409,1.13687E-013)">
+ <title>Rectangle.105</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow105-56" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ </g>
+ <g id="group107-59" v:mID="107" v:groupContext="group">
+ <title>Sheet.107</title>
+ <g id="shape108-60" v:mID="108" v:groupContext="shape" transform="translate(3.65707E-013,-12.4352)">
+ <title>Rectangle.100</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow108-61" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape109-64" v:mID="109" v:groupContext="shape" transform="translate(24.8704,-12.4352)">
+ <title>Rectangle.101</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow109-65" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape110-68" v:mID="110" v:groupContext="shape" transform="translate(49.7409,-12.4352)">
+ <title>Rectangle.102</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow110-69" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape111-72" v:mID="111" v:groupContext="shape">
+ <title>Rectangle.103</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <g id="shadow111-73" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st14"/>
+ </g>
+ <g id="shape112-76" v:mID="112" v:groupContext="shape" transform="translate(24.8704,1.13687E-013)">
+ <title>Rectangle.104</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow112-77" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st3"/>
+ </g>
+ <g id="shape113-80" v:mID="113" v:groupContext="shape" transform="translate(49.7409,1.13687E-013)">
+ <title>Rectangle.105</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <g id="shadow113-81" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st2"/>
+ </g>
+ <rect x="0" y="248.565" width="24.8704" height="12.4352" class="st14"/>
+ </g>
+ </g>
+ </g>
+ <g id="shape89-84" v:mID="89" v:groupContext="shape" transform="translate(398.644,-116.927) rotate(24.4696)">
+ <title>Sheet.89</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 261 L143.75 261" class="st11"/>
+ </g>
+ <g id="shape115-89" v:mID="115" v:groupContext="shape" transform="translate(116.062,-1.19371E-012)">
+ <title>Rectangle.115</title>
+ <desc>Signatures for target 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="49.7409" cy="211.259" width="99.49" height="99.4817"/>
+ <g id="shadow115-90" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="161.518" width="99.4817" height="99.4817" class="st2"/>
+ </g>
+ <rect x="0" y="161.518" width="99.4817" height="99.4817" class="st3"/>
+ <text x="26.54" y="208.86" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Signatures for<v:newlineChar/><tspan
+ x="36.73" dy="1.2em" class="st5">target </tspan>2</text> </g>
+ <g id="shape116-95" v:mID="116" v:groupContext="shape" transform="translate(117.989,-95.7818)">
+ <title>Sheet.116</title>
+ <desc>Attack Signature Length 2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="51.9909" cy="248.565" width="103.99" height="24.8704"/>
+ <rect x="0" y="236.13" width="103.982" height="24.8704" class="st7"/>
+ <text x="5.03" y="251.26" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Attack Signature Length 2</text> </g>
+ <g id="shape118-98" v:mID="118" v:groupContext="shape" transform="translate(392.971,-90.217)">
+ <title>Sheet.118</title>
+ <desc>Attack Signature Length L</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="248.565" width="108" height="24.8704"/>
+ <rect x="0" y="236.13" width="108" height="24.8704" class="st7"/>
+ <text x="7.43" y="251.26" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Attack Signature Length L</text> </g>
+ <g id="shape119-101" v:mID="119" v:groupContext="shape" transform="translate(384.909,-64.9346)">
+ <title>Sheet.119</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <rect x="0" y="248.565" width="116.062" height="12.4352" class="st6"/>
+ </g>
+ <g id="shape120-103" v:mID="120" v:groupContext="shape" transform="translate(491.971,-64.9346)">
+ <title>Sheet.120</title>
+ <desc>Match 2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="33.1606" cy="254.782" width="66.33" height="12.4352"/>
+ <rect x="0" y="248.565" width="66.3211" height="12.4352" class="st7"/>
+ <text x="13.06" y="258.38" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Match 2</text> </g>
+ <g id="shape85-106" v:mID="85" v:groupContext="shape" transform="translate(478.538,12.9307) rotate(65.6291)">
+ <title>Sheet.85</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 261 L109.61 261" class="st11"/>
+ </g>
+ <g id="shape117-111" v:mID="117" v:groupContext="shape" transform="translate(247.054,-91.2818)">
+ <title>Sheet.117</title>
+ <desc>Attack Signature Length X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="52.7082" cy="248.565" width="105.42" height="24.8704"/>
+ <rect x="0" y="236.13" width="105.416" height="24.8704" class="st7"/>
+ <text x="5.7" y="251.26" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Attack Signature Length X</text> </g>
+ </g>
+ <g id="shape122-114" v:mID="122" v:groupContext="shape" transform="translate(315.114,-164.13)">
+ <title>Sheet.122</title>
+ <desc>HTSS</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="26.943" cy="248.565" width="53.89" height="24.8704"/>
+ <rect x="0" y="236.13" width="53.8859" height="24.8704" class="st7"/>
+ <text x="14.52" y="252.16" class="st15" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>HTSS</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/member_i7.svg b/doc/guides/prog_guide/img/member_i7.svg
new file mode 100644
index 00000000..e23ae26c
--- /dev/null
+++ b/doc/guides/prog_guide/img/member_i7.svg
@@ -0,0 +1,399 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<!-- Generated by Microsoft Visio 11.0, SVG Export, v1.0 memship_i7.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="8.5in" height="4.5in" viewBox="0 0 612 324"
+ xml:space="preserve" color-interpolation-filters="sRGB" class="st23">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;stroke:#5b9bd5;stroke-opacity:0.22;stroke-width:0.25}
+ .st3 {fill:#5b9bd5;stroke:#c8c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em;font-weight:bold}
+ .st5 {font-size:1em}
+ .st6 {fill:#70ad47;fill-opacity:0.5;stroke:#00b050;stroke-width:1.5}
+ .st7 {fill:none;stroke:none;stroke-width:0.25}
+ .st8 {fill:#00b050;font-family:Calibri;font-size:1.16666em}
+ .st9 {fill:none;stroke:#00b050;stroke-width:2.25}
+ .st10 {fill:#5b9bd5;font-family:Calibri;font-size:0.833336em}
+ .st11 {fill:#5b9bd5;font-family:Calibri;font-size:1.16666em}
+ .st12 {fill:#a8d08d;stroke:#c8c8c8;stroke-width:0.25}
+ .st13 {marker-end:url(#mrkr5-83);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.25}
+ .st14 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.47169811320755}
+ .st15 {marker-end:url(#mrkr5-95);stroke:#92d050;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.25}
+ .st16 {fill:#92d050;fill-opacity:1;stroke:#92d050;stroke-opacity:1;stroke-width:0.47169811320755}
+ .st17 {fill:#00b050;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st18 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em}
+ .st19 {fill:none;stroke:#ff0000;stroke-width:2.25}
+ .st20 {fill:#ff0000;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st21 {marker-end:url(#mrkr5-123);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st22 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st23 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-83" class="st14" v:arrowType="5" v:arrowSize="2" v:setback="3.71" refX="-3.71" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.12,-2.12) "/>
+ </marker>
+ <marker id="mrkr5-95" class="st16" v:arrowType="5" v:arrowSize="2" v:setback="3.71" refX="-3.71" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.12,-2.12) "/>
+ </marker>
+ <marker id="mrkr5-123" class="st22" v:arrowType="5" v:arrowSize="2" v:setback="5.8" refX="-5.8" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="group121-1" transform="translate(21.0294,-32.2733)" v:mID="121" v:groupContext="group">
+ <title>Sheet.121</title>
+ <g id="shape49-2" v:mID="49" v:groupContext="shape" transform="translate(460.471,-62.2267)">
+ <title>Rectangle.2</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow49-3" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="279" width="108" height="45" class="st2"/>
+ </g>
+ <rect x="0" y="279" width="108" height="45" class="st3"/>
+ </g>
+ <g id="shape50-6" v:mID="50" v:groupContext="shape" transform="translate(320.452,-18.123)">
+ <title>Rectangle.4</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow50-7" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="234" width="108" height="90" class="st2"/>
+ </g>
+ <rect x="0" y="234" width="108" height="90" class="st3"/>
+ </g>
+ <g id="shape52-10" v:mID="52" v:groupContext="shape" transform="translate(0,-31.5)">
+ <title>Rectangle.10</title>
+ <desc>Flow Keys Matching Mask 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="285.75" width="108" height="76.5"/>
+ <g id="shadow52-11" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="247.5" width="108" height="76.5" class="st2"/>
+ </g>
+ <rect x="0" y="247.5" width="108" height="76.5" class="st3"/>
+ <text x="12.56" y="282.75" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Keys Matching <tspan
+ x="39.1" dy="1.2em" class="st5">Mask </tspan>1</text> </g>
+ <g id="shape53-16" v:mID="53" v:groupContext="shape" transform="translate(311.452,-27.123)">
+ <title>Sheet.53</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <rect x="0" y="310.5" width="126" height="13.5" class="st6"/>
+ </g>
+ <g id="shape54-18" v:mID="54" v:groupContext="shape" transform="translate(424.471,-26.2267)">
+ <title>Sheet.54</title>
+ <desc>Match</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36" cy="317.25" width="72" height="13.5"/>
+ <rect x="0" y="310.5" width="72" height="13.5" class="st7"/>
+ <text x="17.68" y="321.45" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Match</text> </g>
+ <g id="shape55-21" v:mID="55" v:groupContext="shape" transform="translate(261,-247.163)">
+ <title>Sheet.55</title>
+ <desc>Flow ID1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.1728" cy="315" width="54.35" height="18"/>
+ <rect x="0" y="306" width="54.3456" height="18" class="st9"/>
+ <text x="9.52" y="318" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow ID1</text> </g>
+ <g id="shape96-24" v:mID="96" v:groupContext="shape" transform="translate(0,-109.783)">
+ <title>Sheet.96</title>
+ <desc>Flow Mask 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="319.5" width="108" height="9"/>
+ <rect x="0" y="315" width="108" height="9" class="st7"/>
+ <text x="18.51" y="323.7" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Mask 1</text> </g>
+ <g id="group114-27" transform="translate(247.5,-163.783)" v:mID="114" v:groupContext="group">
+ <title>Sheet.114</title>
+ <g id="group106-28" transform="translate(0,-27)" v:mID="106" v:groupContext="group">
+ <title>Sheet.106</title>
+ <g id="shape100-29" v:mID="100" v:groupContext="shape" transform="translate(0,-13.5)">
+ <title>Rectangle.100</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow100-30" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape101-33" v:mID="101" v:groupContext="shape" transform="translate(27,-13.5)">
+ <title>Rectangle.101</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow101-34" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape102-37" v:mID="102" v:groupContext="shape" transform="translate(54,-13.5)">
+ <title>Rectangle.102</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow102-38" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape103-41" v:mID="103" v:groupContext="shape">
+ <title>Rectangle.103</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow103-42" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape104-45" v:mID="104" v:groupContext="shape" transform="translate(27,0)">
+ <title>Rectangle.104</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow104-46" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape105-49" v:mID="105" v:groupContext="shape" transform="translate(54,0)">
+ <title>Rectangle.105</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow105-50" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ </g>
+ <g id="group107-53" v:mID="107" v:groupContext="group">
+ <title>Sheet.107</title>
+ <g id="shape108-54" v:mID="108" v:groupContext="shape" transform="translate(0,-13.5)">
+ <title>Rectangle.100</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow108-55" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape109-58" v:mID="109" v:groupContext="shape" transform="translate(27,-13.5)">
+ <title>Rectangle.101</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow109-59" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape110-62" v:mID="110" v:groupContext="shape" transform="translate(54,-13.5)">
+ <title>Rectangle.102</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <g id="shadow110-63" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st12"/>
+ </g>
+ <g id="shape111-66" v:mID="111" v:groupContext="shape">
+ <title>Rectangle.103</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <g id="shadow111-67" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape112-70" v:mID="112" v:groupContext="shape" transform="translate(27,0)">
+ <title>Rectangle.104</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow112-71" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ <g id="shape113-74" v:mID="113" v:groupContext="shape" transform="translate(54,0)">
+ <title>Rectangle.105</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <g id="shadow113-75" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728"
+ v:shadowType="1" transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="310.5" width="27" height="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="310.5" width="27" height="13.5" class="st3"/>
+ </g>
+ </g>
+ </g>
+ <g id="shape89-78" v:mID="89" v:groupContext="shape" transform="translate(413.723,393.802) rotate(146.31)">
+ <title>Sheet.89</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 324 L153.9 324" class="st13"/>
+ </g>
+ <g id="shape115-84" v:mID="115" v:groupContext="shape" transform="translate(126,0)">
+ <title>Rectangle.115</title>
+ <desc>Flow Keys Matching Mask 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="270" width="108" height="108"/>
+ <g id="shadow115-85" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <rect x="0" y="216" width="108" height="108" class="st2"/>
+ </g>
+ <rect x="0" y="216" width="108" height="108" class="st3"/>
+ <text x="12.56" y="267" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Keys Matching <tspan
+ x="39.1" dy="1.2em" class="st5">Mask </tspan>2</text> </g>
+ <g id="shape85-90" v:mID="85" v:groupContext="shape" transform="translate(635.321,91.2793) rotate(81.3573)">
+ <title>Sheet.85</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 324 L143.93 324" class="st15"/>
+ </g>
+ <g id="shape56-96" v:mID="56" v:groupContext="shape" transform="translate(579.175,-64.556) rotate(64.1257)">
+ <title>Sheet.56</title>
+ <path d="M0 324 L54.31 324" class="st15"/>
+ </g>
+ </g>
+ <g id="shape122-101" v:mID="122" v:groupContext="shape" transform="translate(351,-213.444)">
+ <title>Sheet.122</title>
+ <desc>HTSS with False Negative (Cache)</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="45" cy="304.722" width="90" height="38.556"/>
+ <rect x="0" y="285.444" width="90" height="38.556" class="st7"/>
+ <text x="13.29" y="301.72" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>HTSS with False <tspan
+ x="10.52" dy="1.2em" class="st5">Negative </tspan>(Cache)</text> </g>
+ <g id="shape123-105" v:mID="123" v:groupContext="shape" transform="translate(287.654,-290.556)">
+ <title>Sheet.123</title>
+ <desc>Active</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="24.1875" cy="310.5" width="48.38" height="27"/>
+ <rect x="0" y="297" width="48.375" height="27" class="st7"/>
+ <text x="8.63" y="314.1" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Active</text> </g>
+ <g id="shape124-108" v:mID="124" v:groupContext="shape" transform="translate(278.827,-153)">
+ <title>Sheet.124</title>
+ <desc>Target for Flow ID 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="36.0864" cy="310.5" width="72.18" height="27"/>
+ <rect x="0" y="297" width="72.1728" height="27" class="st9"/>
+ <text x="11.93" y="306.9" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target for <tspan
+ x="13.54" dy="1.2em" class="st5">Flow ID </tspan>1</text> </g>
+ <g id="shape125-112" v:mID="125" v:groupContext="shape" transform="translate(155.857,-254.556)">
+ <title>Sheet.125</title>
+ <desc>Flow ID2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27.1728" cy="315" width="54.35" height="18"/>
+ <rect x="0" y="306" width="54.3456" height="18" class="st19"/>
+ <text x="9.52" y="318" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow ID2</text> </g>
+ <g id="shape126-115" v:mID="126" v:groupContext="shape" transform="translate(153,-270)">
+ <title>Sheet.126</title>
+ <desc>New/Inactive</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="40.5" cy="310.5" width="81" height="27"/>
+ <rect x="0" y="297" width="81" height="27" class="st7"/>
+ <text x="6.77" y="314.1" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>New/Inactive</text> </g>
+ <g id="shape127-118" v:mID="127" v:groupContext="shape" transform="translate(251.739,-239.709) rotate(14.0795)">
+ <title>Sheet.127</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 318.73 A39.2404 18 -180 0 0 49.73 320.91 L50.07 320.78" class="st21"/>
+ </g>
+ <g id="shape128-124" v:mID="128" v:groupContext="shape" transform="translate(219.24,-229.5)">
+ <title>Sheet.128</title>
+ <desc>Miss</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="20.88" cy="310.5" width="41.76" height="27"/>
+ <rect x="0" y="297" width="41.76" height="27" class="st7"/>
+ <text x="7.81" y="314.7" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Miss</text> </g>
+ <g id="shape129-127" v:mID="129" v:groupContext="shape" transform="translate(147.029,-142.056)">
+ <title>Sheet.129</title>
+ <desc>Flow Mask 2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="319.5" width="108" height="9"/>
+ <rect x="0" y="315" width="108" height="9" class="st7"/>
+ <text x="18.51" y="323.7" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Mask 2</text> </g>
+ <g id="shape130-130" v:mID="130" v:groupContext="shape" transform="translate(166.845,-18.5004) rotate(18.2325)">
+ <title>Sheet.130</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 293.46 A71.1913 104.269 -180 0 0 97.04 298.43 L97.25 298.14" class="st21"/>
+ </g>
+ <g id="shape131-135" v:mID="131" v:groupContext="shape" transform="translate(184.406,-3.04505) rotate(-3.24734)">
+ <title>Sheet.131</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 293.46 A112.345 104.269 -180 0 0 154.25 297.52 L154.52 297.28" class="st21"/>
+ </g>
+ <g id="shape132-140" v:mID="132" v:groupContext="shape" transform="translate(301.368,16.888) rotate(-25.868)">
+ <title>Sheet.132</title>
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <path d="M0 293.46 A83.375 104.269 -180 0 0 113.91 298.14 L114.14 297.87" class="st21"/>
+ </g>
+ <g id="shape133-145" v:mID="133" v:groupContext="shape" transform="translate(345.029,-142.056)">
+ <title>Sheet.133</title>
+ <desc>Flow Mask X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="319.5" width="108" height="9"/>
+ <rect x="0" y="315" width="108" height="9" class="st7"/>
+ <text x="18.43" y="323.7" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Mask X</text> </g>
+ <g id="shape134-148" v:mID="134" v:groupContext="shape" transform="translate(481.5,-139.5)">
+ <title>Sheet.134</title>
+ <desc>Flow Mask L</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="319.5" width="108" height="9"/>
+ <rect x="0" y="315" width="108" height="9" class="st7"/>
+ <text x="19.12" y="323.7" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Mask L</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 40f04a10..c4beb346 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -44,23 +44,29 @@ Programmer's Guide
mbuf_lib
poll_mode_drv
rte_flow
+ traffic_metering_and_policing
traffic_management
cryptodev_lib
+ rte_security
link_bonding_poll_mode_drv_lib
timer_lib
hash_lib
efd_lib
+ member_lib
lpm_lib
lpm6_lib
+ flow_classify_lib
packet_distrib_lib
reorder_lib
ip_fragment_reassembly_lib
generic_receive_offload_lib
+ generic_segmentation_offload_lib
pdump_lib
multi_proc_support
kernel_nic_interface
thread_safety_dpdk_functions
eventdev
+ event_ethernet_rx_adapter
qos_framework
power_man
packet_classif_access_ctrl
@@ -191,6 +197,19 @@ Programmer's Guide
:numref:`figure_efd11` :ref:`figure_efd11`
+:numref:`figure_membership1` :ref:`figure_membership1`
+
+:numref:`figure_membership2` :ref:`figure_membership2`
+
+:numref:`figure_membership3` :ref:`figure_membership3`
+
+:numref:`figure_membership4` :ref:`figure_membership4`
+
+:numref:`figure_membership5` :ref:`figure_membership5`
+
+:numref:`figure_membership6` :ref:`figure_membership6`
+
+:numref:`figure_membership7` :ref:`figure_membership7`
**Tables**
diff --git a/doc/guides/prog_guide/member_lib.rst b/doc/guides/prog_guide/member_lib.rst
new file mode 100644
index 00000000..caca8dc2
--- /dev/null
+++ b/doc/guides/prog_guide/member_lib.rst
@@ -0,0 +1,420 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+.. _member_library:
+
+Membership Library
+==================
+
+Introduction
+------------
+
+The DPDK Membership Library provides an API for DPDK applications to insert a
+new member, delete an existing member, or query the existence of a member in a
+given set, or a group of sets. For the case of a group of sets, the library
+will return not only whether the element has been inserted before in one of
+the sets but also which set it belongs to. The Membership Library is an
+extension and generalization of a traditional filter structure (for example
+Bloom Filter [Member-bloom]) that has multiple usages in a wide variety of
+workloads and applications. In general, the Membership Library is a data
+structure that provides a "set-summary" on whether a member belongs to a set,
+and as discussed in detail later, there are two advantages of using such a
+set-summary rather than operating on a "full-blown" complete list of elements:
+first, it requires much less storage than the whole list of elements
+themselves; second, checking an element's membership (or other operations)
+in the set-summary is much faster than checking it against the complete
+list of elements.
+
+We use the term "Set-Summary" in this guide to refer to the space-efficient,
+probabilistic membership data structure that is provided by the library. A
+membership test for an element will return the set this element belongs to, or
+"not-found", with a very high probability of accuracy. A set-summary
+is a fundamental data aggregation component that can be used in many network
+(and other) applications. It is a crucial structure to address performance and
+scalability issues of diverse network applications including overlay networks,
+data-centric networks, flow table summaries, network statistics and
+traffic monitoring. A set-summary is useful for applications that need to
+keep track of a list of elements when a complete list would require too much
+space and/or processing cost. In these situations, the set-summary works as
+a lossy hash-based representation of a set of members. It can dramatically
+reduce space requirement and significantly improve the performance of set
+membership queries at the cost of introducing a very small membership test error
+probability.
+
+.. _figure_membership1:
+.. figure:: img/member_i1.*
+
+ Example Usages of Membership Library
+
+There are various usages for the Membership Library across a very
+large set of applications and workloads. Interested readers can refer to
+[Member-survey] for a survey of possible networking usages. The above figure
+provides a small set of examples of using the Membership Library:
+
+* Sub-figure (a)
+ depicts a distributed web cache architecture where a collection of proxies
+ attempt to share their web caches (cached from a set of back-end web servers) to
+ provide faster responses to clients, and the proxies use the Membership
+ Library to share summaries of what web pages/objects they are caching. With the
+  Membership Library, a proxy receiving an HTTP request will query the
+  set-summary to find the object's location and quickly determine whether to
+  retrieve the requested web page from a nearby proxy or from a back-end web server.
+
+* Sub-figure (b) depicts another example of using the Membership Library to
+  prevent routing loops, which is typically done using a slow TTL countdown and
+  dropping packets when the TTL expires. As shown in Sub-figure (b), an embedded
+ set-summary in the packet header itself can be used to summarize the set of
+ nodes a packet has gone through, and each node upon receiving a packet can check
+ whether its id is a member of the set of visited nodes, and if it is, then a
+ routing loop is detected.
+
+* Sub-Figure (c) presents another usage of the Membership
+ Library to load-balance flows to worker threads with in-order guarantee where a
+ set-summary is used to query if a packet belongs to an existing flow or a new
+ flow. Packets belonging to a new flow are forwarded to the current least loaded
+ worker thread, while those belonging to an existing flow are forwarded to the
+ pre-assigned thread to guarantee in-order processing.
+
+* Sub-figure (d) highlights
+  yet another usage example in the database domain where a set-summary is used
+  to determine joins between sets: instead of creating a join by comparing each
+  element of one set against the elements of a different set, the join is done
+  on the summaries, since they can efficiently encode the members of a given set.
+
+The Membership Library is a configurable library that is optimized to cover
+set-membership functionality for both single-set and multi-set scenarios. Two
+set-summary schemes are provided: (a) a vector of Bloom Filters and (b)
+Hash-Table based set-summaries, with and without false negative probability.
+This guide first briefly describes these different types of set-summaries and
+usage examples for each, and then highlights the Membership Library API.
+
+Vector of Bloom Filters
+-----------------------
+
+Bloom Filter (BF) [Member-bloom] is a well-known space-efficient
+probabilistic data structure that answers set membership queries (test whether
+an element is a member of a set) with some probability of false positives and
+zero false negatives; a query for an element returns either it is "possibly in
+a set" (with very high probability) or "definitely not in a set".
+
+The BF is a method for representing a set of ``n`` elements (for example flow
+keys in the network applications domain) to support membership queries. The
+idea of BF is to allocate a bit-vector ``v`` with ``m`` bits, which are initially
+all set to 0, and then choose ``k`` independent hash functions ``h1``, ``h2``, ...
+``hk`` with hash values ranging from ``0`` to ``m-1`` to perform hashing
+calculations on each element to be inserted. Every time an element ``X`` is
+inserted into the set, the bits at positions ``h1(X)``, ``h2(X)``, ...
+``hk(X)`` in ``v`` are set to 1 (any particular bit might be set to 1 multiple times
+for multiple different inserted elements). Given a query for any element ``Y``, the
+bits at positions ``h1(Y)``, ``h2(Y)``, ... ``hk(Y)`` are checked. If any of them is 0,
+then Y is definitely not in the set. Otherwise there is a high probability that
+Y is a member of the set with certain false positive probability. As shown in
+the next equation, the false positive probability can be made arbitrarily small
+by changing the number of hash functions (``k``) and the vector length (``m``).
+
+.. _figure_membership2:
+.. figure:: img/member_i2.*
+
+ Bloom Filter False Positive Probability
+
+Without BF, accurate membership testing would involve a costly hash table
+lookup and full element comparison. The advantage of using a BF is to simplify
+the membership test into a series of hash calculations and memory accesses for a
+small bit-vector, which can be easily optimized. Hence the lookup throughput
+(set membership test) can be significantly faster than a normal hash table
+lookup with element comparison.
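+
+The following is a minimal sketch of the insert and query logic described
+above. It is plain C, independent of the DPDK API; the bit-vector size, the
+number of hash functions and the ``hash_k()`` helper are illustrative
+assumptions:
+
+.. code-block:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    #define M 1024              /* number of bits in the bit-vector v */
+    #define K 3                 /* number of independent hash functions */
+
+    static uint8_t v[M / 8];    /* bit-vector, initially all zeros */
+
+    /* hash_k(i, ...) stands for the i-th independent hash function. */
+    extern uint32_t hash_k(int i, const void *x, size_t len);
+
+    static void bf_insert(const void *x, size_t len)
+    {
+        for (int i = 0; i < K; i++) {
+            uint32_t pos = hash_k(i, x, len) % M;
+            v[pos / 8] |= 1 << (pos % 8);           /* set bit hi(X) */
+        }
+    }
+
+    /* Returns 0 if definitely not in the set, 1 if possibly in the set. */
+    static int bf_query(const void *y, size_t len)
+    {
+        for (int i = 0; i < K; i++) {
+            uint32_t pos = hash_k(i, y, len) % M;
+            if (!(v[pos / 8] & (1 << (pos % 8))))
+                return 0;                           /* bit hi(Y) is 0 */
+        }
+        return 1;
+    }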
+
+.. _figure_membership3:
+.. figure:: img/member_i3.*
+
+ Detecting Routing Loops Using BF
+
+BF is used for applications that need only one set, and the
+membership of elements is checked against the BF. The routing loop detection
+example in the above figure uses a single
+set to capture the node IDs that have been visited so far by the packet. Each
+node will then check this embedded BF in the packet header for its own id, and
+if the BF indicates that the current node is definitely not in the set then a
+loop-free route is guaranteed.
+
+
+.. _figure_membership4:
+.. figure:: img/member_i4.*
+
+ Vector Bloom Filter (vBF) Overview
+
+To support membership test for both multiple sets and a single set,
+the library implements a Vector Bloom Filter (vBF) scheme.
+vBF basically composes multiple Bloom Filters into a vector of Bloom Filters.
+The membership test for an element is conducted on all of the
+Bloom Filters concurrently to determine which set(s) it belongs to, if any.
+The basic idea of vBF is shown in the above figure, where an element is
+used to address multiple Bloom Filters concurrently and the Bloom Filter
+index(es) with a hit are returned.
+
+.. _figure_membership5:
+.. figure:: img/member_i5.*
+
+ vBF for Flow Scheduling to Worker Thread
+
+As previously mentioned, there are many usages of such structures. vBF is used
+for applications that need to check membership against multiple sets
+simultaneously. The example shown in the above figure uses a set to capture
+all flows assigned for processing to a given worker thread. Upon receiving a
+packet, the vBF is used to quickly determine whether this packet belongs to a
+new flow, in which case it is forwarded to the current least loaded worker
+thread, or to an existing flow, in which case it is queued for the pre-assigned
+thread to guarantee in-order processing (i.e. the ability of vBF to indicate
+right away whether a given flow is new is critical to minimize response time
+latency).
+
+It should be noted that vBF can be implemented using a set of single bloom
+filters with sequential lookup of each BF. However, being able to concurrently
+search all set-summaries is a big throughput advantage. In the library, a
+certain degree of parallelism is realized by checking all Bloom Filters
+together.
+
+
+Hash-Table based Set-Summaries
+------------------------------
+
+Hash-table based set-summary (HTSS) is another scheme in the membership library.
+Cuckoo filter [Member-cfilter] is an example of HTSS.
+HTSS supports multi-set membership testing like
+vBF does. However, while vBF is better for a small number of target sets, HTSS
+can easily outperform vBF when the number of sets is large, since HTSS uses a
+single hash table for membership testing while vBF requires testing a series
+of Bloom Filters, each corresponding to one set. As a result, vBF is generally
+more adequate for a small, limited number of sets, while HTSS should be used
+with a larger number of sets.
+
+.. _figure_membership6:
+.. figure:: img/member_i6.*
+
+ Using HTSS for Attack Signature Matching
+
+As shown in the above figure, attack signature matching, where each set
+represents a certain signature length in the payload (for correctness of this
+example, an attack signature should not be a subset of another one), is a good
+example of using HTSS with 0% false negatives (i.e., when an element returns not
+found, it is 100% certain that it is not a member of any set). The packet
+inspection application benefits from knowing right away that the current payload
+does not match any attack signatures in the database, establishing its
+legitimacy; otherwise a deep inspection of the packet is needed.
+
+HTSS employs a data structure similar to, but simpler than, a traditional hash
+table; the major difference is that HTSS stores only the signatures, not the
+full keys/elements, which can significantly reduce the footprint of the table.
+Along with the signature, HTSS also stores a value to indicate the target set.
+When looking up an element, the element is hashed and the HTSS is addressed
+to retrieve the stored signature. If the signature matches, then the value
+corresponding to the index of the target set which the element belongs to is
+retrieved. Because signatures can collide, HTSS can still have a false positive
+probability. Furthermore, if elements are allowed to be
+overwritten or evicted when the hash table becomes full, it will also have a
+false negative probability. We discuss this case in the next section.
+
+Set-Summaries with False Negative Probability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As previously mentioned, traditional set-summaries (e.g. Bloom Filters) do not
+have a false negative probability, i.e., when a query reports an element as
+not present in a given set, this is 100% certain. However, the Membership Library
+also supports a set-summary probabilistic data structure based on HTSS which
+allows for false negative probability.
+
+In HTSS, when the hash table becomes full, keys/elements will fail to be added
+into the table and the hash table has to be resized to accommodate these new
+elements, which can be expensive. However, if we allow new elements to overwrite
+or evict existing elements (as a cache typically does), then the resulting
+set-summary will begin to have false negative probability. This is because the
+element that was evicted from the set-summary may still be present in the target
+set. For subsequent inquiries the set-summary will falsely report the element
+not being in the set, hence having a false negative probability.
+
+The major usage of HTSS with false negative is to use it as a cache for
+distributing elements to different target sets. By allowing HTSS to evict old
+elements, the set-summary can keep track of the most recent elements
+(i.e. active) as a cache typically does. Old inactive elements (infrequently
+used elements) will automatically and eventually get evicted from the
+set-summary. It is worth noting that the set-summary still has a false positive
+probability, which means the application must either tolerate a certain false
+positive rate or have a fall-back path for when a false positive occurs.
+
+.. _figure_membership7:
+.. figure:: img/member_i7.*
+
+ Using HTSS with False Negatives for Wild Card Classification
+
+HTSS with false negatives (i.e. a cache) also has a wide set of applications.
+Wild card flow classification (e.g. ACL rules), highlighted in the
+above figure, is one such application. In that case each target set
+represents a sub-table with rules defined by a certain flow mask. The flow masks
+are non-overlapping, and for flows matching more than one rule only the highest
+priority one is inserted in the corresponding sub-table (interested readers can
+refer to the Open vSwitch (OvS) design of Mega Flow Cache (MFC) [Member-OvS]
+for further details). Typically the rules will have a large number of distinct
+masks and hence a large number of target sets, each corresponding to one
+mask. Because the active set of flows varies widely based on the network
+traffic, HTSS with false negative will act as a cache for <flowid, target ACL
+sub-table> pair for the current active set of flows. When a miss occurs (as
+shown in red in the above figure) the sub-tables will be searched sequentially
+one by one for a possible match, and when found the flow key and target
+sub-table will be inserted into the set-summary (i.e. cache insertion) so
+subsequent packets from the same flow don’t incur the overhead of the
+sequential search of sub-tables.
+
+Library API Overview
+--------------------
+
+The design goal of the Membership Library API is to be as generic as possible to
+support all the different types of set-summaries we discussed in previous
+sections and beyond. Fundamentally, the APIs need to include creation,
+insertion, deletion, and lookup.
+
+
+Set-summary Create
+~~~~~~~~~~~~~~~~~~
+
+The ``rte_member_create()`` function is used to create a set-summary structure. The input parameter
+is a struct used to pass in the parameters needed to initialize the set-summary, and the function returns a
+pointer to the created set-summary, or ``NULL`` if the creation failed.
+
+The general input arguments used when creating the set-summary should include ``name``
+which is the name of the created set-summary, ``type`` which is one of the types
+supported by the library (e.g. ``RTE_MEMBER_TYPE_HT`` for HTSS or ``RTE_MEMBER_TYPE_VBF`` for vBF), and ``key_len``
+which is the length of the element/key. Other parameters are only used for a
+certain type of set-summary, or have a slightly different meaning for different types of set-summary.
+For example, the ``num_keys`` parameter means the maximum number of entries for a hash-table based set-summary.
+However, for a Bloom Filter, this value means the expected number of keys that could be
+inserted into the Bloom Filter(s), and it is used to calculate the size of each
+Bloom Filter.
+
+We also pass two seeds, ``prim_hash_seed`` and
+``sec_hash_seed``, for the primary and secondary hash functions to calculate two independent hash values.
+The ``socket_id`` parameter is the NUMA socket ID for the memory used to create the
+set-summary. For HTSS, another parameter ``is_cache`` is used to indicate
+whether this set-summary is a cache (i.e. with false negative probability) or not.
+For vBF, extra parameters are needed. For example, ``num_set`` is the number of
+sets needed to initialize the vector of Bloom Filters, which is equal to the
+number of Bloom Filters that will be created.
+``false_pos_rate`` is the false positive rate. ``num_keys`` and ``false_pos_rate`` will be used to determine
+the number of hash functions and the Bloom Filter size.
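+
+The following is a minimal sketch of creating an HTSS set-summary. The field
+names follow the parameters described above but are an assumption; check
+``rte_member.h`` of the installed release for the exact structure layout:
+
+.. code-block:: c
+
+    #include <rte_member.h>
+
+    struct rte_member_parameters params = {
+        .name = "ht_summary",           /* name of the set-summary */
+        .type = RTE_MEMBER_TYPE_HT,     /* HTSS mode */
+        .is_cache = 0,                  /* no false negative probability */
+        .key_len = 16,                  /* length of the element/key */
+        .num_keys = 1 << 16,            /* maximum number of entries */
+        .prim_hash_seed = 1,
+        .sec_hash_seed = 2,
+        .socket_id = 0,                 /* NUMA socket of the memory */
+    };
+
+    struct rte_member_setsum *summary = rte_member_create(&params);
+    if (summary == NULL) {
+        /* creation failed */
+    }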
+
+
+Set-summary Element Insertion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_member_add()`` function is used to insert an element/key into a set-summary structure. If it fails, an
+error is returned. On success, the return value depends on the
+set-summary mode in order to provide extra information to the user. For vBF
+mode, a return value of 0 means a successful insert. For HTSS mode without false negatives, the insert
+could fail with ``-ENOSPC`` if the table is full. With false negatives (i.e. cache mode),
+an insert that does not cause any eviction (i.e. no overwriting of an
+existing entry) returns 0, while an insertion that causes an eviction returns
+1 to indicate this situation; it is not an error.
+
+The input arguments for the function should include the ``key`` which is a pointer to the element/key that needs to
+be added to the set-summary, and ``set_id`` which is the set id associated
+with the key that needs to be added.
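+
+Continuing the sketch above (``summary`` comes from ``rte_member_create()``;
+the key contents and the ``member_set_t`` set id value are illustrative):
+
+.. code-block:: c
+
+    uint8_t key[16] = { 0 };        /* the element/key to insert */
+    member_set_t set_id = 1;        /* target set to associate with the key */
+
+    int ret = rte_member_add(summary, key, set_id);
+    if (ret < 0) {
+        /* insert failed, e.g. -ENOSPC when a non-cache HTSS is full */
+    } else if (ret == 1) {
+        /* cache mode only: the insert evicted an existing entry */
+    }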
+
+
+Set-summary Element Lookup
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_member_lookup()`` function looks up a single key/element in the set-summary structure. It
+returns as soon as the first match is found. The return value is 1 if a
+match is found and 0 otherwise. The arguments for the function include ``key`` which is a pointer to the
+element/key that needs to be looked up, and ``set_id`` which is used to return the
+first target set id where the key has matched, if any.
+
+The ``rte_member_lookup_bulk()`` function is used to look up a bulk of keys/elements in the
+set-summary structure for their first match. Each key lookup returns as soon as its first match is found. The
+return value is the number of keys for which a match was found. The arguments of the function include ``keys``
+which is a pointer to a bulk of keys to be looked up,
+``num_keys`` which is the number
+of keys that will be looked up, and ``set_ids`` which returns the target set
+ids for the first match found for each of the input keys. ``set_ids`` is an array
+that needs to be sized according to ``num_keys``. If there is no match, the set id
+for that key will be set to ``RTE_MEMBER_NO_MATCH``.
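+
+A minimal sketch of a single-key lookup (``summary`` and ``key`` as in the
+sketches above):
+
+.. code-block:: c
+
+    member_set_t set_id;
+
+    if (rte_member_lookup(summary, key, &set_id) == 1) {
+        /* found: set_id holds the first matching target set */
+    } else {
+        /* key not found in any set */
+    }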
+
+The ``rte_member_lookup_multi()`` function looks up a single key/element in the
+set-summary structure for multiple matches. It
+returns ALL the matches (possibly more than one) found for this key when it
+is matched against all target sets (it is worth noting that for cache mode HTSS,
+the current implementation matches at most one target set). The return value is
+the number of matches
+that were found for this key (for cache mode HTSS the return value
+should be at most 1). The arguments for the function include ``key`` which is a pointer to the
+element/key that needs to be looked up, ``max_match_per_key`` which indicates the maximum number of matches
+the user expects to find for each key, and ``set_id`` which is used to return all
+target set ids where the key has matched, if any. The ``set_id`` array should be sized
+according to ``max_match_per_key``. For vBF, the maximum number of matches per key is equal
+to the number of sets. For HTSS, the maximum number of matches per key is equal to two times the
+entry count per bucket. ``max_match_per_key`` should be equal to or smaller than the maximum number of
+possible matches.
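+
+A minimal sketch of a multi-match lookup (``summary`` and ``key`` as in the
+earlier sketches; the bound of 4 matches is an illustrative assumption):
+
+.. code-block:: c
+
+    #define MAX_MATCH 4                 /* illustrative upper bound */
+
+    member_set_t set_ids[MAX_MATCH];
+    int n = rte_member_lookup_multi(summary, key, MAX_MATCH, set_ids);
+    /* n matches were found; set_ids[0..n-1] hold the matching set ids */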
+
+The ``rte_member_lookup_multi_bulk()`` function looks up a bulk of keys/elements in the
+set-summary structure for multiple matches. Each key lookup returns ALL the matches (possibly more
+than one) found for this key when it is matched against all target sets (cache mode HTSS
+matches at most one target set). The
+return value is the number of keys that find one or more matches in the
+set-summary structure. The arguments of the
+function include ``keys`` which is
+a pointer to a bulk of keys to be looked up, ``num_keys`` which is the number
+of keys that will be looked up, ``max_match_per_key`` which is the possible
+maximum number of matches for each key, ``match_count`` which is the returned number
+of matches for each key, and ``set_ids`` which returns the target set
+ids for all matches found for each key. ``set_ids`` is a 2-D array
+containing a 1-D array for each key (the size of the 1-D array per key should be set by the user according to ``max_match_per_key``).
+``max_match_per_key`` should be equal to or smaller than the maximum number of
+possible matches, similar to ``rte_member_lookup_multi()``.
+
+
+Set-summary Element Delete
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_member_delete()`` function deletes an element/key from a set-summary structure. If it fails,
+an error is returned. The input arguments should include ``key`` which is a pointer to the
+element/key that needs to be deleted from the set-summary, and ``set_id``
+which is the set id associated with the key to delete. It is worth noting that the current
+implementation of vBF does not support deletion [1]_; in that case an error code of ``-EINVAL`` will be returned.
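+
+A minimal sketch of deleting a key (``summary``, ``key`` and ``set_id`` as in
+the sketches above):
+
+.. code-block:: c
+
+    if (rte_member_delete(summary, key, set_id) < 0) {
+        /* deletion failed; vBF mode always returns -EINVAL here */
+    }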
+
+.. [1] The traditional Bloom Filter does not support proactive deletion. Supporting proactive deletion requires additional implementation and performance overhead.
+
+References
+-----------
+
+[Member-bloom] B H Bloom, "Space/Time Trade-offs in Hash Coding with Allowable Errors," Communications of the ACM, 1970.
+
+[Member-survey] A Broder and M Mitzenmacher, "Network Applications of Bloom Filters: A Survey," in Internet Mathematics, 2005.
+
+[Member-cfilter] B Fan, D G Andersen and M Kaminsky, "Cuckoo Filter: Practically Better Than Bloom," in Conference on emerging Networking Experiments and Technologies, 2014.
+
+[Member-OvS] B Pfaff, "The Design and Implementation of Open vSwitch," in NSDI, 2015.
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 1ac8f7eb..6a0c9f99 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -310,6 +310,26 @@ exported by each PMD. The list of flags and their precise meaning is
described in the mbuf API documentation and in the in :ref:`Mbuf Library
<Mbuf_Library>`, section "Meta Information".
+Per-Port and Per-Queue Offloads
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+The different offload capabilities can be queried using ``rte_eth_dev_info_get()``.
+A supported offload can be either per-port or per-queue.
+
+Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
+Per-port offload configuration is set using ``rte_eth_dev_configure``.
+Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
+To enable per-port offload, the offload should be set on both device configuration and queue setup.
+In the case of a mixed configuration, the queue setup shall return an error.
+To enable per-queue offload, the offload can be set only on the queue setup.
+Offloads which are not enabled are disabled by default.
+
+For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field of the ``rte_eth_txconf`` struct.
+In that case it is not required to set other flags in ``txq_flags``.
+For an application to use the Rx offloads API it should set the ``ignore_offload_bitfield`` bit in the ``rte_eth_rxmode`` struct.
+In that case it is not required to set other bitfield offloads in the ``rxmode`` struct, as shown in the sketch below.
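+
+The following is a minimal sketch of configuring one per-port Rx offload and
+repeating it at queue setup (``port_id`` and ``mb_pool`` are assumed to be set
+up by the application; the chosen offload flag and queue size are illustrative):
+
+.. code-block:: c
+
+    #include <rte_ethdev.h>
+    #include <rte_lcore.h>
+
+    struct rte_eth_dev_info dev_info;
+    struct rte_eth_conf port_conf = { 0 };
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+
+    /* Use the offload API rather than the legacy bit-fields. */
+    port_conf.rxmode.ignore_offload_bitfield = 1;
+    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;   /* per-port */
+    rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+
+    /* Per-port offloads must be repeated in the queue configuration;
+     * per-queue offloads could be added here as well. */
+    struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;
+    rxq_conf.offloads = port_conf.rxmode.offloads;
+    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
+                           &rxq_conf, mb_pool);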
+
Poll Mode Driver API
--------------------
@@ -536,3 +556,43 @@ call. As an end result, the application is able to achieve its goal of
monitoring a single statistic ("rx_errors" in this case), and if that shows
packets being dropped, it can easily retrieve a "set" of statistics using the
IDs array parameter to ``rte_eth_xstats_get_by_id`` function.
+
+NIC Reset API
+~~~~~~~~~~~~~
+
+.. code-block:: c
+
+ int rte_eth_dev_reset(uint16_t port_id);
+
+Sometimes a port has to be reset passively. For example, when a PF is
+reset, all its VFs should also be reset by the application to make them
+consistent with the PF. A DPDK application can also call this function
+to trigger a port reset. Normally, a DPDK application would invoke this
+function when an RTE_ETH_EVENT_INTR_RESET event is detected.
+
+It is the duty of the PMD to trigger RTE_ETH_EVENT_INTR_RESET events and
+the application should register a callback function to handle these
+events. When a PMD needs to trigger a reset, it can trigger an
+RTE_ETH_EVENT_INTR_RESET event. On receiving an RTE_ETH_EVENT_INTR_RESET
+event, applications can handle it as follows: Stop working queues, stop
+calling Rx and Tx functions, and then call rte_eth_dev_reset(). For
+thread safety all these operations should be called from the same thread.
+
+For example, when a PF is reset, the PF sends a message to notify the VFs of
+this event and also triggers an interrupt to the VFs. Then, in the interrupt
+service routine, the VFs detect this notification message and call
+_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL,
+NULL). This means that a PF reset triggers an RTE_ETH_EVENT_INTR_RESET
+event within the VFs. The function _rte_eth_dev_callback_process() will
+call the registered callback function. The callback function can trigger
+the application to handle all operations the VF reset requires, including
+stopping Rx/Tx queues and calling rte_eth_dev_reset().
+
+The rte_eth_dev_reset() itself is a generic function which only performs
+some hardware reset operations by calling dev_uninit() and
+dev_init(). It does not handle synchronization itself, which is left to
+the application.
+
+The PMD itself should not call rte_eth_dev_reset(). The PMD can only trigger
+the application to handle the reset event. It is the duty of the application to
+handle all synchronization before it calls rte_eth_dev_reset().
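+
+The following is a minimal sketch of the application side (the callback and
+its registration are real ethdev APIs; the handler body and the way the
+owning thread is signalled are left as illustrative comments):
+
+.. code-block:: c
+
+    static int
+    eth_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+                       void *cb_arg, void *ret_param)
+    {
+        RTE_SET_USED(cb_arg);
+        RTE_SET_USED(ret_param);
+
+        if (type == RTE_ETH_EVENT_INTR_RESET) {
+            /* Signal the thread that owns port_id; that thread stops
+             * its Rx/Tx loops and then calls rte_eth_dev_reset(port_id). */
+        }
+        return 0;
+    }
+
+    /* During initialization: */
+    rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
+                                  eth_event_callback, NULL);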
diff --git a/doc/guides/prog_guide/port_hotplug_framework.rst b/doc/guides/prog_guide/port_hotplug_framework.rst
index ee765773..fb0efc18 100644
--- a/doc/guides/prog_guide/port_hotplug_framework.rst
+++ b/doc/guides/prog_guide/port_hotplug_framework.rst
@@ -101,10 +101,6 @@ Limitations
* The framework can only be enabled with Linux. BSD is not supported.
-* To detach a port, the port should be backed by a device that igb_uio
- or VFIO manages.
-
* Not all PMDs support detaching feature.
- To know whether a PMD can support detaching, search for the
- "RTE_ETH_DEV_DETACHABLE" flag in rte_eth_dev::data::dev_flags. If the flag is
- defined in the PMD, detaching is supported.
+ The underlying bus must support hot-unplug. If not supported,
+ the function ``rte_eth_dev_detach()`` will return negative ENOTSUP.
diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 114d0b1c..c5d62a39 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -89,6 +89,14 @@ Core state can be altered by speculative sleeps whenever the specified lcore has
In the DPDK, if no packet is received after polling,
speculative sleeps can be triggered according the strategies defined by the user space application.
+Per-core Turbo Boost
+--------------------
+
+Individual cores can be allowed to enter a Turbo Boost state on a per-core
+basis. This is achieved by enabling Turbo Boost Technology in the BIOS, then
+looping through the relevant cores and enabling/disabling Turbo Boost on each
+core.
+
API Overview of the Power Library
---------------------------------
@@ -108,6 +116,10 @@ The main methods exported by power library are for CPU frequency scaling and inc
* **Freq set**: Prompt the kernel to set the frequency for the specific lcore.
+* **Enable turbo**: Prompt the kernel to enable Turbo Boost for the specific lcore.
+
+* **Disable turbo**: Prompt the kernel to disable Turbo Boost for the specific lcore, as shown in the sketch below.
+
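+A minimal sketch of using the turbo methods from an application (the lcore ID
+is illustrative):
+
+.. code-block:: c
+
+    #include <rte_power.h>
+
+    unsigned int lcore_id = 1;
+
+    /* Initialize power management for the lcore, then toggle turbo. */
+    if (rte_power_init(lcore_id) == 0) {
+        rte_power_freq_enable_turbo(lcore_id);   /* allow Turbo Boost */
+        /* ... latency-critical phase ... */
+        rte_power_freq_disable_turbo(lcore_id);  /* disallow Turbo Boost */
+    }
+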
User Cases
----------
diff --git a/doc/guides/prog_guide/profile_app.rst b/doc/guides/prog_guide/profile_app.rst
index 54b546ac..ca1c91f6 100644
--- a/doc/guides/prog_guide/profile_app.rst
+++ b/doc/guides/prog_guide/profile_app.rst
@@ -39,7 +39,8 @@ Profiling on x86
----------------
Intel processors provide performance counters to monitor events.
-Some tools provided by Intel, such as VTune, can be used to profile and benchmark an application.
+Some tools provided by Intel, such as Intel® VTune™ Amplifier, can be used
+to profile and benchmark an application.
See the *VTune Performance Analyzer Essentials* publication from Intel Press for more information.
For a DPDK application, this can be done in a Linux* application environment only.
@@ -59,6 +60,40 @@ Refer to the
for details about application profiling.
+Empty cycles tracing
+~~~~~~~~~~~~~~~~~~~~
+
+Iterations that yielded no RX packets (empty cycles, wasted iterations) can
+be analyzed using VTune Amplifier. This profiling employs the
+`Instrumentation and Tracing Technology (ITT) API
+<https://software.intel.com/en-us/node/544195>`_
+feature of VTune Amplifier and requires only reconfiguring the DPDK library,
+no changes in a DPDK application are needed.
+
+To trace wasted iterations on RX queues, first reconfigure DPDK with
+``CONFIG_RTE_ETHDEV_RXTX_CALLBACKS`` and
+``CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS`` enabled.
+
+Then rebuild DPDK, specifying paths to the ITT header and library, which can
+be found in any VTune Amplifier distribution in the *include* and *lib*
+directories respectively:
+
+.. code-block:: console
+
+ make EXTRA_CFLAGS=-I<path to ittnotify.h> \
+ EXTRA_LDLIBS="-L<path to libittnotify.a> -littnotify"
+
+Finally, to see wasted iterations in your performance analysis results,
+select the *"Analyze user tasks, events, and counters"* checkbox in the
+*"Analysis Type"* tab when configuring analysis via VTune Amplifier GUI.
+Alternatively, when running VTune Amplifier via command line, specify
+``-knob enable-user-tasks=true`` option.
+
+Collected regions of wasted iterations will be marked on VTune Amplifier's
+timeline as ITT tasks. These ITT tasks have predefined names, containing
+Ethernet device and RX queue identifiers.
+
+
Profiling on ARM64
------------------
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 662a9123..d158be5e 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -187,7 +187,7 @@ Pattern item
Pattern items fall in two categories:
- Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
- IPV6, ICMP, UDP, TCP, SCTP, VXLAN, MPLS, GRE and so on), usually
+ IPV6, ICMP, UDP, TCP, SCTP, VXLAN, MPLS, GRE, ESP and so on), usually
associated with a specification structure.
- Matching meta-data or affecting pattern processing (END, VOID, INVERT, PF,
@@ -955,6 +955,31 @@ Usage example, fuzzy match a TCPv4 packets:
| 4 | END |
+-------+----------+
+Item: ``GTP``, ``GTPC``, ``GTPU``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches a GTPv1 header.
+
+Note: GTP, GTPC and GTPU use the same structure. The GTPC and GTPU items
+are defined for a user-friendly API when creating GTP-C and GTP-U
+flow rules.
+
+- ``v_pt_rsv_flags``: version (3b), protocol type (1b), reserved (1b),
+ extension header flag (1b), sequence number flag (1b), N-PDU number
+ flag (1b).
+- ``msg_type``: message type.
+- ``msg_len``: message length.
+- ``teid``: tunnel endpoint identifier.
+- Default ``mask`` matches teid only.
+
+Item: ``ESP``
+^^^^^^^^^^^^^
+
+Matches an ESP header.
+
+- ``hdr``: ESP header definition (``rte_esp.h``).
+- Default ``mask`` matches SPI only.
+
Actions
~~~~~~~
@@ -972,7 +997,7 @@ They fall in three categories:
additional processing by subsequent flow rules.
- Other non-terminating meta actions that do not affect the fate of packets
- (END, VOID, MARK, FLAG, COUNT).
+ (END, VOID, MARK, FLAG, COUNT, SECURITY).
When several actions are combined in a flow rule, they should all have
different types (e.g. dropping a packet twice is not possible).
@@ -1354,6 +1379,101 @@ rule or if packets are not addressed to a VF in the first place.
| ``vf`` | VF ID to redirect packets to |
+--------------+--------------------------------+
+Action: ``METER``
+^^^^^^^^^^^^^^^^^
+
+Applies a stage of metering and policing.
+
+The metering and policing (MTR) object has to be first created using the
+rte_mtr_create() API function. The ID of the MTR object is specified as
+the action parameter. More than one flow can use the same MTR object through
+the meter action. The MTR object can be further updated or queried using
+the rte_mtr* API.
+
+- Non-terminating by default.
+
+.. _table_rte_flow_action_meter:
+
+.. table:: METER
+
+ +--------------+---------------+
+ | Field | Value |
+ +==============+===============+
+ | ``mtr_id`` | MTR object ID |
+ +--------------+---------------+
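+
+The following is a minimal sketch of attaching a METER action to a flow rule
+(the MTR object with ID 1 is assumed to have been created beforehand with
+rte_mtr_create()):
+
+.. code-block:: c
+
+    struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
+
+    struct rte_flow_action actions[] = {
+        { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
+        { .type = RTE_FLOW_ACTION_TYPE_END },
+    };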
+
+Action: ``SECURITY``
+^^^^^^^^^^^^^^^^^^^^
+
+Perform the security action on flows matched by the pattern items
+according to the configuration of the security session.
+
+This action modifies the payload of matched flows. For INLINE_CRYPTO, the
+security protocol headers and IV are fully provided by the application as
+specified in the flow pattern. The payload of matching packets is
+encrypted on egress, and decrypted and authenticated on ingress.
+For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
+providing full encapsulation and decapsulation of packets in security
+protocols. The flow pattern specifies both the outer security header fields
+and the inner packet fields. The security session specified in the action
+must match the pattern parameters.
+
+The security session specified in the action must be created on the same
+port as the flow action that is being specified.
+
+The ingress/egress flow attribute should match that specified in the
+security session if the security session supports the definition of the
+direction.
+
+Multiple flows can be configured to use the same security session.
+
+- Non-terminating by default.
+
+.. _table_rte_flow_action_security:
+
+.. table:: SECURITY
+
+ +----------------------+--------------------------------------+
+ | Field | Value |
+ +======================+======================================+
+ | ``security_session`` | security session to apply |
+ +----------------------+--------------------------------------+
+
+The following is an example of configuring IPsec inline using the
+INLINE_CRYPTO security session:
+
+The encryption algorithm, keys and salt are part of the opaque
+``rte_security_session``. The SA is identified according to the IP and ESP
+fields in the pattern items.
+
+.. _table_rte_flow_item_esp_inline_example:
+
+.. table:: IPsec inline crypto flow pattern items.
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | Ethernet |
+ +-------+----------+
+ | 1 | IPv4 |
+ +-------+----------+
+ | 2 | ESP |
+ +-------+----------+
+ | 3 | END |
+ +-------+----------+
+
+.. _table_rte_flow_action_esp_inline_example:
+
+.. table:: IPsec inline flow actions.
+
+ +-------+----------+
+ | Index | Action |
+ +=======+==========+
+ | 0 | SECURITY |
+ +-------+----------+
+ | 1 | END |
+ +-------+----------+
+
Negative types
~~~~~~~~~~~~~~
@@ -1401,7 +1521,7 @@ supported and can be created.
.. code-block:: c
int
- rte_flow_validate(uint8_t port_id,
+ rte_flow_validate(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1456,7 +1576,7 @@ actually created and a handle returned.
.. code-block:: c
struct rte_flow *
- rte_flow_create(uint8_t port_id,
+ rte_flow_create(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action *actions[],
@@ -1488,7 +1608,7 @@ performing this step before releasing resources.
.. code-block:: c
int
- rte_flow_destroy(uint8_t port_id,
+ rte_flow_destroy(uint16_t port_id,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -1519,7 +1639,7 @@ port. They are released as with successive calls to ``rte_flow_destroy()``.
.. code-block:: c
int
- rte_flow_flush(uint8_t port_id,
+ rte_flow_flush(uint16_t port_id,
struct rte_flow_error *error);
In the unlikely event of failure, handles are still considered destroyed and
@@ -1547,7 +1667,7 @@ definition.
.. code-block:: c
int
- rte_flow_query(uint8_t port_id,
+ rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
enum rte_flow_action_type action,
void *data,
@@ -1620,7 +1740,7 @@ port and may return errors such as ``ENOTSUP`` ("not supported"):
.. code-block:: c
int
- rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
+ rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
Arguments:
@@ -1678,6 +1798,25 @@ freed by the application, however its pointer can be considered valid only
as long as its associated DPDK port remains configured. Closing the
underlying device or unloading the PMD invalidates it.
+Helpers
+-------
+
+Error initializer
+~~~~~~~~~~~~~~~~~
+
+.. code-block:: c
+
+ static inline int
+ rte_flow_error_set(struct rte_flow_error *error,
+ int code,
+ enum rte_flow_error_type type,
+ const void *cause,
+ const char *message);
+
+This function initializes ``error`` (if non-NULL) with the provided
+parameters and sets ``rte_errno`` to ``code``. A negative error ``code`` is
+then returned.
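+
+For example (a minimal sketch), a PMD callback rejecting an unsupported
+attribute might use it as follows:
+
+.. code-block:: c
+
+   if (attr->egress)
+           return rte_flow_error_set(error, ENOTSUP,
+                                     RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+                                     NULL, "egress is not supported");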
+
Caveats
-------
@@ -1743,13 +1882,11 @@ the legacy filtering framework, which should eventually disappear.
whatsoever). They only make sure these callbacks are non-NULL or return
the ``ENOSYS`` (function not supported) error.
-This interface additionally defines the following helper functions:
+This interface additionally defines the following helper function:
- ``rte_flow_ops_get()``: get generic flow operations structure from a
port.
-- ``rte_flow_error_set()``: initialize generic flow error structure.
-
More will be added over time.
Device compatibility
diff --git a/doc/guides/prog_guide/rte_security.rst b/doc/guides/prog_guide/rte_security.rst
new file mode 100644
index 00000000..71be036c
--- /dev/null
+++ b/doc/guides/prog_guide/rte_security.rst
@@ -0,0 +1,564 @@
+.. BSD LICENSE
+ Copyright 2017 NXP.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Security Library
+================
+
+The security library provides a framework for management and provisioning
+of security protocol operations offloaded to hardware based devices. The
+library defines generic APIs to create and free security sessions which can
+support full protocol offload as well as inline crypto operation with
+NIC or crypto devices. The framework currently only supports the IPSec protocol
+and associated operations; other protocols will be added in the future.
+
+Design Principles
+-----------------
+
+The security library provides an additional offload capability to an existing
+crypto device and/or ethernet device.
+
+.. code-block:: console
+
+ +---------------+
+ | rte_security |
+ +---------------+
+ \ /
+ +-----------+ +--------------+
+ | NIC PMD | | CRYPTO PMD |
+ +-----------+ +--------------+
+
+.. note::
+
+   Currently, the security library does not support multi-process operation.
+   This will be addressed in future releases.
+
+The supported offload types are explained in the sections below.
+
+Inline Crypto
+~~~~~~~~~~~~~
+
+RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+The crypto processing for a security protocol (e.g. IPSec) is performed
+inline during reception and transmission on a NIC port. The flow-based
+security action should be configured on the port.
+
+Ingress Data path - The packet is decrypted in the RX path and the relevant
+crypto status is set in the Rx descriptors. After successful inline crypto
+processing the packet is presented to the host as a regular Rx packet;
+however, all security protocol related headers are still attached to the
+packet. E.g. in the case of IPSec, the IPSec tunnel headers (if any) and
+ESP/AH headers will remain in the packet, but the received packet
+contains the decrypted data where the encrypted data was when the packet
+arrived. The driver Rx path checks the descriptors and, based on the
+crypto status, sets additional flags in the ``rte_mbuf.ol_flags`` field.
+
+.. note::
+
+   The underlying device may not support crypto processing for all ingress
+   packets matching a particular flow (e.g. fragmented packets). Such packets
+   will be passed up as encrypted packets. It is the responsibility of the
+   application to process such encrypted packets using another crypto driver
+   instance.
+
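+As an illustration, an Rx loop using inline crypto might look like the sketch
+below; it assumes the mbuf security flags introduced together with this
+library (``PKT_RX_SEC_OFFLOAD`` and ``PKT_RX_SEC_OFFLOAD_FAILED``):
+
+.. code-block:: c
+
+   uint16_t i, nb_rx;
+
+   nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
+   for (i = 0; i < nb_rx; i++) {
+           if ((pkts[i]->ol_flags & PKT_RX_SEC_OFFLOAD) == 0) {
+                   /* Not processed inline (e.g. a fragment): still
+                    * encrypted, hand over to a lookaside crypto path. */
+                   continue;
+           }
+           if (pkts[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) {
+                   rte_pktmbuf_free(pkts[i]); /* inline processing failed */
+                   continue;
+           }
+           /* Payload decrypted; security headers still present. */
+   }
+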
+Egress Data path - The software prepares the egress packet by adding the
+relevant security protocol headers; only the data itself is left unencrypted
+by the software. The driver will configure the tx descriptors accordingly.
+The hardware device will encrypt the data before sending the packet out.
+
+.. note::
+
+ The underlying device may support post encryption TSO.
+
+.. code-block:: console
+
+ Egress Data Path
+ |
+ +--------|--------+
+ | egress IPsec |
+ | | |
+ | +------V------+ |
+ | | SADB lookup | |
+ | +------|------+ |
+ | +------V------+ |
+ | | Tunnel | | <------ Add tunnel header to packet
+ | +------|------+ |
+ | +------V------+ |
+ | | ESP | | <------ Add ESP header without trailer to packet
+ | | | | <------ Mark packet to be offloaded, add trailer
+ | +------|------+ | meta-data to mbuf
+ +--------V--------+
+ |
+ +--------V--------+
+ | L2 Stack |
+ +--------|--------+
+ |
+ +--------V--------+
+ | |
+ | NIC PMD | <------ Set hw context for inline crypto offload
+ | |
+ +--------|--------+
+ |
+ +--------|--------+
+ | HW ACCELERATED | <------ Packet Encryption and
+ | NIC | Authentication happens inline
+ | |
+ +-----------------+
+
+
+Inline protocol offload
+~~~~~~~~~~~~~~~~~~~~~~~
+
+RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+The crypto and protocol processing for security protocol (e.g. IPSec)
+is processed inline during receive and transmission. The flow based
+security action should be configured on the port.
+
+Ingress Data path - The packet is decrypted in the RX path and the relevant
+crypto status is set in the Rx descriptors. After successful inline crypto
+processing the packet is presented to the host as a regular Rx packet,
+but all security protocol related headers are optionally removed from the
+packet. E.g. in the case of IPSec, the IPSec tunnel headers (if any) and
+ESP/AH headers will be removed from the packet, and the received packet
+will contain the decrypted data only. The driver Rx path checks the
+descriptors and, based on the crypto status, sets additional flags in the
+``rte_mbuf.ol_flags`` field.
+
+.. note::
+
+   The underlying device in this case is stateful. It is expected that
+   the device supports crypto processing for all kinds of packets matching
+   a given flow, including fragmented packets (post reassembly).
+   E.g. in the case of IPSec the device may internally manage anti-replay
+   and will provide a configuration option for the anti-replay behavior, i.e.
+   to drop the packets or pass them to the driver with error flags set in
+   the descriptor.
+
+Egress Data path - The software will send the plain packet without any
+security protocol headers added to the packet. The driver will configure
+the security index and other requirements in the tx descriptors.
+The hardware device will do security processing on the packet, which includes
+adding the relevant protocol headers and encrypting the data before sending
+the packet out. The software should make sure that the buffer
+has the required headroom and tailroom for any protocol header addition. The
+software may also do early fragmentation if the resulting packet is expected
+to exceed the MTU size.
+
+
+.. note::
+
+   The underlying device will manage the state information required for
+   egress processing. E.g. in the case of IPSec, the sequence number will be
+   added to the packet; however, the device shall provide an indication when
+   the sequence number is about to overflow. The underlying device may
+   support post encryption TSO.
+
+.. code-block:: console
+
+ Egress Data Path
+ |
+ +--------|--------+
+ | egress IPsec |
+ | | |
+ | +------V------+ |
+ | | SADB lookup | |
+ | +------|------+ |
+ | +------V------+ |
+ | | Desc | | <------ Mark packet to be offloaded
+ | +------|------+ |
+ +--------V--------+
+ |
+ +--------V--------+
+ | L2 Stack |
+ +--------|--------+
+ |
+ +--------V--------+
+ | |
+ | NIC PMD | <------ Set hw context for inline crypto offload
+ | |
+ +--------|--------+
+ |
+ +--------|--------+
+ | HW ACCELERATED | <------ Add tunnel, ESP header etc header to
+ | NIC | packet. Packet Encryption and
+ | | Authentication happens inline.
+ +-----------------+
+
+
+Lookaside protocol offload
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+This extends librte_cryptodev to support the programming of an IPsec
+Security Association (SA), including its definition, as part of crypto
+session creation. In addition to standard crypto processing, as defined by
+the cryptodev, the security protocol processing is also offloaded to the
+crypto device.
+
+Decryption: The packet is sent to the crypto device for security
+protocol processing. The device will decrypt the packet and will also
+optionally remove additional security headers from the packet.
+E.g. in the case of IPSec, the IPSec tunnel headers (if any) and ESP/AH
+headers will be removed from the packet, so that the decrypted packet may
+contain plain data only.
+
+.. note::
+
+   In the case of IPSec the device may internally manage anti-replay and
+   will provide a configuration option for the anti-replay behavior, i.e. to
+   drop the packets or pass them to the driver with error flags set in the
+   descriptor.
+
+Encryption: The software will submit the packet to the cryptodev as usual
+for encryption; in this case the hardware device will also add the relevant
+security protocol header while encrypting the packet. The software
+should make sure that the buffer has the required headroom and tailroom
+for any protocol header addition.
+
+.. note::
+
+   In the case of IPSec, the sequence number will be added to the packet;
+   the device shall provide an indication when the sequence number is about
+   to overflow.
+
+.. code-block:: console
+
+ Egress Data Path
+ |
+ +--------|--------+
+ | egress IPsec |
+ | | |
+ | +------V------+ |
+ | | SADB lookup | | <------ SA maps to cryptodev session
+ | +------|------+ |
+ | +------|------+ |
+ | | \--------------------\
+ | | Crypto | | | <- Crypto processing through
+ | | /----------------\ | inline crypto PMD
+ | +------|------+ | | |
+ +--------V--------+ | |
+ | | |
+ +--------V--------+ | | create <-- SA is added to hw
+ | L2 Stack | | | inline using existing create
+ +--------|--------+ | | session sym session APIs
+ | | | |
+ +--------V--------+ +---|---|----V---+
+ | | | \---/ | | <--- Add tunnel, ESP header etc
+ | NIC PMD | | INLINE | | header to packet.Packet
+ | | | CRYPTO PMD | | Encryption/Decryption and
+ +--------|--------+ +----------------+ Authentication happens
+ | inline.
+ +--------|--------+
+ | NIC |
+ +--------|--------+
+ V
+
+Device Features and Capabilities
+---------------------------------
+
+Device Capabilities For Security Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The capabilities of a device (crypto or ethernet) which supports security
+operations are defined by the security action type, security protocol,
+protocol capabilities and corresponding crypto capabilities for security.
+For the full scope of the security capability, see the definition of the
+``rte_security_capability`` structure in the *DPDK API Reference*.
+
+.. code-block:: c
+
+ struct rte_security_capability;
+
+Each driver (crypto or ethernet) defines its own private array of capabilities
+for the operations it supports. Below is an example of the capabilities for a
+PMD which supports the IPSec protocol.
+
+.. code-block:: c
+
+ static const struct rte_security_capability pmd_security_capabilities[] = {
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = pmd_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = pmd_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+ };
+ static const struct rte_cryptodev_capabilities pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ .auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 },
+ .iv_size = { 0 }
+ }
+ }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ .cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }
+ }
+ }
+   };
+
+
+Capabilities Discovery
+~~~~~~~~~~~~~~~~~~~~~~
+
+Discovering the features and capabilities of a driver (crypto/ethernet)
+is achieved through the ``rte_security_capabilities_get()`` function.
+
+.. code-block:: c
+
+   const struct rte_security_capability *
+   rte_security_capabilities_get(struct rte_security_ctx *instance);
+
+This allows the user to query a specific driver and get all the device's
+security capabilities. It returns an array of ``rte_security_capability``
+structures which contains all the capabilities for that device.
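+
+As an illustration, an application could scan the returned array for IPsec
+inline crypto support as in the sketch below; following the example above,
+the array is terminated by an entry with action
+``RTE_SECURITY_ACTION_TYPE_NONE``:
+
+.. code-block:: c
+
+   const struct rte_security_capability *cap;
+
+   for (cap = rte_security_capabilities_get(ctx);
+        cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
+        cap++) {
+           if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+               cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC &&
+               cap->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+                   break; /* device can encrypt ESP inline on egress */
+   }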
+
+Security Session Create/Free
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Security sessions are created to store the immutable fields of a particular
+Security Association for a particular protocol. A session is defined by a
+security session configuration structure, which is used in the operation
+processing of a packet flow. Sessions are used to manage protocol specific
+information as well as crypto parameters. Security sessions cache this
+immutable data in an optimal way for the underlying PMD, which allows
+further acceleration of offloaded crypto workloads.
+
+The Security framework provides APIs to create and free sessions for crypto/ethernet
+devices, where sessions are mempool objects. It is the application's
+responsibility to create and manage the session mempools. The mempool object
+size should be able to accommodate the driver's private security session data.
+
+Once the session mempools have been created, ``rte_security_session_create()``
+is used to allocate and initialize a session for the required crypto/ethernet device.
+
+Session APIs need a parameter ``rte_security_ctx`` to identify the
+crypto/ethernet security ops. This parameter can be retrieved using the APIs
+``rte_cryptodev_get_sec_ctx()`` (for a crypto device) or
+``rte_eth_dev_get_sec_ctx()`` (for an ethernet port).
+
+Sessions already created can be updated with ``rte_security_session_update()``.
+
+When a session is no longer used, the user must call ``rte_security_session_destroy()``
+to free the driver private session data and return the memory back to the mempool.
+
+For lookaside protocol offload to a hardware crypto device, the
+``rte_crypto_op`` created by the application is attached to the security
+session by the ``rte_security_attach_session()`` API.
+
+For inline crypto and inline protocol offload, device-specific metadata is
+updated in the mbuf using ``rte_security_set_pkt_metadata()`` if the device
+advertises ``DEV_TX_OFFLOAD_SEC_NEED_MDATA``.
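+
+Putting these calls together, a minimal sketch for an ethernet port might
+look as follows; the session configuration ``conf`` (see the next section)
+and the session mempool ``sess_mp`` are assumed to have been prepared by the
+application:
+
+.. code-block:: c
+
+   struct rte_security_ctx *ctx =
+           (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(port_id);
+   struct rte_security_session *sess;
+
+   sess = rte_security_session_create(ctx, &conf, sess_mp);
+   if (sess == NULL)
+           rte_exit(EXIT_FAILURE, "Cannot create security session\n");
+
+   /* Inline offload: tag egress mbufs with device metadata if required. */
+   rte_security_set_pkt_metadata(ctx, sess, mbuf, NULL);
+
+   /* ... */
+
+   rte_security_session_destroy(ctx, sess);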
+
+Security session configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The security session configuration structure is defined as
+``rte_security_session_conf``:
+
+.. code-block:: c
+
+ struct rte_security_session_conf {
+ enum rte_security_session_action_type action_type;
+ /**< Type of action to be performed on the session */
+ enum rte_security_session_protocol protocol;
+ /**< Security protocol to be configured */
+ union {
+ struct rte_security_ipsec_xform ipsec;
+ struct rte_security_macsec_xform macsec;
+ };
+ /**< Configuration parameters for security session */
+ struct rte_crypto_sym_xform *crypto_xform;
+ /**< Security Session Crypto Transformations */
+ };
+
+The configuration structure reuses the ``rte_crypto_sym_xform`` struct for
+crypto related configuration. The ``rte_security_session_action_type`` enum
+is used to specify whether the session is configured for Lookaside Protocol
+offload, Inline Crypto or Inline Protocol offload.
+
+.. code-block:: c
+
+ enum rte_security_session_action_type {
+ RTE_SECURITY_ACTION_TYPE_NONE,
+ /**< No security actions */
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ /**< Crypto processing for security protocol is processed inline
+ * during transmission */
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ /**< All security protocol processing is performed inline during
+ * transmission */
+ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
+ /**< All security protocol processing including crypto is performed
+ * on a lookaside accelerator */
+ };
+
+The ``rte_security_session_protocol`` enum is defined as:
+
+.. code-block:: c
+
+ enum rte_security_session_protocol {
+ RTE_SECURITY_PROTOCOL_IPSEC,
+ /**< IPsec Protocol */
+ RTE_SECURITY_PROTOCOL_MACSEC,
+ /**< MACSec Protocol */
+ };
+
+Currently the library defines configuration parameters for IPSec only. For
+other protocols like MACSec, structures and enums are defined as placeholders
+which will be updated in the future.
+
+IPsec related configuration parameters are defined in ``rte_security_ipsec_xform``
+
+.. code-block:: c
+
+ struct rte_security_ipsec_xform {
+ uint32_t spi;
+ /**< SA security parameter index */
+ uint32_t salt;
+ /**< SA salt */
+ struct rte_security_ipsec_sa_options options;
+ /**< various SA options */
+ enum rte_security_ipsec_sa_direction direction;
+ /**< IPSec SA Direction - Egress/Ingress */
+ enum rte_security_ipsec_sa_protocol proto;
+ /**< IPsec SA Protocol - AH/ESP */
+ enum rte_security_ipsec_sa_mode mode;
+ /**< IPsec SA Mode - transport/tunnel */
+ struct rte_security_ipsec_tunnel_param tunnel;
+ /**< Tunnel parameters, NULL for transport mode */
+ };
+
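+As an illustration, a configuration for an egress ESP tunnel SA could be
+assembled as in the sketch below; the SPI, salt and the ``crypto_xform``
+chain are application-provided, and the tunnel addresses are omitted for
+brevity:
+
+.. code-block:: c
+
+   struct rte_security_session_conf conf = {
+           .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+           .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+           .ipsec = {
+                   .spi = sa_spi,
+                   .salt = sa_salt,
+                   .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+                   .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+                   .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+           },
+           .crypto_xform = &cipher_auth_xform, /* sym xform chain */
+   };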
+
+Security API
+~~~~~~~~~~~~
+
+The ``rte_security`` Library API is described in the *DPDK API Reference*
+document.
+
+Flow based Security Session
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the case of NIC based offloads, the security session specified in the
+``rte_flow_action_security`` must be created on the same port as the
+flow action that is being specified.
+
+The ingress/egress flow attribute should match that specified in the security
+session if the security session supports the definition of the direction.
+
+Multiple flows can be configured to use the same security session. For
+example, if the security session specifies an egress IPsec SA, then multiple
+flows can map to that SA. In the case of an ingress IPsec SA, it is only
+valid to have a single flow mapping to that security session.
+
+.. code-block:: console
+
+ Configuration Path
+ |
+ +--------|--------+
+ | Add/Remove |
+ | IPsec SA | <------ Build security flow action of
+ | | | ipsec transform
+ |--------|--------|
+ |
+ +--------V--------+
+ | Flow API |
+ +--------|--------+
+ |
+ +--------V--------+
+ | |
+ | NIC PMD | <------ Add/Remove SA to/from hw context
+ | |
+ +--------|--------+
+ |
+ +--------|--------+
+ | HW ACCELERATED |
+ | NIC |
+ | |
+ +--------|--------+
+
+* Add/Delete SA flow:
+  To add a new inline SA, construct a ``rte_flow_item`` for Ethernet + IP +
+  ESP using the SA selectors, with the ``rte_crypto_ipsec_xform`` as the
+  ``rte_flow_action``. Note that any ``rte_flow_item`` may be empty, which
+  means it is not checked.
+
+.. code-block:: console
+
+ In its most basic form, IPsec flow specification is as follows:
+ +-------+ +----------+ +--------+ +-----+
+ | Eth | -> | IP4/6 | -> | ESP | -> | END |
+ +-------+ +----------+ +--------+ +-----+
+
+   However, the API can represent IPsec crypto offload with any encapsulation:
+ +-------+ +--------+ +-----+
+ | Eth | -> ... -> | ESP | -> | END |
+ +-------+ +--------+ +-----+
diff --git a/doc/guides/prog_guide/source_org.rst b/doc/guides/prog_guide/source_org.rst
index d5d01f38..7aab4b45 100644
--- a/doc/guides/prog_guide/source_org.rst
+++ b/doc/guides/prog_guide/source_org.rst
@@ -108,7 +108,6 @@ The drivers directory has a *net* subdirectory which contains::
+-- szedata2 # SZEDATA2 poll mode driver
+-- virtio # Virtio poll mode driver
+-- vmxnet3 # VMXNET3 poll mode driver
- +-- xenvirt # Xen virtio poll mode driver
.. note::
diff --git a/doc/guides/prog_guide/traffic_metering_and_policing.rst b/doc/guides/prog_guide/traffic_metering_and_policing.rst
new file mode 100644
index 00000000..89f0e68b
--- /dev/null
+++ b/doc/guides/prog_guide/traffic_metering_and_policing.rst
@@ -0,0 +1,102 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Traffic Metering and Policing API
+=================================
+
+
+Overview
+--------
+
+This is the generic API for the Quality of Service (QoS) Traffic Metering and
+Policing (MTR) of Ethernet devices. This API is agnostic of the underlying HW,
+SW or mixed HW-SW implementation.
+
+The main features are:
+
+* Part of DPDK rte_ethdev API
+* Capability query API
+* Metering algorithms: RFC 2697 Single Rate Three Color Marker (srTCM), RFC 2698
+ and RFC 4115 Two Rate Three Color Marker (trTCM)
+* Policer actions (per meter output color): recolor, drop
+* Statistics (per policer output color)
+
+Configuration steps
+-------------------
+
+The metering and policing stage typically sits on top of flow classification,
+which is why the MTR objects are enabled through a special "meter" action.
+
+The MTR objects are created and updated in their own name space (``rte_mtr``)
+within the ``librte_ether`` library. Whether an MTR object is private to a
+flow or potentially shared by several flows has to be specified at its
+creation time.
+
+Once successfully created, an MTR object is hooked into the RX processing path
+of the Ethernet device by linking it to one or several flows through the
+dedicated "meter" flow action. One or several "meter" actions can be registered
+for the same flow. An MTR object can only be destroyed if there are no flows
+using it.
+
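+A minimal configuration sketch is shown below; it assumes the ``rte_mtr``
+names introduced in this release and creates a srTCM profile, an MTR object
+using it, and the flow "meter" action that links the object to a flow:
+
+.. code-block:: c
+
+   struct rte_mtr_error mtr_err;
+
+   /* 1. Add a metering profile (srTCM, RFC 2697). */
+   struct rte_mtr_meter_profile profile = {
+           .alg = RTE_MTR_SRTCM_RFC2697,
+           .srtcm_rfc2697 = {
+                   .cir = 1000000, /* committed rate, bytes/s */
+                   .cbs = 2048,    /* committed burst, bytes */
+                   .ebs = 2048,    /* excess burst, bytes */
+           },
+   };
+   rte_mtr_meter_profile_add(port_id, 0, &profile, &mtr_err);
+
+   /* 2. Create an MTR object using this profile; drop red packets. */
+   struct rte_mtr_params params = {
+           .meter_profile_id = 0,
+           .meter_enable = 1,
+           .action = { [RTE_MTR_RED] = MTR_POLICER_ACTION_DROP },
+   };
+   rte_mtr_create(port_id, 0, &params, 1 /* shared */, &mtr_err);
+
+   /* 3. Hook the MTR object to a flow via the "meter" flow action. */
+   struct rte_flow_action_meter meter = { .mtr_id = 0 };
+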
+Run-time processing
+-------------------
+
+Traffic metering determines the color for the current packet (green, yellow,
+red) based on the previous history for this flow as maintained by the MTR
+object. The policer can do nothing, override the color of the packet, or drop
+the packet. Statistics counters are maintained per MTR object, as configured.
+
+The processing done for each input packet hitting an MTR object is:
+
+* Traffic metering: The packet is assigned a color (the meter output color)
+ based on the previous traffic history reflected in the current state of the
+ MTR object, according to the specific traffic metering algorithm. The
+ traffic metering algorithm can typically work in color aware mode, in which
+ case the input packet already has an initial color (the input color), or in
+ color blind mode, which is equivalent to considering all input packets
+ initially colored as green.
+
+* Policing: There is a separate policer action configured for each meter
+ output color, which can:
+
+ * Drop the packet.
+
+ * Keep the same packet color: the policer output color matches the meter
+ output color (essentially a no-op action).
+
+ * Recolor the packet: the policer output color is set to a different color
+ than the meter output color. The policer output color is the output color
+ of the packet, which is set in the packet meta-data (i.e. struct
+ ``rte_mbuf::sched::color``).
+
+* Statistics: The set of counters maintained for each MTR object is
+ configurable and subject to the implementation support. This set includes
+ the number of packets and bytes dropped or passed for each output color.
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 59792907..e71bdbd5 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -110,6 +110,20 @@ The following is an overview of some key Vhost API functions:
of those segments, thus the fewer the segments, the quicker we will get
the mapping. NOTE: we may speed it by using tree searching in future.
+ - ``RTE_VHOST_USER_IOMMU_SUPPORT``
+
+ IOMMU support will be enabled when this flag is set. It is disabled by
+ default.
+
+    Enabling this flag makes it possible to use the guest vIOMMU to protect
+    vhost from accessing memory the virtio device isn't allowed to, when the
+    feature is negotiated and an IOMMU device is declared.
+
+    However, this feature enables vhost-user's reply-ack protocol feature,
+    whose implementation is buggy in Qemu v2.7.0-v2.9.0 when doing multiqueue.
+    Enabling this flag with these Qemu versions results in Qemu being blocked
+    when multiple queue pairs are declared.
+
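+    For example (a minimal sketch), IOMMU support can be requested at
+    registration time:
+
+    .. code-block:: c
+
+       rte_vhost_driver_register("/tmp/sock0",
+                                 RTE_VHOST_USER_IOMMU_SUPPORT);
+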
* ``rte_vhost_driver_set_features(path, features)``
This function sets the feature bits the vhost-user driver supports. The
@@ -129,8 +143,7 @@ The following is an overview of some key Vhost API functions:
* ``destroy_device(int vid)``
- This callback is invoked when a virtio device shuts down (or when the
- vhost connection is broken).
+ This callback is invoked when a virtio device is paused or shut down.
* ``vring_state_changed(int vid, uint16_t queue_id, int enable)``
@@ -143,6 +156,18 @@ The following is an overview of some key Vhost API functions:
``VHOST_F_LOG_ALL`` will be set/cleared at the start/end of live
migration, respectively.
+ * ``new_connection(int vid)``
+
+ This callback is invoked on new vhost-user socket connection. If DPDK
+ acts as the server the device should not be deleted before
+ ``destroy_connection`` callback is received.
+
+ * ``destroy_connection(int vid)``
+
+ This callback is invoked when vhost-user socket connection is closed.
+ It indicates that device with id ``vid`` is no longer in use and can be
+ safely deleted.
+
* ``rte_vhost_driver_disable/enable_features(path, features))``
This function disables/enables some features. For example, it can be used to
diff --git a/doc/guides/prog_guide/writing_efficient_code.rst b/doc/guides/prog_guide/writing_efficient_code.rst
index 8223acee..d7ac6778 100644
--- a/doc/guides/prog_guide/writing_efficient_code.rst
+++ b/doc/guides/prog_guide/writing_efficient_code.rst
@@ -105,6 +105,21 @@ meaning that if all memory access operations are done on the first channel only,
By default, the :ref:`Mempool Library <Mempool_Library>` spreads the addresses of objects among memory channels.
+Locking memory pages
+~~~~~~~~~~~~~~~~~~~~
+
+The underlying operating system is allowed to load/unload memory pages at its own discretion.
+These page loads can impact performance, as the process is put on hold while the kernel fetches them.
+
+To avoid this, pre-load and lock the pages into memory with the ``mlockall()`` call.
+
+.. code-block:: c
+
+ if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
+ RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
+ strerror(errno));
+ }
+
Communication Between lcores
----------------------------
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 3362f335..7fcebf13 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -8,14 +8,6 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
-* eal: the following functions are deprecated starting from 17.05 and will
- be removed in 17.11:
-
- - ``rte_set_log_level``, replaced by ``rte_log_set_global_level``
- - ``rte_get_log_level``, replaced by ``rte_log_get_global_level``
- - ``rte_set_log_type``, replaced by ``rte_log_set_level``
- - ``rte_get_log_type``, replaced by ``rte_log_get_level``
-
* eal: several API and ABI changes are planned for ``rte_devargs`` in v17.11.
The format of device command line parameters will change. The bus will need
to be explicitly stated in the device declaration. The enum ``rte_devtype``
@@ -29,29 +21,12 @@ Deprecation Notices
- ``rte_eal_devargs_type_count``
- ``rte_eal_parse_devargs_str``, replaced by ``rte_eal_devargs_parse``
-* eal: the support of Xen dom0 will be removed from EAL in 17.11; and with
- that, drivers/net/xenvirt and examples/vhost_xen will also be removed.
-
-* eal: An ABI change is planned for 17.11 to make DPDK aware of IOVA address
- translation scheme.
- Reference to phys address in EAL data-structure or functions may change to
- IOVA address or more appropriate name.
- The change will be only for the name.
- Functional aspects of the API or data-structure will remain same.
-
-* The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
- are respectively replaced by PKT_RX_VLAN_STRIPPED and
- PKT_RX_QINQ_STRIPPED, that are better described. The old flags and
- their behavior will be kept until 17.08 and will be removed in 17.11.
-
-* mempool: The following will be modified in 17.11:
+* pci: Several exposed functions are misnamed.
+ The following functions are deprecated starting from v17.11 and are replaced:
- - ``rte_mempool_xmem_size`` and ``rte_mempool_xmem_usage`` need to know
- the mempool flag status so adding new param rte_mempool in those API.
- - Removing __rte_unused int flag param from ``rte_mempool_generic_put``
- and ``rte_mempool_generic_get`` API.
- - ``rte_mempool`` flags data type will changed from int to
- unsigned int.
+ - ``eal_parse_pci_BDF`` replaced by ``rte_pci_addr_parse``
+ - ``eal_parse_pci_DomBDF`` replaced by ``rte_pci_addr_parse``
+ - ``rte_eal_compare_pci_addr`` replaced by ``rte_pci_addr_cmp``
* ethdev: Tx offloads will no longer be enabled by default in 17.11.
Instead, the ``rte_eth_txmode`` structure will be extended with
@@ -68,55 +43,5 @@ Deprecation Notices
Target release for removal of the legacy API will be defined once most
PMDs have switched to rte_flow.
-* ethdev: The device flag advertizing hotplug capability
- ``RTE_ETH_DEV_DETACHABLE`` is not needed anymore and will be removed in
- v17.11.
- This capability is verified upon calling the relevant hotplug functions in EAL
- by checking that the ``unplug`` ops is set in the bus. This verification is
- done by the EAL and not by the ``ethdev`` layer anymore. Users relying on this
- flag being present only have to remove their checks to follow the change.
-
-* ABI/API changes are planned for 17.11 in all structures which include port_id
- definition such as "rte_eth_dev_data", "rte_port_ethdev_reader_params",
- "rte_port_ethdev_writer_params", and so on. The definition of port_id will be
- changed from 8 bits to 16 bits in order to support more than 256 ports in
- DPDK. All APIs which have port_id parameter will be changed at the same time.
-
-* ethdev: An ABI change is planned for 17.11 for the structure rte_eth_dev_data.
- The size of the unique name will increase RTE_ETH_NAME_MAX_LEN from 32 to
- 64 characters to allow using a globally unique identifier (GUID) in this field.
-
-* ethdev: new parameters - ``rte_security_capabilities`` and
- ``rte_security_ops`` will be added to ``rte_eth_dev_info`` and
- ``rte_eth_dev`` respectively to support security operations like
- ipsec inline.
-
-* cryptodev: new parameters - ``rte_security_capabilities`` and
- ``rte_security_ops`` will be added to ``rte_cryptodev_info`` and
- ``rte_cryptodev`` respectively to support security protocol offloaded
- operations.
-
-* cryptodev: the following function is deprecated starting from 17.08 and will
- be removed in 17.11:
-
- - ``rte_cryptodev_create_vdev``
-
-* cryptodev: the following function will be static in 17.11 and included
- by all crypto drivers, therefore, will not be public:
-
- - ``rte_cryptodev_vdev_pmd_init``
-
-* cryptodev: the following function will have an extra parameter, passing a
- statically allocated crypto driver structure, instead of calling malloc,
- in 17.11:
-
- - ``rte_cryptodev_allocate_driver``
-
* librte_meter: The API will change to accommodate configuration profiles.
Most of the API functions will have an additional opaque parameter.
-
-* librte_table: The ``key_mask`` parameter will be added to all the hash tables
- that currently do not have it, as well as to the hash compute function prototype.
- The non-"do-sig" versions of the hash tables will be removed
- (including the ``signature_offset`` parameter)
- and the "do-sig" versions renamed accordingly.
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index b3c8090a..35659d8a 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -36,6 +36,7 @@ Release Notes
:numbered:
rel_description
+ release_17_11
release_17_08
release_17_05
release_17_02
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
new file mode 100644
index 00000000..e6e4407c
--- /dev/null
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -0,0 +1,600 @@
+DPDK Release 17.11
+==================
+
+.. **Read this first.**
+
+ The text in the sections below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text:
+ ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ xdg-open build/doc/html/guides/rel_notes/release_17_11.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample
+ format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to
+ understand the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like
+ this:
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Extended port_id range from uint8_t to uint16_t.**
+
+  Increased the port_id range from 8 bits to 16 bits in order to support more
+  than 256 ports in DPDK. All ethdev APIs which have port_id as a parameter
+  were changed at the same time.
+
+* **Modified the return type of rte_eth_stats_reset.**
+
+ Changed return type of ``rte_eth_stats_reset`` from ``void`` to ``int``
+ so the caller may know whether a device supports the operation or not
+ and if the operation was carried out.
+
+* **Added a new driver for Marvell Armada 7k/8k devices.**
+
+ Added the new mrvl net driver for Marvell Armada 7k/8k devices. See the
+ "Network Interface Controller Drivers" document for more details on this new
+ driver.
+
+* **Updated mlx5 driver.**
+
+ Updated the mlx5 driver including the following changes:
+
+  * Enabled the PMD to run on top of the upstream linux kernel and rdma-core
+    libs, removing the dependency on specific Mellanox OFED libraries.
+ * Improved PMD latency performance.
+ * Improved PMD memory footprint.
+ * Supported vectorized Rx/Tx burst for ARMv8.
+ * Supported secondary process.
+ * Supported flow counters.
+ * Supported Rx hardware timestamp offload.
+ * Supported device removal event.
+
+* **Added SoftNIC PMD.**
+
+ Added new SoftNIC PMD. This virtual device offers applications a software
+ fallback support for traffic management.
+
+* **Added support for NXP DPAA Devices.**
+
+ Added support for NXP's DPAA devices - LS104x series. This includes:
+
+ * DPAA Bus driver
+ * DPAA Mempool driver for supporting offloaded packet memory pool
+ * DPAA PMD for DPAA devices
+
+ See the "Network Interface Controller Drivers" document for more details of
+ this new driver.
+
+* **Updated support for Cavium OCTEONTX Device.**
+
+  Updated support for Cavium's OCTEONTX device (CN83xx). This includes:
+
+ * OCTEONTX Mempool driver for supporting offloaded packet memory pool
+ * OCTEONTX Ethdev PMD
+ * OCTEONTX Eventdev-Ethdev Rx adapter
+
+ See the "Network Interface Controller Drivers" document for more details of
+ this new driver.
+
+* **nfp: Added PF support.**
+
+  Previously, Netronome's NFP PMD supported only VFs. PF support is
+  currently limited to a basic DPDK port and has no VF management yet.
+
+  PF support comes with firmware upload support, which allows the PMD to
+  work independently of the kernel netdev NFP drivers.
+
+ NFP 4000 devices are also now supported along with previous 6000 devices.
+
+* **Updated bnxt PMD.**
+
+ Major enhancements include:
+
+ * Support for Flow API
+ * Support for Tx and Rx descriptor status functions
+
+* **Added bus-agnostic functions to cryptodev for PMD initialisation.**
+
+  Added new PMD assist functions ``rte_cryptodev_pmd_parse_input_args()``,
+  ``rte_cryptodev_pmd_create()`` and ``rte_cryptodev_pmd_destroy()`` which
+  are bus independent and allow drivers to manage the creation and
+  destruction of new device instances.
+
+* **Updated QAT crypto PMD.**
+
+ Performance enhancements:
+
+ * Removed atomics from the internal queue pair structure.
+  * Coalesced writes to HEAD CSR on response processing.
+  * Coalesced writes to TAIL CSR on request processing.
+
+ Additional support for:
+
+ * AES CCM algorithm.
+
+* **Updated the AESNI MB PMD.**
+
+ The AESNI MB PMD has been updated with additional support for:
+
+ * DES CBC algorithm.
+ * DES DOCSIS BPI algorithm.
+
+ This requires the IPSec Multi-buffer library 0.47. For more details,
+  check out the AESNI MB PMD documentation.
+
+* **Updated the OpenSSL PMD.**
+
+ The OpenSSL PMD has been updated with additional support for:
+
+ * DES CBC algorithm.
+ * AES CCM algorithm.
+
+* **Added NXP DPAA SEC crypto PMD.**
+
+ A new "dpaa_sec" hardware based crypto PMD for NXP DPAA devices has been
+ added. See the "Crypto Device Drivers" document for more details on this
+ driver.
+
+* **Added MRVL crypto PMD.**
+
+ A new crypto PMD has been added, which provides several ciphering and hashing
+ algorithms. All cryptography operations use the MUSDK library crypto API.
+
+* **Added a new benchmarking mode to the dpdk-test-crypto-perf application.**
+
+  Added a new "PMD cyclecount" benchmark mode to the dpdk-test-crypto-perf
+  application that displays a more detailed breakdown of the CPU cycles used
+  by hardware acceleration.
+
+* **Added the Security Offload Library.**
+
+  Added an experimental library - rte_security. It provides security APIs
+  for protocols like IPSec, using inline IPSec offload to an ethernet device
+  or full protocol offload with a lookaside crypto device.
+
+ See the "Security_Library" section of the DPDK Programmers Guide document,
+ for more information.
+
+* **Updated DPAA2_SEC crypto driver.**
+
+ Updated dpaa2_sec crypto PMD to support rte_security lookaside protocol
+ offload for IPSec.
+
+* **Updated IXGBE ethernet driver.**
+
+ Updated ixgbe ethernet PMD to support rte_security inline IPSec offload.
+
+* **Updated the ipsec-secgw application.**
+
+  Updated the ipsec-secgw sample application to support rte_security actions
+  for inline IPsec and full protocol offload using a lookaside crypto device.
+
+* **Added IOMMU support to libvhost-user.**
+
+  Implemented device IOTLB in the vhost-user backend, and enabled Virtio's
+  IOMMU feature. The feature is disabled by default, and can be enabled by
+  setting the ``RTE_VHOST_USER_IOMMU_SUPPORT`` flag at vhost device
+  registration time.
+
+* **Added the Event Ethernet Adapter Library.**
+
+  Added the Event Ethernet Adapter library. It provides APIs for
+  eventdev applications to configure the ethdev to eventdev packet flow.
+
+* **Updated DPAA2 Event PMD.**
+
+ Added support for eventdev ethernet adapter for DPAA2.
+
+* **Added Membership library (rte_member).**
+
+ Added membership library. It provides an API for DPDK applications to insert a
+ new member, delete an existing member, or query the existence of a member in a
+ given set, or a group of sets. For the case of a group of sets the library
+ will return not only whether the element has been inserted before in one of
+ the sets but also which set it belongs to.
+
+ The Membership Library is an extension and generalization of a traditional
+ filter (for example Bloom Filter) structure that has multiple usages in a wide
+ variety of workloads and applications. In general, the Membership Library is a
+  data structure that provides a “set-summary” and responds to set-membership
+  queries about whether a certain member belongs to a set(s).
+
+ See the :ref:`Membership Library <Member_Library>` documentation in
+ the Programmers Guide document, for more information.
+
+* **Added the Generic Segmentation Offload Library.**
+
+ Added the Generic Segmentation Offload (GSO) library to enable
+  applications to split large packets (e.g. 64KB) into smaller
+  ones (e.g. 1500B MTU). Supported packet types are:
+
+ * TCP/IPv4 packets.
+ * VxLAN packets, which must have an outer IPv4 header, and contain
+ an inner TCP/IPv4 packet.
+ * GRE packets, which must contain an outer IPv4 header, and inner
+ TCP/IPv4 headers.
+
+ The GSO library doesn't check if the input packets have correct
+ checksums, and doesn't update checksums for output packets.
+ Additionally, the GSO library doesn't process IP fragmented packets.
+
+* **Added the Flow Classification Library.**
+
+  Added the Flow Classification library. It provides an API for DPDK
+  applications to classify an input packet by matching it against a set of
+  flow rules. It uses the librte_table API to manage the flow rules.
+
+
+Resolved Issues
+---------------
+
+.. This section should contain bug fixes added to the relevant
+ sections. Sample format:
+
+ * **code/section Fixed issue in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description of the resolved issue in the past
+ tense.
+
+ The title should contain the code/lib section like a commit message.
+
+ Add the entries in alphabetic order in the relevant sections below.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+
+EAL
+~~~
+
+* **Service core fails to call service callback due to atomic lock.**
+
+  In a specific configuration of multi-thread unsafe services and service
+  cores, a service core previously did not correctly release the atomic lock
+  on the service. This would result in the cores polling the service but
+  never executing its callback, since it appeared that another thread was
+  already executing it. The logic for atomic locking of the services has
+  been fixed and refactored for readability.
+
+Drivers
+~~~~~~~
+
+
+Libraries
+~~~~~~~~~
+
+
+Examples
+~~~~~~~~
+
+
+Other
+~~~~~
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+ * **Add title in present tense with full stop.**
+
+ Add a short 1-2 sentence description of the known issue in the present
+ tense. Add information on any known workarounds.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past
+ tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Ethdev device name length increased.**
+
+  The size of the internal device name has been increased to 64 characters
+  to allow storing longer bus-specific names.
+
+* **Ethdev flag RTE_ETH_DEV_DETACHABLE was removed.**
+
+  This flag is not necessary anymore with the new hotplug implementation
+  and has been removed from the ether library. Its semantics are now
+  expressed at the bus and PMD level.
+
+* **Service cores API updated for usability.**
+
+  The service cores API has been changed, removing pointers from the API
+  where possible and instead using integer IDs to identify each service.
+  This simplifies application code, aids debugging, and provides better
+  encapsulation. A summary of the main changes made is as follows:
+
+ * Services identified by ID not by ``rte_service_spec`` pointer
+ * Reduced API surface by using ``set`` functions instead of enable/disable
+ * Reworked ``rte_service_register`` to provide the service ID to registrar
+ * Rework start and stop APIs into ``rte_service_runstate_set``
+  * Added API to set runstate of service implementation to indicate readiness
+
+* **The following changes were made in the mempool library.**
+
+ * Moved ``flags`` datatype from int to unsigned int for ``rte_mempool``.
+ * Removed ``__rte_unused int flag`` param from ``rte_mempool_generic_put``
+ and ``rte_mempool_generic_get`` API.
+ * Added ``flags`` param in ``rte_mempool_xmem_size`` and
+ ``rte_mempool_xmem_usage``.
+
+* ``rte_mem_phy2mch`` was used in Xen dom0 to obtain the physical address;
+  this API has been removed, as Xen dom0 support was removed.
+
+* **Some data types, structure members and functions related to physical
+  addresses are deprecated and have new aliases with IOVA wording.**
+
+ * ``phys_addr_t`` can be often replaced by ``rte_iova_t`` of same size.
+ * ``RTE_BAD_PHYS_ADDR`` is often replaced by ``RTE_BAD_IOVA`` of same value.
+ * ``rte_memseg.phys_addr`` is aliased with ``rte_memseg.iova_addr``.
+ * ``rte_mem_virt2phy()`` can often be replaced by ``rte_mem_virt2iova``.
+ * ``rte_malloc_virt2phy`` is aliased with ``rte_malloc_virt2iova``.
+ * ``rte_memzone.phys_addr`` is aliased with ``rte_memzone.iova``.
+ * ``rte_mempool_objhdr.physaddr`` is aliased with ``rte_mempool_objhdr.iova``.
+ * ``rte_mempool_memhdr.phys_addr`` is aliased with ``rte_mempool_memhdr.iova``.
+ * ``rte_mempool_virt2phy()`` can be replaced by ``rte_mempool_virt2iova()``.
+ * ``rte_mempool_populate_phys*()`` are aliased with ``rte_mempool_populate_iova*()``
+ * ``rte_mbuf.buf_physaddr`` is aliased with ``rte_mbuf.buf_iova``.
+ * ``rte_mbuf_data_dma_addr*()`` are aliased with ``rte_mbuf_data_iova*()``.
+ * ``rte_pktmbuf_mtophys*`` are aliased with ``rte_pktmbuf_iova*()``.
+
+* **PCI bus API moved outside of the EAL.**
+
+ The PCI bus previously implemented within the EAL has been moved.
+ A first part has been added as an RTE library providing PCI helpers to
+ parse device locations or other such utilities.
+  A second part consisting of the actual bus driver has been moved to its
+  proper subdirectory, without changing its functionality.
+
+ As such, several PCI-related functions are not proposed by the EAL anymore:
+
+ * rte_pci_detach
+ * rte_pci_dump
+ * rte_pci_ioport_map
+ * rte_pci_ioport_read
+ * rte_pci_ioport_unmap
+ * rte_pci_ioport_write
+ * rte_pci_map_device
+ * rte_pci_probe
+ * rte_pci_probe_one
+ * rte_pci_read_config
+ * rte_pci_register
+ * rte_pci_scan
+ * rte_pci_unmap_device
+ * rte_pci_unregister
+ * rte_pci_write_config
+
+ These functions are made available either as part of ``librte_pci`` or
+ ``librte_bus_pci``.
+
+* **Moved vdev bus APIs outside of the EAL.**
+
+  Moved the following APIs from ``librte_eal`` to ``librte_bus_vdev``:
+
+  * ``rte_vdev_init``
+  * ``rte_vdev_register``
+  * ``rte_vdev_uninit``
+  * ``rte_vdev_unregister``
+
+* **Added a return value to the stats_get dev op API.**
+
+  The ``stats_get`` dev op API return value has been changed to ``int``.
+  This way PMDs can return an error value in case retrieving statistics
+  fails.
+
+* **Modified the rte_cryptodev_allocate_driver function in the cryptodev library.**
+
+ The function ``rte_cryptodev_allocate_driver()`` has been modified.
+ An extra parameter ``struct cryptodev_driver *crypto_drv`` has been added.
+
+* **Removed virtual device bus specific functions from librte_cryptodev.**
+
+ The functions ``rte_cryptodev_vdev_parse_init_params()`` and
+ ``rte_cryptodev_vdev_pmd_init()`` have been removed from librte_cryptodev
+ and have been replaced by non bus specific functions
+ ``rte_cryptodev_pmd_parse_input_args()`` and ``rte_cryptodev_pmd_create()``.
+
+* ``rte_cryptodev_create_vdev`` was removed to avoid the dependency on vdev
+ in librte_cryptodev; instead, users can call rte_vdev_init() directly.
+
+* **Removed PCI device bus specific functions from librte_cryptodev.**
+
+ The functions ``rte_cryptodev_pci_generic_probe()`` and
+ ``rte_cryptodev_pci_generic_remove()`` have been removed from librte_cryptodev
+ and have been replaced by non bus specific functions
+ ``rte_cryptodev_pmd_create()`` and ``rte_cryptodev_pmd_destroy()``.
+
+* **Removed deprecated functions to manage log level or type.**
+
+ The functions ``rte_set_log_level()``, ``rte_get_log_level()``,
+ ``rte_set_log_type()`` and ``rte_get_log_type()`` have been removed.
+ They are respectively replaced by ``rte_log_set_global_level()``,
+ ``rte_log_get_global_level()``, ``rte_log_set_level()`` and
+ ``rte_log_get_level()``.
+
+* **Removed mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT.**
+
+  The ``mbuf`` flags ``PKT_RX_VLAN_PKT`` and ``PKT_RX_QINQ_PKT`` have
+  been removed since their behavior was not properly described.
+
+* **Added mbuf flags PKT_RX_VLAN and PKT_RX_QINQ.**
+
+  Two ``mbuf`` flags have been added to indicate that the VLAN
+  identifier has been saved in the ``mbuf`` structure. For instance:
+
+ - if VLAN is not stripped and TCI is saved: ``PKT_RX_VLAN``
+ - if VLAN is stripped and TCI is saved: ``PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED``
+
+* **Modified the vlan_offload_set_t function prototype in the ethdev library.**
+
+  Changed the function prototype of ``vlan_offload_set_t``. The return value
+  has been changed from ``void`` to ``int`` so the caller knows whether
+  the backing device supports the operation or if the operation was
+  successfully performed.
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+ * Add a short 1-2 sentence description of the ABI change that was announced
+ in the previous releases and made in this release. Use fixed width quotes
+ for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Extended port_id range.**
+
+ The size of the field ``port_id`` in the ``rte_eth_dev_data`` structure
+ changed, as described in the `New Features` section.
+
+* **New parameter added to rte_eth_dev.**
+
+ New parameter ``security_ctx`` added to ``rte_eth_dev`` to support security
+ operations like IPSec inline.
+
+* **New parameter added to rte_cryptodev.**
+
+ New parameter ``security_ctx`` added to ``rte_cryptodev`` to support security
+ operations like lookaside crypto.
+
+
+Removed Items
+-------------
+
+.. This section should contain removed items in this release. Sample format:
+
+ * Add a short 1-2 sentence description of the removed item in the past
+ tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Xen dom0 support in the EAL was removed, as well as the xenvirt PMD and
+  the vhost_xen example.
+
+* The crypto performance unit tests have been removed,
+ replaced by the dpdk-test-crypto-perf application.
+
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+``
+ sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. do not overwrite or remove it.
+ =========================================================
+
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ + librte_bitratestats.so.2
+ + librte_bus_vdev.so.1
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ + librte_cryptodev.so.4
+ librte_distributor.so.1
+ + librte_eal.so.6
+ + librte_ethdev.so.8
+ + librte_eventdev.so.3
+ + librte_flow_classify.so.1
+ librte_gro.so.1
+ + librte_gso.so.1
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_latencystats.so.1
+ librte_lpm.so.2
+ librte_mbuf.so.3
+ + librte_mempool.so.3
+ librte_meter.so.1
+ librte_metrics.so.1
+ librte_net.so.1
+ + librte_pci.so.1
+ + librte_pdump.so.2
+ librte_pipeline.so.3
+ + librte_pmd_bnxt.so.2
+ + librte_pmd_bond.so.2
+ + librte_pmd_i40e.so.2
+ + librte_pmd_ixgbe.so.2
+ librte_pmd_ring.so.2
+ + librte_pmd_softnic.so.1
+ + librte_pmd_vhost.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_reorder.so.1
+ librte_ring.so.1
+ librte_sched.so.1
+ + librte_security.so.1
+ + librte_table.so.3
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this
+ release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
diff --git a/doc/guides/sample_app_ug/cmd_line.rst b/doc/guides/sample_app_ug/cmd_line.rst
index 36c7971c..eabbac15 100644
--- a/doc/guides/sample_app_ug/cmd_line.rst
+++ b/doc/guides/sample_app_ug/cmd_line.rst
@@ -68,26 +68,9 @@ There are three simple commands:
Compiling the Application
-------------------------
-#. Go to example directory:
+To compile the sample application, see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/cmdline
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- Refer to the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``cmd_line`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/compiling.rst b/doc/guides/sample_app_ug/compiling.rst
new file mode 100644
index 00000000..8bedaa79
--- /dev/null
+++ b/doc/guides/sample_app_ug/compiling.rst
@@ -0,0 +1,122 @@
+ .. BSD LICENSE
+ Copyright(c) 2015 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Compiling the Sample Applications
+=================================
+
+This section explains how to compile the DPDK sample applications.
+
+To compile all the sample applications
+--------------------------------------
+
+
+Set the path to the DPDK source code if it is not set:
+
+ .. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+
+Go to DPDK source:
+
+ .. code-block:: console
+
+ cd $RTE_SDK
+
+Build DPDK:
+
+ .. code-block:: console
+
+ make defconfig
+ make
+
+Build the sample applications:
+
+ .. code-block:: console
+
+ export RTE_TARGET=build
+ make -C examples
+
+For other possible ``RTE_TARGET`` values and additional information on
+compiling see
+:ref:`Compiling DPDK on Linux <linux_gsg_compiling_dpdk>` or
+:ref:`Compiling DPDK on FreeBSD <building_from_source>`.
+Applications are output to: ``$RTE_SDK/examples/app-dir/build`` or
+``$RTE_SDK/examples/app-dir/$RTE_TARGET``.
+
+
+In the example above the compiled application is written to the ``build`` subdirectory.
+To have the applications written to a different location,
+the ``O=/path/to/build/directory`` option may be specified in the make command.
+
+ .. code-block:: console
+
+ make O=/tmp
+
+To build the applications for debugging, use the ``DEBUG`` option.
+This option adds some extra flags, disables compiler optimizations and
+sets verbose output.
+
+ .. code-block:: console
+
+ make DEBUG=1
+
+
+To compile a single application
+-------------------------------
+
+Set the path to DPDK source code:
+
+ .. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+
+Go to DPDK source:
+
+ .. code-block:: console
+
+ cd $RTE_SDK
+
+Build DPDK:
+
+ .. code-block:: console
+
+ make defconfig
+ make
+
+Go to the sample application directory. Unless otherwise specified, the sample
+applications are located in ``$RTE_SDK/examples/``.
+
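+For example, for the ``helloworld`` sample application:
+
+ .. code-block:: console
+
+     cd examples/helloworld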
+
+Build the application:
+
+ .. code-block:: console
+
+ export RTE_TARGET=build
+ make
diff --git a/doc/guides/sample_app_ug/dist_app.rst b/doc/guides/sample_app_ug/dist_app.rst
index 1cae4739..45a51313 100644
--- a/doc/guides/sample_app_ug/dist_app.rst
+++ b/doc/guides/sample_app_ug/dist_app.rst
@@ -53,30 +53,12 @@ generator as shown in the figure below.
Performance Benchmarking Setup (Basic Environment)
-
Compiling the Application
-------------------------
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/distributor
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the DPDK Getting Started Guide for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
+To compile the sample application see :doc:`compiling`.
- make
+The application is located in the ``distributor`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/ethtool.rst b/doc/guides/sample_app_ug/ethtool.rst
index 67797954..6dd11dc9 100644
--- a/doc/guides/sample_app_ug/ethtool.rst
+++ b/doc/guides/sample_app_ug/ethtool.rst
@@ -40,28 +40,9 @@ is based upon a simple L2 frame reflector.
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/ethtool
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``ethtool`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst b/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
index b1b18dd0..01a5f9b2 100644
--- a/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
+++ b/doc/guides/sample_app_ug/eventdev_pipeline_sw_pmd.rst
@@ -46,28 +46,11 @@ a particular pipeline configuration.
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
+The application is located in the ``eventdev_pipeline_sw_pmd`` sub-directory.
- .. code-block:: console
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/eventdev_pipeline_sw_pmd
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/exception_path.rst b/doc/guides/sample_app_ug/exception_path.rst
index e505fb32..d7f45e6d 100644
--- a/doc/guides/sample_app_ug/exception_path.rst
+++ b/doc/guides/sample_app_ug/exception_path.rst
@@ -58,28 +58,9 @@ To make throughput measurements, kernel bridges must be setup to forward data be
Compiling the Application
-------------------------
-#. Go to example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/exception_path
-
-#. Set the target (a default target will be used if not specified).
- For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-This application is intended as a linuxapp only.
-See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``exception_path`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst
new file mode 100644
index 00000000..bc12b87d
--- /dev/null
+++ b/doc/guides/sample_app_ug/flow_classify.rst
@@ -0,0 +1,575 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Flow Classify Sample Application
+================================
+
+The Flow Classify sample application is based on the simple *skeleton* example
+of a forwarding application.
+
+It is intended as a demonstration of the basic components of a DPDK forwarding
+application which uses the Flow Classify library APIs.
+
+Please refer to the
+:doc:`../prog_guide/flow_classify_lib`
+for more information.
+
+Compiling the Application
+-------------------------
+
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``flow_classify`` sub-directory.
+
+Running the Application
+-----------------------
+
+To run the example in a ``linuxapp`` environment:
+
+.. code-block:: console
+
+ cd ~/dpdk/examples/flow_classify
+ ./build/flow_classify -c 4 -n 4 -- --rule_ipv4="../ipv4_rules_file.txt"
+
+Please refer to the *DPDK Getting Started Guide*, section
+:doc:`../linux_gsg/build_sample_apps`
+for general information on running applications and the Environment Abstraction
+Layer (EAL) options.
+
+
+Sample ipv4_rules_file.txt
+--------------------------
+
+.. code-block:: console
+
+ #file format:
+ #src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
+ #
+ 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
+ 9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
+ 9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
+ 9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
+ 6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
+
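+As an illustration, the first rule in the file above matches UDP packets
+(protocol 17, with a full ``0xff`` protocol mask) from the 2.2.2.0/24 subnet
+to the 2.2.2.0/24 subnet, with source port 32 and destination port 33, at
+priority 0:
+
+.. code-block:: console
+
+    2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
+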
+Explanation
+-----------
+
+The following sections provide an explanation of the main components of the
+code.
+
+All DPDK library functions used in the sample code are prefixed with ``rte_``
+and are explained in detail in the *DPDK API Documentation*.
+
+ACL field definitions for the IPv4 5 tuple rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following field definitions are used when creating the ACL table during
+initialisation of the Flow Classify application.
+
+.. code-block:: c
+
+ enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+ };
+
+ enum {
+ PROTO_INPUT_IPV4,
+ SRC_INPUT_IPV4,
+ DST_INPUT_IPV4,
+ SRCP_DESTP_INPUT_IPV4
+ };
+
+ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+ /* first input field - always one byte long. */
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = PROTO_FIELD_IPV4,
+ .input_index = PROTO_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+ /* next input field (IPv4 source address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = SRC_FIELD_IPV4,
+ .input_index = SRC_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+ /* next input field (IPv4 destination address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = DST_FIELD_IPV4,
+ .input_index = DST_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+ /*
+ * Next 2 fields (src & dst ports) form 4 consecutive bytes.
+ * They share the same input index.
+ */
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = SRCP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = DSTP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+ };
+
+The Main Function
+~~~~~~~~~~~~~~~~~
+
+The ``main()`` function performs the initialization and launches the execution
+threads on each lcore.
+
+The first task is to initialize the Environment Abstraction Layer (EAL).
+The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
+function. The value returned is the number of parsed arguments:
+
+.. code-block:: c
+
+ int ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+It then parses the flow_classify application arguments:
+
+.. code-block:: c
+
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
+
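+The option parsing itself is not shown above. A minimal sketch of
+``parse_args()``, assuming ``getopt_long()`` from ``<getopt.h>`` and a string
+field ``parm_config.rule_ipv4_name``, could look like the following:
+
+.. code-block:: c
+
+    /* Sketch only: parse the single --rule_ipv4 long option. */
+    static struct option lgopts[] = {
+        { "rule_ipv4", required_argument, NULL, 0 },
+        { NULL, 0, NULL, 0 }
+    };
+
+    static int
+    parse_args(int argc, char **argv)
+    {
+        int opt, option_index;
+
+        while ((opt = getopt_long(argc, argv, "", lgopts,
+                &option_index)) != -1) {
+            switch (opt) {
+            case 0: /* long option */
+                if (strcmp(lgopts[option_index].name, "rule_ipv4") == 0)
+                    parm_config.rule_ipv4_name = optarg;
+                break;
+            default:
+                return -1;
+            }
+        }
+        return 0;
+    }
+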
+The ``main()`` function also allocates a mempool to hold the mbufs
+(Message Buffers) used by the application:
+
+.. code-block:: c
+
+ mbuf_pool = rte_mempool_create("MBUF_POOL",
+ NUM_MBUFS * nb_ports,
+ MBUF_SIZE,
+ MBUF_CACHE_SIZE,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ rte_socket_id(),
+ 0);
+
+Mbufs are the packet buffer structure used by DPDK. They are explained in
+detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
+
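+Note that the pool could equally be created with the
+``rte_pktmbuf_pool_create()`` convenience wrapper, as the Flow Filtering
+example does; a sketch using the same size parameters:
+
+.. code-block:: c
+
+    /* Sketch only: equivalent pool creation via the pktmbuf helper. */
+    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
+            NUM_MBUFS * nb_ports,
+            MBUF_CACHE_SIZE,
+            0, /* no private data area */
+            RTE_MBUF_DEFAULT_BUF_SIZE,
+            rte_socket_id());
+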
+The ``main()`` function also initializes all the ports using the user defined
+``port_init()`` function which is explained in the next section:
+
+.. code-block:: c
+
+ for (portid = 0; portid < nb_ports; portid++) {
+ if (port_init(portid, mbuf_pool) != 0) {
+ rte_exit(EXIT_FAILURE,
+ "Cannot init port %" PRIu8 "\n", portid);
+ }
+ }
+
+The ``main()`` function creates the flow classifier object and adds an ACL
+table to the flow classifier:
+
+.. code-block:: c
+
+ struct flow_classifier {
+ struct rte_flow_classifier *cls;
+ uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
+ };
+
+ struct flow_classifier_acl {
+ struct flow_classifier cls;
+ } __rte_cache_aligned;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
+ cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (cls_app == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");
+
+ cls_params.name = "flow_classifier";
+ cls_params.socket_id = socket_id;
+ cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;
+
+ cls_app->cls = rte_flow_classifier_create(&cls_params);
+ if (cls_app->cls == NULL) {
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
+ }
+
+ /* initialise ACL table params */
+ table_acl_params.name = "table_acl_ipv4_5tuple";
+ table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
+ table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
+ memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
+
+ /* initialise table create params */
+ cls_table_params.ops = &rte_table_acl_ops,
+ cls_table_params.arg_create = &table_acl_params,
+ cls_table_params.table_metadata_size = 0;
+
+ ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
+            &cls_app->table_id[0]);
+ if (ret) {
+ rte_flow_classifier_free(cls_app->cls);
+        rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
+ }
+
+It then reads the ``ipv4_rules_file.txt`` file and initialises the parameters
+for the ``rte_flow_classify_table_entry_add`` API.
+This API adds a rule to the ACL table.
+
+.. code-block:: c
+
+ if (add_rules(parm_config.rule_ipv4_name)) {
+ rte_flow_classifier_free(cls_app->cls);
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to add rules\n");
+ }
+
+Once the initialization is complete, the application is ready to launch a
+function on an lcore. In this example ``lcore_main()`` is called on a single
+lcore.
+
+.. code-block:: c
+
+ lcore_main(cls_app);
+
+The ``lcore_main()`` function is explained below.
+
+The Port Initialization Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main functional part of the port initialization used in the Flow Classify
+application is shown below:
+
+.. code-block:: c
+
+ static inline int
+ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+ {
+ struct rte_eth_conf port_conf = port_conf_default;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ struct ether_addr addr;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+ return 0;
+ }
+
+The Ethernet ports are configured with default settings using the
+``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.
+
+.. code-block:: c
+
+ static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+ };
+
+For this example the ports are set up with 1 RX and 1 TX queue using the
+``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
+
+The Ethernet port is then started:
+
+.. code-block:: c
+
+ retval = rte_eth_dev_start(port);
+
+
+Finally the RX port is set in promiscuous mode:
+
+.. code-block:: c
+
+ rte_eth_promiscuous_enable(port);
+
+The Add Rules function
+~~~~~~~~~~~~~~~~~~~~~~
+
+The ``add_rules`` function reads the ``ipv4_rules_file.txt`` file and calls the
+``add_classify_rule`` function which calls the
+``rte_flow_classify_table_entry_add`` API.
+
+.. code-block:: c
+
+ static int
+ add_rules(const char *rule_path)
+ {
+ FILE *fh;
+ char buff[LINE_MAX];
+ unsigned int i = 0;
+ unsigned int total_num = 0;
+ struct rte_eth_ntuple_filter ntuple_filter;
+
+ fh = fopen(rule_path, "rb");
+ if (fh == NULL)
+ rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
+ rule_path);
+
+ fseek(fh, 0, SEEK_SET);
+
+ i = 0;
+ while (fgets(buff, LINE_MAX, fh) != NULL) {
+ i++;
+
+ if (is_bypass_line(buff))
+ continue;
+
+ if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
+ printf("\nINFO: classify rule capacity %d reached\n",
+ total_num);
+ break;
+ }
+
+ if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
+ rte_exit(EXIT_FAILURE,
+ "%s Line %u: parse rules error\n",
+ rule_path, i);
+
+ if (add_classify_rule(&ntuple_filter) != 0)
+ rte_exit(EXIT_FAILURE, "add rule error\n");
+
+ total_num++;
+ }
+
+ fclose(fh);
+ return 0;
+ }
+
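+The ``is_bypass_line()`` helper used above is not shown; a minimal sketch,
+assuming comment lines start with ``#``, could be:
+
+.. code-block:: c
+
+    /* Sketch only: skip empty lines and '#' comment lines. */
+    static inline int
+    is_bypass_line(char *buff)
+    {
+        if (buff[0] == '#' || buff[0] == '\n' || buff[0] == '\r')
+            return 1;
+        return 0;
+    }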
+
+The Lcore Main function
+~~~~~~~~~~~~~~~~~~~~~~~
+
+As we saw above, the ``main()`` function calls an application function on the
+available lcores.
+The ``lcore_main()`` function calls the ``rte_flow_classifier_query`` API.
+For the Flow Classify application the ``lcore_main()`` function looks like
+the following:
+
+.. code-block:: c
+
+ /* flow classify data */
+ static int num_classify_rules;
+ static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
+ static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
+ static struct rte_flow_classify_stats classify_stats = {
+ .stats = (void *)&ntuple_stats
+ };
+
+    static __attribute__((noreturn)) void
+    lcore_main(struct flow_classifier *cls_app)
+    {
+        const uint8_t nb_ports = rte_eth_dev_count();
+        uint8_t port;
+        int ret;
+        int i;
+
+        /*
+         * Check that the port is on the same NUMA node as the polling thread
+         * for best performance.
+         */
+        for (port = 0; port < nb_ports; port++)
+            if (rte_eth_dev_socket_id(port) > 0 &&
+                    rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
+                printf("\n\n");
+                printf("WARNING: port %u is on remote NUMA node "
+                       "to polling thread.\n", port);
+                printf("Performance will not be optimal.\n");
+            }
+
+        printf("\nCore %u forwarding packets.\n", rte_lcore_id());
+        printf("[Ctrl+C to quit]\n");
+
+ /* Run until the application is quit or killed. */
+ for (;;) {
+ /*
+ * Receive packets on a port and forward them on the paired
+ * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
+ */
+ for (port = 0; port < nb_ports; port++) {
+
+ /* Get burst of RX packets, from first port of pair. */
+ struct rte_mbuf *bufs[BURST_SIZE];
+ const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
+ bufs, BURST_SIZE);
+
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
+ if (rules[i]) {
+ ret = rte_flow_classifier_query(
+ cls_app->cls,
+ cls_app->table_id[0],
+ bufs, nb_rx, rules[i],
+ &classify_stats);
+ if (ret)
+ printf(
+ "rule [%d] query failed ret [%d]\n\n",
+ i, ret);
+ else {
+ printf(
+ "rule[%d] count=%"PRIu64"\n",
+ i, ntuple_stats.counter1);
+
+ printf("proto = %d\n",
+ ntuple_stats.ipv4_5tuple.proto);
+ }
+ }
+ }
+
+ /* Send burst of TX packets, to second port of pair. */
+ const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
+ bufs, nb_rx);
+
+ /* Free any unsent packets. */
+ if (unlikely(nb_tx < nb_rx)) {
+ uint16_t buf;
+ for (buf = nb_tx; buf < nb_rx; buf++)
+ rte_pktmbuf_free(bufs[buf]);
+ }
+ }
+ }
+ }
+
+The main work of the application is done within the loop:
+
+.. code-block:: c
+
+ for (;;) {
+ for (port = 0; port < nb_ports; port++) {
+
+ /* Get burst of RX packets, from first port of pair. */
+ struct rte_mbuf *bufs[BURST_SIZE];
+ const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
+ bufs, BURST_SIZE);
+
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ /* Send burst of TX packets, to second port of pair. */
+ const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
+ bufs, nb_rx);
+
+ /* Free any unsent packets. */
+ if (unlikely(nb_tx < nb_rx)) {
+ uint16_t buf;
+ for (buf = nb_tx; buf < nb_rx; buf++)
+ rte_pktmbuf_free(bufs[buf]);
+ }
+ }
+ }
+
+Packets are received in bursts on the RX ports and transmitted in bursts on
+the TX ports. The ports are grouped in pairs with a simple mapping scheme
+using an XOR on the port number::
+
+ 0 -> 1
+ 1 -> 0
+
+ 2 -> 3
+ 3 -> 2
+
+ etc.
+
+The ``rte_eth_tx_burst()`` function frees the memory buffers of packets that
+are transmitted. If packets fail to transmit, ``(nb_tx < nb_rx)``, then they
+must be freed explicitly using ``rte_pktmbuf_free()``.
+
+The forwarding loop can be interrupted and the application closed using
+``Ctrl-C``.
diff --git a/doc/guides/sample_app_ug/flow_filtering.rst b/doc/guides/sample_app_ug/flow_filtering.rst
new file mode 100644
index 00000000..725dcb44
--- /dev/null
+++ b/doc/guides/sample_app_ug/flow_filtering.rst
@@ -0,0 +1,545 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Mellanox Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Mellanox Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Basic RTE Flow Filtering Sample Application
+===========================================
+
+The Basic RTE flow filtering sample application is a simple example of
+creating an RTE flow rule.
+
+It is intended as a demonstration of the basic components of RTE flow rules.
+
+
+Compiling the Application
+-------------------------
+
+To compile the application, export the path to the DPDK source tree and go to
+the example directory:
+
+.. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+
+ cd ${RTE_SDK}/examples/flow_filtering
+
+Set the target, for example:
+
+.. code-block:: console
+
+ export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+See the *DPDK Getting Started* Guide for possible ``RTE_TARGET`` values.
+
+Build the application as follows:
+
+.. code-block:: console
+
+ make
+
+
+Running the Application
+-----------------------
+
+To run the example in a ``linuxapp`` environment:
+
+.. code-block:: console
+
+ ./build/flow -l 1 -n 1
+
+Refer to *DPDK Getting Started Guide* for general information on running
+applications and the Environment Abstraction Layer (EAL) options.
+
+
+Explanation
+-----------
+
+The example is built from two main files:
+``main.c``, which holds the example logic, and ``flow_blocks.c``, which holds
+the implementation for building the flow rule.
+
+The following sections provide an explanation of the main components of the
+code.
+
+All DPDK library functions used in the sample code are prefixed with ``rte_``
+and are explained in detail in the *DPDK API Documentation*.
+
+
+The Main Function
+~~~~~~~~~~~~~~~~~
+
+The ``main()`` function located in ``main.c`` file performs the initialization
+and runs the main loop function.
+
+The first task is to initialize the Environment Abstraction Layer (EAL). The
+``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
+function. The value returned is the number of parsed arguments:
+
+.. code-block:: c
+
+ int ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+
+The ``main()`` function also allocates a mempool to hold the mbufs (Message
+Buffers) used by the application:
+
+.. code-block:: c
+
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 4096, 128, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+Mbufs are the packet buffer structure used by DPDK. They are explained in
+detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
+
+The ``main()`` function also initializes all the ports using the user defined
+``init_port()`` function which is explained in the next section:
+
+.. code-block:: c
+
+ init_port();
+
+Once the initialization is complete, we set the flow rule using the
+following code:
+
+.. code-block:: c
+
+    /* create a flow rule that sends matching packets to the selected queue */
+ flow = generate_ipv4_flow(port_id, selected_queue,
+ SRC_IP, EMPTY_MASK,
+ DEST_IP, FULL_MASK, &error);
+ if (!flow) {
+ printf("Flow can't be created %d message: %s\n",
+ error.type,
+ error.message ? error.message : "(no stated reason)");
+ rte_exit(EXIT_FAILURE, "error in creating flow");
+ }
+
+In the last part the application is ready to launch the ``main_loop()``
+function, which is explained below:
+
+
+.. code-block:: c
+
+ main_loop();
+
+The Port Initialization Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main functional part of the port initialization used in the flow filtering
+application is shown below:
+
+.. code-block:: c
+
+ init_port(void)
+ {
+ int ret;
+ uint16_t i;
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ /**< Header Split disabled */
+ .header_split = 0,
+ /**< IP checksum offload disabled */
+ .hw_ip_checksum = 0,
+ /**< VLAN filtering disabled */
+ .hw_vlan_filter = 0,
+ /**< Jumbo Frame Support disabled */
+ .jumbo_frame = 0,
+ /**< CRC stripped by hardware */
+ .hw_strip_crc = 1,
+ },
+ };
+
+ printf(":: initializing port: %d\n", port_id);
+ ret = rte_eth_dev_configure(port_id,
+ nr_queues, nr_queues, &port_conf);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: cannot configure device: err=%d, port=%u\n",
+ ret, port_id);
+ }
+
+        /* only set up Rx queues: they are all this example needs so far */
+ for (i = 0; i < nr_queues; i++) {
+ ret = rte_eth_rx_queue_setup(port_id, i, 512,
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: Rx queue setup failed: err=%d, port=%u\n",
+ ret, port_id);
+ }
+ }
+
+
+ rte_eth_promiscuous_enable(port_id);
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, port_id);
+ }
+
+ assert_link_status();
+
+ printf(":: initializing port: %d done\n", port_id);
+ }
+
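+The ``assert_link_status()`` helper called above simply checks that the link
+came up; a minimal sketch, assuming the global ``port_id``, could be:
+
+.. code-block:: c
+
+    /* Sketch only: abort if the link did not come up. */
+    static void
+    assert_link_status(void)
+    {
+        struct rte_eth_link link;
+
+        memset(&link, 0, sizeof(link));
+        rte_eth_link_get(port_id, &link);
+        if (link.link_status == ETH_LINK_DOWN)
+            rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
+    }
+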
+The Ethernet port is configured with default settings using the
+``rte_eth_dev_configure()`` function and the ``port_conf`` struct:
+
+.. code-block:: c
+
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ /**< Header Split disabled */
+ .header_split = 0,
+ /**< IP checksum offload disabled */
+ .hw_ip_checksum = 0,
+ /**< VLAN filtering disabled */
+ .hw_vlan_filter = 0,
+ /**< Jumbo Frame Support disabled */
+ .jumbo_frame = 0,
+ /**< CRC stripped by hardware */
+ .hw_strip_crc = 1,
+ },
+ };
+
+ ret = rte_eth_dev_configure(port_id, nr_queues, nr_queues, &port_conf);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: cannot configure device: err=%d, port=%u\n",
+ ret, port_id);
+ }
+
+For this example we configure a number of Rx queues that are all connected to
+a single port:
+
+.. code-block:: c
+
+ for (i = 0; i < nr_queues; i++) {
+ ret = rte_eth_rx_queue_setup(port_id, i, 512,
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: Rx queue setup failed: err=%d, port=%u\n",
+ ret, port_id);
+ }
+ }
+
+In the next step we create and apply the flow rule, which sends packets with
+a destination IP address equal to 192.168.1.1 to queue number 1. A detailed
+explanation of ``generate_ipv4_flow()`` appears later in this document:
+
+.. code-block:: c
+
+ flow = generate_ipv4_flow(port_id, selected_queue,
+ SRC_IP, EMPTY_MASK,
+ DEST_IP, FULL_MASK, &error);
+
+The RX port is then set to promiscuous mode:
+
+.. code-block:: c
+
+ rte_eth_promiscuous_enable(port_id);
+
+The last step is to start the port:
+
+.. code-block:: c
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err%d, port=%u\n",
+ ret, port_id);
+ }
+
+
+The main_loop function
+~~~~~~~~~~~~~~~~~~~~~~
+
+As we saw above, the ``main()`` function calls an application function to
+handle the main loop. For the flow filtering application the ``main_loop()``
+function looks like the following:
+
+.. code-block:: c
+
+ static void
+ main_loop(void)
+ {
+ struct rte_mbuf *mbufs[32];
+ struct ether_hdr *eth_hdr;
+ uint16_t nb_rx;
+ uint16_t i;
+ uint16_t j;
+
+ while (!force_quit) {
+ for (i = 0; i < nr_queues; i++) {
+ nb_rx = rte_eth_rx_burst(port_id,
+ i, mbufs, 32);
+ if (nb_rx) {
+ for (j = 0; j < nb_rx; j++) {
+ struct rte_mbuf *m = mbufs[j];
+
+ eth_hdr = rte_pktmbuf_mtod(m,
+ struct ether_hdr *);
+ print_ether_addr("src=",
+ &eth_hdr->s_addr);
+ print_ether_addr(" - dst=",
+ &eth_hdr->d_addr);
+ printf(" - queue=0x%x",
+ (unsigned int)i);
+ printf("\n");
+ rte_pktmbuf_free(m);
+ }
+ }
+ }
+ }
+ /* closing and releasing resources */
+ rte_flow_flush(port_id, &error);
+ rte_eth_dev_stop(port_id);
+ rte_eth_dev_close(port_id);
+ }
+
+The main work of the application is reading the packets from all queues and
+printing the destination queue for each packet:
+
+.. code-block:: c
+
+ while (!force_quit) {
+ for (i = 0; i < nr_queues; i++) {
+ nb_rx = rte_eth_rx_burst(port_id, i, mbufs, 32);
+ if (nb_rx) {
+ for (j = 0; j < nb_rx; j++) {
+ struct rte_mbuf *m = mbufs[j];
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ print_ether_addr("src=", &eth_hdr->s_addr);
+ print_ether_addr(" - dst=", &eth_hdr->d_addr);
+ printf(" - queue=0x%x", (unsigned int)i);
+ printf("\n");
+ rte_pktmbuf_free(m);
+ }
+ }
+ }
+ }
+
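+The ``print_ether_addr()`` helper used in the loop is a small wrapper; one
+possible implementation, using ``ether_format_addr()`` from ``rte_ether.h``,
+is:
+
+.. code-block:: c
+
+    /* Format and print a MAC address with a prefix string. */
+    static inline void
+    print_ether_addr(const char *what, struct ether_addr *eth_addr)
+    {
+        char buf[ETHER_ADDR_FMT_SIZE];
+
+        ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+        printf("%s%s", what, buf);
+    }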
+
+The forwarding loop can be interrupted and the application closed using
+``Ctrl-C``, which results in closing the port and the device using
+``rte_eth_dev_stop`` and ``rte_eth_dev_close``.
+
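+The ``force_quit`` flag checked by ``main_loop()`` is typically set from a
+signal handler registered in ``main()``; a minimal sketch, assuming
+``SIGINT``/``SIGTERM`` handling, could be:
+
+.. code-block:: c
+
+    #include <signal.h>
+    #include <stdbool.h>
+
+    static volatile bool force_quit;
+
+    static void
+    signal_handler(int signum)
+    {
+        if (signum == SIGINT || signum == SIGTERM) {
+            printf("\n\nSignal %d received, preparing to exit...\n",
+                    signum);
+            force_quit = true;
+        }
+    }
+
+    /* In main(), before entering main_loop(): */
+    force_quit = false;
+    signal(SIGINT, signal_handler);
+    signal(SIGTERM, signal_handler);
+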
+The generate_ipv4_flow function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``generate_ipv4_flow()`` function is responsible for creating the flow rule.
+This function is located in the ``flow_blocks.c`` file.
+
+.. code-block:: c
+
+ static struct rte_flow *
+ generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+ uint32_t src_ip, uint32_t src_mask,
+ uint32_t dest_ip, uint32_t dest_mask,
+ struct rte_flow_error *error)
+ {
+ struct rte_flow_attr attr;
+ struct rte_flow_item pattern[MAX_PATTERN_NUM];
+ struct rte_flow_action action[MAX_PATTERN_NUM];
+ struct rte_flow *flow = NULL;
+ struct rte_flow_action_queue queue = { .index = rx_q };
+ struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_mask;
+ struct rte_flow_item_vlan vlan_spec;
+ struct rte_flow_item_vlan vlan_mask;
+ struct rte_flow_item_ipv4 ip_spec;
+ struct rte_flow_item_ipv4 ip_mask;
+
+ memset(pattern, 0, sizeof(pattern));
+ memset(action, 0, sizeof(action));
+
+ /*
+ * set the rule attribute.
+ * in this case only ingress packets will be checked.
+ */
+ memset(&attr, 0, sizeof(struct rte_flow_attr));
+ attr.ingress = 1;
+
+ /*
+ * create the action sequence.
+ * one action only, move packet to queue
+ */
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ action[0].conf = &queue;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /*
+ * set the first level of the pattern (eth).
+ * since in this example we just want to get the
+ * ipv4 we set this level to allow all.
+ */
+ memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
+ memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
+ eth_spec.type = 0;
+ eth_mask.type = 0;
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth_spec;
+ pattern[0].mask = &eth_mask;
+
+ /*
+ * setting the second level of the pattern (vlan).
+ * since in this example we just want to get the
+ * ipv4 we also set this level to allow all.
+ */
+ memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
+ memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan_spec;
+ pattern[1].mask = &vlan_mask;
+
+ /*
+ * setting the third level of the pattern (ip).
+ * in this example this is the level we care about
+ * so we set it according to the parameters.
+ */
+ memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
+ memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
+ ip_spec.hdr.dst_addr = htonl(dest_ip);
+ ip_mask.hdr.dst_addr = dest_mask;
+ ip_spec.hdr.src_addr = htonl(src_ip);
+ ip_mask.hdr.src_addr = src_mask;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].spec = &ip_spec;
+ pattern[2].mask = &ip_mask;
+
+ /* the final level must be always type end */
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ int res = rte_flow_validate(port_id, &attr, pattern, action, error);
+        if (!res)
+ flow = rte_flow_create(port_id, &attr, pattern, action, error);
+
+ return flow;
+ }
+
+The first part of the function declares the structures that will be used:
+
+.. code-block:: c
+
+ struct rte_flow_attr attr;
+ struct rte_flow_item pattern[MAX_PATTERN_NUM];
+ struct rte_flow_action action[MAX_PATTERN_NUM];
+    struct rte_flow *flow = NULL;
+ struct rte_flow_action_queue queue = { .index = rx_q };
+ struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_mask;
+ struct rte_flow_item_vlan vlan_spec;
+ struct rte_flow_item_vlan vlan_mask;
+ struct rte_flow_item_ipv4 ip_spec;
+ struct rte_flow_item_ipv4 ip_mask;
+
+The following part creates the flow attributes, in our case ingress:
+
+.. code-block:: c
+
+ memset(&attr, 0, sizeof(struct rte_flow_attr));
+ attr.ingress = 1;
+
+The third part defines the action to be taken when a packet matches the rule.
+In this case, the packet is sent to the selected queue:
+
+.. code-block:: c
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ action[0].conf = &queue;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+The fourth part is responsible for creating the pattern and is built from a
+number of steps. In each step we build one level of the pattern, starting
+with the lowest one.
+
+Setting the first level of the pattern (ETH):
+
+.. code-block:: c
+
+ memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
+ memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
+ eth_spec.type = 0;
+ eth_mask.type = 0;
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth_spec;
+ pattern[0].mask = &eth_mask;
+
+Setting the second level of the pattern (VLAN):
+
+.. code-block:: c
+
+ memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
+ memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan_spec;
+ pattern[1].mask = &vlan_mask;
+
+Setting the third level of the pattern (IPv4):
+
+.. code-block:: c
+
+ memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
+ memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
+ ip_spec.hdr.dst_addr = htonl(dest_ip);
+ ip_mask.hdr.dst_addr = dest_mask;
+ ip_spec.hdr.src_addr = htonl(src_ip);
+ ip_mask.hdr.src_addr = src_mask;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].spec = &ip_spec;
+ pattern[2].mask = &ip_mask;
+
+Closing the pattern part with the mandatory end entry:
+
+.. code-block:: c
+
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+The last part of the function validates the rule and creates it:
+
+.. code-block:: c
+
+    int res = rte_flow_validate(port_id, &attr, pattern, action, error);
+    if (!res)
+        flow = rte_flow_create(port_id, &attr, pattern, action, error);
+
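+For reference, the ``SRC_IP``, ``DEST_IP`` and mask values passed to
+``generate_ipv4_flow()`` from ``main()`` could be defined as follows (a
+sketch; the exact definitions live in ``main.c``):
+
+.. code-block:: c
+
+    #define SRC_IP ((0 << 24) + (0 << 16) + (0 << 8) + 0) /* src = 0.0.0.0 */
+    #define DEST_IP ((192 << 24) + (168 << 16) + (1 << 8) + 1) /* 192.168.1.1 */
+    #define FULL_MASK 0xffffffff /* match all address bits */
+    #define EMPTY_MASK 0x0 /* ignore the address */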
diff --git a/doc/guides/sample_app_ug/hello_world.rst b/doc/guides/sample_app_ug/hello_world.rst
index f4753af0..3018d459 100644
--- a/doc/guides/sample_app_ug/hello_world.rst
+++ b/doc/guides/sample_app_ug/hello_world.rst
@@ -37,26 +37,9 @@ The application simply prints an "helloworld" message on every enabled lcore.
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/helloworld
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started* Guide for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``helloworld`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index 069d4f12..db68ef76 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -36,12 +36,15 @@ Sample Applications User Guides
:numbered:
intro
+ compiling
cmd_line
ethtool
exception_path
hello_world
skeleton
rxtx_callbacks
+ flow_classify
+ flow_filtering
ip_frag
ipv4_multicast
ip_reassembly
@@ -58,6 +61,7 @@ Sample Applications User Guides
link_status_intr
load_balancer
server_node_efd
+ service_cores
multi_process
qos_metering
qos_scheduler
diff --git a/doc/guides/sample_app_ug/intro.rst b/doc/guides/sample_app_ug/intro.rst
index d3f261b3..ae12503b 100644
--- a/doc/guides/sample_app_ug/intro.rst
+++ b/doc/guides/sample_app_ug/intro.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -28,42 +28,115 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-Introduction
-============
-
-This document describes the sample applications that are included in the Data Plane Development Kit (DPDK).
-Each chapter describes a sample application that showcases specific functionality and
-provides instructions on how to compile, run and use the sample application.
-
-Documentation Roadmap
----------------------
-
-The following is a list of DPDK documents in suggested reading order:
-
-* **Release Notes** : Provides release-specific information, including supported features,
- limitations, fixed issues, known issues and so on.
- Also, provides the answers to frequently asked questions in FAQ format.
-
-* **Getting Started Guides** : Describes how to install and
- configure the DPDK software for your operating system;
- designed to get users up and running quickly with the software.
-
-* **Programmer's Guide:** Describes:
-
- * The software architecture and how to use it (through examples),
- specifically in a Linux* application (linuxapp) environment.
-
- * The content of the DPDK, the build system
- (including the commands that can be used in the root DPDK Makefile to build the development kit and an application)
- and guidelines for porting an application.
-
- * Optimizations used in the software and those that should be considered for new development
-
-A glossary of terms is also provided.
-
-* **API Reference** : Provides detailed information about DPDK functions,
- data structures and other programming constructs.
-
-* **Sample Applications User Guide** : Describes a set of sample applications.
- Each chapter describes a sample application that showcases specific functionality and
- provides instructions on how to compile, run and use the sample application.
+Introduction to the DPDK Sample Applications
+============================================
+
+The DPDK Sample Applications are small standalone applications which
+demonstrate various features of DPDK. They can be considered as a cookbook of
+DPDK features. Users interested in getting started with DPDK can take the
+applications, try out the features, and then extend them to fit their needs.
+
+
+The DPDK Sample Applications
+----------------------------
+
+Table :numref:`table_sample_apps` shows a list of some of the main sample
+applications that are available in the examples directory of DPDK:
+
+ .. _table_sample_apps:
+
+ .. table:: **Some of the DPDK Sample applications**
+
+ +---------------------------------------+--------------------------------------+
+ | Bonding | Netmap Compatibility |
+ +---------------------------------------+--------------------------------------+
+ | Command Line | Packet Ordering |
+ +---------------------------------------+--------------------------------------+
+ | Distributor | Performance Thread |
+ +---------------------------------------+--------------------------------------+
+ | Ethtool | Precision Time Protocol (PTP) Client |
+ +---------------------------------------+--------------------------------------+
+ | Exception Path | Quality of Service (QoS) Metering |
+ +---------------------------------------+--------------------------------------+
+ | Hello World | QoS Scheduler |
+ +---------------------------------------+--------------------------------------+
+ | Internet Protocol (IP) Fragmentation | Quota and Watermark |
+ +---------------------------------------+--------------------------------------+
+ | IP Pipeline | RX/TX Callbacks |
+ +---------------------------------------+--------------------------------------+
+ | IP Reassembly | Server node EFD |
+ +---------------------------------------+--------------------------------------+
+ | IPsec Security Gateway | Basic Forwarding/Skeleton App |
+ +---------------------------------------+--------------------------------------+
+ | IPv4 multicast | Tunnel End Point (TEP) termination |
+ +---------------------------------------+--------------------------------------+
+ | Kernel NIC Interface | Timer |
+ +---------------------------------------+--------------------------------------+
+ | Network Layer 2 Forwarding + variants | Vhost |
+ +---------------------------------------+--------------------------------------+
+ | Network Layer 3 Forwarding + variants | Vhost Xen |
+ +---------------------------------------+--------------------------------------+
+ | Link Status Interrupt | VMDQ Forwarding |
+ +---------------------------------------+--------------------------------------+
+ | Load Balancer | VMDQ and DCB Forwarding |
+ +---------------------------------------+--------------------------------------+
+ | Multi-process | VM Power Management |
+ +---------------------------------------+--------------------------------------+
+
+These examples range from simple to reasonably complex but most are designed
+to demonstrate one particular feature of DPDK. Some of the more interesting
+examples are highlighted below.
+
+
+* :doc:`Hello World<hello_world>`: As with most introductions to a
+ programming framework a good place to start is with the Hello World
+ application. The Hello World example sets up the DPDK Environment Abstraction
+ Layer (EAL), and prints a simple "Hello World" message to each of the DPDK
+ enabled cores. This application doesn't do any packet forwarding but it is a
+ good way to test if the DPDK environment is compiled and set up properly.
+
+* :doc:`Basic Forwarding/Skeleton Application<skeleton>`: The Basic
+ Forwarding/Skeleton contains the minimum amount of code required to enable
+ basic packet forwarding with DPDK. This allows you to test if your network
+ interfaces are working with DPDK.
+
+* :doc:`Network Layer 2 forwarding<l2_forward_real_virtual>`: The Network Layer 2
+ forwarding, or ``l2fwd`` application does forwarding based on Ethernet MAC
+ addresses like a simple switch.
+
+* :doc:`Network Layer 3 forwarding<l3_forward>`: The Network Layer 3
+  forwarding, or ``l3fwd`` application does forwarding based on Internet
+  Protocol, IPv4 or IPv6 like a simple router.
+
+* :doc:`Packet Distributor<dist_app>`: The Packet Distributor
+ demonstrates how to distribute packets arriving on an Rx port to different
+ cores for processing and transmission.
+
+* :doc:`Multi-Process Application<multi_process>`: The
+ multi-process application shows how two DPDK processes can work together using
+ queues and memory pools to share information.
+
+* :doc:`RX/TX callbacks Application<rxtx_callbacks>`: The RX/TX
+ callbacks sample application is a packet forwarding application that
+ demonstrates the use of user defined callbacks on received and transmitted
+ packets. The application calculates the latency of a packet between RX
+ (packet arrival) and TX (packet transmission) by adding callbacks to the RX
+ and TX packet processing functions.
+
+* :doc:`IPSec Security Gateway<ipsec_secgw>`: The IPSec Security
+  Gateway application is a minimal example of something closer to a real
+  world application. This is also a good example of an application using the
+  DPDK Cryptodev framework.
+
+* :doc:`Precision Time Protocol (PTP) client<ptpclient>`: The PTP
+ client is another minimal implementation of a real world application.
+ In this case the application is a PTP client that communicates with a PTP
+ master clock to synchronize time on a Network Interface Card (NIC) using the
+ IEEE1588 protocol.
+
+* :doc:`Quality of Service (QoS) Scheduler<qos_scheduler>`: The QoS
+ Scheduler application demonstrates the use of DPDK to provide QoS scheduling.
+
+There are many more examples shown in the following chapters. Each of the
+documented sample applications shows how to compile, configure and run the
+application, as well as explaining the main functionality of the code.
diff --git a/doc/guides/sample_app_ug/ip_frag.rst b/doc/guides/sample_app_ug/ip_frag.rst
index bd1fe2dc..f45adfe7 100644
--- a/doc/guides/sample_app_ug/ip_frag.rst
+++ b/doc/guides/sample_app_ug/ip_frag.rst
@@ -59,31 +59,12 @@ Any unmatched packets are forwarded to the originating port.
By default, input frame sizes up to 9.5 KB are supported.
Before forwarding, the input IP packet is fragmented to fit into the "standard" Ethernet* v2 MTU (1500 bytes).
-Building the Application
-------------------------
+Compiling the Application
+-------------------------
-To build the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/ip_fragmentation
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``ip_fragmentation`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/ip_reassembly.rst b/doc/guides/sample_app_ug/ip_reassembly.rst
index cc9e5911..7dc80d0b 100644
--- a/doc/guides/sample_app_ug/ip_reassembly.rst
+++ b/doc/guides/sample_app_ug/ip_reassembly.rst
@@ -53,28 +53,14 @@ There are two key differences from the L2 Forwarding sample application:
The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number, associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.
-Compiling the Application
--------------------------
-To compile the application:
-#. Go to the sample application directory:
+Compiling the Application
+-------------------------
- .. code-block:: console
+To compile the sample application see :doc:`compiling`.
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/ip_reassembly
+The application is located in the ``ip_reassembly`` sub-directory.
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index b675cbae..d6cfdbf7 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -52,13 +52,22 @@ The application classifies the ports as *Protected* and *Unprotected*.
Thus, traffic received on an Unprotected or Protected port is considered
Inbound or Outbound respectively.
+The application also supports complete IPsec protocol offload to hardware
+(a lookaside crypto accelerator or an ethernet device). It also supports
+inline IPsec processing by the supported ethernet device during transmission.
+These modes can be selected during the SA creation configuration.
+
+In case of complete protocol offload, the processing of headers (ESP and outer
+IP header) is done by the hardware and the application does not need to
+add/remove them during outbound/inbound processing.
+
The Path for IPsec Inbound traffic is:
* Read packets from the port.
* Classify packets between IPv4 and ESP.
* Perform Inbound SA lookup for ESP packets based on their SPI.
-* Perform Verification/Decryption.
-* Remove ESP and outer IP header
+* Perform Verification/Decryption (Not needed in case of inline IPsec).
+* Remove ESP and outer IP header (Not needed in case of protocol offload).
* Inbound SP check using ACL of decrypted packets and any other IPv4 packets.
* Routing.
* Write packet to port.
@@ -68,8 +77,8 @@ The Path for the IPsec Outbound traffic is:
* Read packets from the port.
* Perform Outbound SP check using ACL of all IPv4 traffic.
* Perform Outbound SA lookup for packets that need IPsec protection.
-* Add ESP and outer IP header.
-* Perform Encryption/Digest.
+* Add ESP and outer IP header (Not needed in case of protocol offload).
+* Perform Encryption/Digest (Not needed in case of inline IPsec).
* Routing.
* Write packet to port.
@@ -83,27 +92,12 @@ Constraints
* Each SA must be handled by a unique lcore (*1 RX queue per port*).
* No chained mbufs.
-
Compiling the Application
-------------------------
-To compile the application:
-
-#. Go to the sample application directory::
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/ipsec-secgw
-
-#. Set the target (a default target is used if not specified). For example::
-
+To compile the sample application see :doc:`compiling`.
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application::
-
- make
+The application is located in the ``ipsec-secgw`` sub-directory.
#. [Optional] Build the application for debugging:
This option adds some extra flags, disables compiler optimizations and
@@ -119,7 +113,7 @@ The application has a number of command line options::
./build/ipsec-secgw [EAL options] --
- -p PORTMASK -P -u PORTMASK
+ -p PORTMASK -P -u PORTMASK -j FRAMESIZE
--config (port,queue,lcore)[,(port,queue,lcore]
--single-sa SAIDX
-f CONFIG_FILE_PATH
@@ -135,6 +129,10 @@ Where:
* ``-u PORTMASK``: hexadecimal bitmask of unprotected ports
+* ``-j FRAMESIZE``: *optional*. Enables jumbo frames with the maximum size
+ specified as FRAMESIZE. If an invalid value is provided as FRAMESIZE
+ then the default value 9000 is used.
+
* ``--config (port,queue,lcore)[,(port,queue,lcore)]``: determines which queues
from which ports are mapped to which cores.
@@ -385,7 +383,7 @@ The SA rule syntax is shown as follows:
.. code-block:: console
sa <dir> <spi> <cipher_algo> <cipher_key> <auth_algo> <auth_key>
- <mode> <src_ip> <dst_ip>
+ <mode> <src_ip> <dst_ip> <action_type> <port_id>
where each options means:
@@ -526,6 +524,34 @@ where each options means:
* *dst X.X.X.X* for IPv4
* *dst XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX* for IPv6
+``<type>``
+
+ * Action type to specify the security action. This option specifies
+   whether the SA is processed with lookaside protocol offload to a HW
+   accelerator, protocol offload on an ethernet device, or inline crypto
+   processing on the ethernet device during transmission.
+
+ * Optional: Yes, default type *no-offload*
+
+ * Available options:
+
+ * *lookaside-protocol-offload*: look aside protocol offload to HW accelerator
+ * *inline-protocol-offload*: inline protocol offload on ethernet device
+ * *inline-crypto-offload*: inline crypto processing on ethernet device
+ * *no-offload*: no offloading to hardware
+
+``<port_id>``
+
+ * Port/device ID of the ethernet/crypto accelerator for which the SA is
+   configured. This option is used when *type* is NOT *no-offload*.
+
+ * Optional: No, if *type* is not *no-offload*
+
+ * Syntax:
+
+ * *port_id X* X is a valid device number in decimal
+
+
Example SA rules:
.. code-block:: console
@@ -545,6 +571,11 @@ Example SA rules:
aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
+ sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
+ auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
+ mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
+ type lookaside-protocol-offload port_id 4
+
Routing rule syntax
^^^^^^^^^^^^^^^^^^^
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 49712a0f..fd1af006 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -63,37 +63,12 @@ with the mask of ports to multicast packets to.
Also, the application does not consider the Ethernet addresses;
it looks only at the IPv4 destination address for any given packet.
-Building the Application
-------------------------
+Compiling the Application
+-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/ipv4_multicast
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
-
-.. note::
-
- The compiled application is written to the build subdirectory.
- To have the application written to a different location,
- the O=/path/to/build/directory option may be specified in the make command.
+The application is located in the ``ipv4_multicast`` sub-directory.
Running the Application
-----------------------
@@ -268,7 +243,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:
.. code-block:: c
- static inline void mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr, struct lcore_queue_conf *qconf, uint8_t port)
+ static inline void mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr, struct lcore_queue_conf *qconf, uint16_t port)
{
struct ether_hdr *ethdr;
uint16_t len;
diff --git a/doc/guides/sample_app_ug/keep_alive.rst b/doc/guides/sample_app_ug/keep_alive.rst
index fe908206..9b8be489 100644
--- a/doc/guides/sample_app_ug/keep_alive.rst
+++ b/doc/guides/sample_app_ug/keep_alive.rst
@@ -66,27 +66,9 @@ of the L2 forwarding application.
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk cd ${RTE_SDK}/examples/keep_alive
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l2fwd-keepalive`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/kernel_nic_interface.rst b/doc/guides/sample_app_ug/kernel_nic_interface.rst
index 619a7b52..e1ac4153 100644
--- a/doc/guides/sample_app_ug/kernel_nic_interface.rst
+++ b/doc/guides/sample_app_ug/kernel_nic_interface.rst
@@ -77,35 +77,17 @@ The packet flow through the Kernel NIC Interface application is as shown in the
Kernel NIC Application Packet Flow
-
Compiling the Application
-------------------------
-Compile the application as follows:
-
-#. Go to the example directory:
-
- .. code-block:: console
+To compile the sample application see :doc:`compiling`.
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/kni
+The application is located in the ``kni`` sub-directory.
-#. Set the target (a default target is used if not specified)
-
- .. note::
+.. note::
This application is intended as a linuxapp only.
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application:
-
- .. code-block:: console
-
- make
-
Loading the Kernel Module
-------------------------
@@ -246,7 +228,7 @@ The code for allocating the kernel NIC interfaces for a specific port is as foll
.. code-block:: c
static int
- kni_alloc(uint8_t port_id)
+ kni_alloc(uint16_t port_id)
{
uint8_t i;
struct rte_kni *kni;
@@ -335,7 +317,7 @@ The code is as follows:
int i, j, nb_token;
char *str_fld[_NUM_FLD];
unsigned long int_fld[_NUM_FLD];
- uint8_t port_id, nb_kni_port_params = 0;
+ uint16_t port_id, nb_kni_port_params = 0;
memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
@@ -532,7 +514,7 @@ Currently, setting a new MTU and configuring the network interface (up/ down) ar
/* Callback for request of changing MTU */
static int
- kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+ kni_change_mtu(uint16_t port_id, unsigned new_mtu)
{
int ret;
struct rte_eth_conf conf;
@@ -581,7 +563,7 @@ Currently, setting a new MTU and configuring the network interface (up/ down) ar
/* Callback for request of configuring network interface up/down */
static int
- kni_config_network_interface(uint8_t port_id, uint8_t if_up)
+ kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
int ret = 0;
diff --git a/doc/guides/sample_app_ug/l2_forward_cat.rst b/doc/guides/sample_app_ug/l2_forward_cat.rst
index b0c2e109..a10f23a1 100644
--- a/doc/guides/sample_app_ug/l2_forward_cat.rst
+++ b/doc/guides/sample_app_ug/l2_forward_cat.rst
@@ -67,38 +67,28 @@ White paper demonstrating example use case:
Compiling the Application
-------------------------
+.. note::
-Requires ``libpqos`` from Intel's
-`intel-cmt-cat software package <https://github.com/01org/intel-cmt-cat>`_
-hosted on GitHub repository. For installation notes, please see ``README`` file.
+ Requires ``libpqos`` from Intel's
+ `intel-cmt-cat software package <https://github.com/01org/intel-cmt-cat>`_
+ hosted on a GitHub repository. For installation notes, please see the ``README`` file.
-GIT:
+ GIT:
-* https://github.com/01org/intel-cmt-cat
-
-To compile the application export the path to PQoS lib
-and the DPDK source tree and go to the example directory:
-
-.. code-block:: console
+ * https://github.com/01org/intel-cmt-cat
- export PQOS_INSTALL_PATH=/path/to/libpqos
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l2fwd-cat
+Before compiling, export the path to the PQoS lib:
-Set the target, for example:
+ .. code-block:: console
-.. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
+ export PQOS_INSTALL_PATH=/path/to/libpqos
-See the *DPDK Getting Started* Guide for possible ``RTE_TARGET`` values.
-Build the application as follows:
-
-.. code-block:: console
+To compile the sample application see :doc:`compiling`.
- make
+The application is located in the ``l2fwd-cat`` sub-directory.
Running the Application
diff --git a/doc/guides/sample_app_ug/l2_forward_crypto.rst b/doc/guides/sample_app_ug/l2_forward_crypto.rst
index 158f4b44..1e85b4a3 100644
--- a/doc/guides/sample_app_ug/l2_forward_crypto.rst
+++ b/doc/guides/sample_app_ug/l2_forward_crypto.rst
@@ -55,26 +55,9 @@ Also, if MAC addresses updating is enabled, the MAC addresses are affected as fo
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l2fwd-crypto
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- *See the DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l2fwd-crypto`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
index 1029cc8f..54b970f8 100644
--- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst
+++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -97,26 +97,9 @@ in this case enabling a total of four Virtual Functions.
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l2fwd-jobstats
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- *See the DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l2fwd-jobstats`` sub-directory.
Running the Application
-----------------------
@@ -558,7 +541,7 @@ If the table is full, the whole packets table is transmitted using the l2fwd_sen
/* Send the packet on an output interface */
static int
- l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+ l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -593,7 +576,7 @@ however it improves performance:
unsigned lcore_id;
struct lcore_queue_conf *qconf;
struct mbuf_table *m_table;
- uint8_t portid;
+ uint16_t portid;
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
index f579c8fe..4d9cae43 100644
--- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
+++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
@@ -107,26 +107,9 @@ in this case enabling a total of four Virtual Functions.
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l2fwd
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- *See the DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l2fwd`` sub-directory.
Running the Application
-----------------------
@@ -474,7 +457,7 @@ If the table is full, the whole packets table is transmitted using the l2fwd_sen
/* Send the packet on an output interface */
static int
- l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+ l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst
index 6a6b8fbe..19b91e27 100644
--- a/doc/guides/sample_app_ug/l3_forward.rst
+++ b/doc/guides/sample_app_ug/l3_forward.rst
@@ -62,28 +62,9 @@ In the sample application, hash-based forwarding supports IPv4 and IPv6. LPM-bas
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l3fwd
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l3fwd`` sub-directory.
Running the Application
-----------------------
@@ -283,7 +264,7 @@ The get_ipv4_dst_port() function is shown below:
.. code-block:: c
static inline uint8_t
- get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
+ get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
int ret = 0;
union ipv4_5tuple_host key;
@@ -312,7 +293,7 @@ The key code snippet of simple_ipv4_fwd_4pkts() is shown below:
.. code-block:: c
static inline void
- simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *qconf)
+ simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint16_t portid, struct lcore_conf *qconf)
{
// ...
@@ -351,10 +332,10 @@ for LPM-based lookups is done by the get_ipv4_dst_port() function below:
.. code-block:: c
- static inline uint8_t
- get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
+ static inline uint16_t
+ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct, rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? next_hop : portid);
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct, rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? next_hop : portid);
}
diff --git a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
index a6aa4fb1..dfd372fb 100644
--- a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
+++ b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
@@ -252,28 +252,9 @@ Once the application starts, it transitions through three phases:
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l3fwd-acl
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK IPL Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l3fwd-acl`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/l3_forward_power_man.rst b/doc/guides/sample_app_ug/l3_forward_power_man.rst
index ea9c404d..20fcc3c6 100644
--- a/doc/guides/sample_app_ug/l3_forward_power_man.rst
+++ b/doc/guides/sample_app_ug/l3_forward_power_man.rst
@@ -104,28 +104,9 @@ instead of always running to the C0 state waiting for packets.
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l3fwd-power
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``l3fwd-power`` sub-directory.
Running the Application
-----------------------
@@ -181,11 +162,11 @@ responsible for checking if it needs to scale down frequency at run time by chec
struct lcore_conf *qconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
// ...
diff --git a/doc/guides/sample_app_ug/l3_forward_virtual.rst b/doc/guides/sample_app_ug/l3_forward_virtual.rst
index 5f9d8948..95e89651 100644
--- a/doc/guides/sample_app_ug/l3_forward_virtual.rst
+++ b/doc/guides/sample_app_ug/l3_forward_virtual.rst
@@ -64,34 +64,9 @@ The set of LPM rules used by the application is statically configured and loaded
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/l3fwd-vf
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
-
-.. note::
-
- The compiled application is written to the build subdirectory.
- To have the application written to a different location,
- the O=/path/to/build/directory option may be specified in the make command.
+The application is located in the ``l3fwd-vf`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/link_status_intr.rst b/doc/guides/sample_app_ug/link_status_intr.rst
index f9af4749..358524c3 100644
--- a/doc/guides/sample_app_ug/link_status_intr.rst
+++ b/doc/guides/sample_app_ug/link_status_intr.rst
@@ -55,32 +55,9 @@ and the behavior of L2 forwarding each time the link status changes.
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/link_status_interrupt
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
-
-.. note::
-
- The compiled application is written to the build subdirectory.
- To have the application written to a different location,
- the O=/path/to/build/directory option may be specified on the make command line.
+The application is located in the ``link_status_interrupt`` sub-directory.
Running the Application
-----------------------
@@ -219,7 +196,7 @@ An example callback function that has been written as indicated below.
.. code-block:: c
static void
- lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+ lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param)
{
struct rte_eth_link link;
@@ -405,7 +382,7 @@ If the table is full, the whole packets table is transmitted using the lsi_send_
/* Send the packet on an output interface */
static int
- lsi_send_packet(struct rte_mbuf *m, uint8_t port)
+ lsi_send_packet(struct rte_mbuf *m, uint16_t port)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
diff --git a/doc/guides/sample_app_ug/load_balancer.rst b/doc/guides/sample_app_ug/load_balancer.rst
index e101a5f2..18b29b24 100644
--- a/doc/guides/sample_app_ug/load_balancer.rst
+++ b/doc/guides/sample_app_ug/load_balancer.rst
@@ -86,24 +86,9 @@ The routing logic is LPM based, with all the worker threads sharing the same LPM
Compiling the Application
-------------------------
-The sequence of steps used to build the application is:
+To compile the sample application see :doc:`compiling`.
-#. Export the required environment variables:
-
- .. code-block:: console
-
- export RTE_SDK=<Path to the DPDK installation folder>
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application executable file:
-
- .. code-block:: console
-
- cd ${RTE_SDK}/examples/load_balancer
- make
-
- For more details on how to build the DPDK libraries and sample applications,
- please refer to the *DPDK Getting Started Guide.*
+The application is located in the ``load_balancer`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/multi_process.rst b/doc/guides/sample_app_ug/multi_process.rst
index d4df2fa7..95a32432 100644
--- a/doc/guides/sample_app_ug/multi_process.rst
+++ b/doc/guides/sample_app_ug/multi_process.rst
@@ -40,31 +40,13 @@ Example Applications
Building the Sample Applications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
The multi-process example applications are built in the same way as other sample applications,
and as documented in the *DPDK Getting Started Guide*.
-To build all the example applications:
-
-#. Set RTE_SDK and go to the example directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/multi_process
-
-#. Set the target (a default target will be used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-#. Build the applications:
- .. code-block:: console
+To compile the sample application see :doc:`compiling`.
- make
+The applications are located in the ``multi_process`` sub-directory.
.. note::
diff --git a/doc/guides/sample_app_ug/netmap_compatibility.rst b/doc/guides/sample_app_ug/netmap_compatibility.rst
index 030fd980..955f86b9 100644
--- a/doc/guides/sample_app_ug/netmap_compatibility.rst
+++ b/doc/guides/sample_app_ug/netmap_compatibility.rst
@@ -121,29 +121,12 @@ The bridge application is an example largely based on the bridge example shipped
It shows how a minimal Netmap application with minimal and straightforward source code changes can be run on top of the DPDK.
Please refer to ``$RTE_SDK/examples/netmap_compat/bridge/bridge.c`` for an example of a ported application.
-Compiling the "bridge" Sample Application
------------------------------------------
+Compiling the Application
+-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/netmap_compat
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide for Linux* for possible ``RTE_TARGET`` values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``netmap_compat`` sub-directory.
Running the "bridge" Sample Application
---------------------------------------
diff --git a/doc/guides/sample_app_ug/packet_ordering.rst b/doc/guides/sample_app_ug/packet_ordering.rst
index ef851500..098cc89f 100644
--- a/doc/guides/sample_app_ug/packet_ordering.rst
+++ b/doc/guides/sample_app_ug/packet_ordering.rst
@@ -51,28 +51,11 @@ The application uses at least three CPU cores:
from the reorder buffer and sends them to the NIC ports for transmission.
Compiling the Application
---------------------------
+-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/helloworld
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started* Guide for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``packet_ordering`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/performance_thread.rst b/doc/guides/sample_app_ug/performance_thread.rst
index a55a6246..57391caf 100644
--- a/doc/guides/sample_app_ug/performance_thread.rst
+++ b/doc/guides/sample_app_ug/performance_thread.rst
@@ -73,28 +73,10 @@ invalid.
Compiling the Application
-------------------------
-The application is located in the sample application folder in the
-``performance-thread`` folder.
-#. Go to the example applications folder
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/performance-thread/l3fwd-thread
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Linux Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- make
+To compile the sample application see :doc:`compiling`.
+The application is located in the ``performance-thread/l3fwd-thread`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/ptpclient.rst b/doc/guides/sample_app_ug/ptpclient.rst
index d3ef1d11..9cbb6c2a 100644
--- a/doc/guides/sample_app_ug/ptpclient.rst
+++ b/doc/guides/sample_app_ug/ptpclient.rst
@@ -78,39 +78,20 @@ The adjustment for slave can be represented as:
If the command line parameter ``-T 1`` is used the application also
synchronizes the PTP PHC clock with the Linux kernel clock.
-
Compiling the Application
-------------------------
-To compile the application, export the path to the DPDK source tree and edit
-the ``config/common_linuxapp`` configuration file to enable IEEE1588:
-
-.. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
-
- # Edit common_linuxapp and set the following options:
- CONFIG_RTE_LIBRTE_IEEE1588=y
-
-Set the target, for example:
+To compile the sample application see :doc:`compiling`.
-.. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started* Guide for possible ``RTE_TARGET`` values.
-
-Build the application as follows:
-
-.. code-block:: console
+The application is located in the ``ptpclient`` sub-directory.
- # Recompile DPDK.
- make install T=$RTE_TARGET
+.. note::
+ To compile the application edit the ``config/common_linuxapp`` configuration file to enable IEEE1588
+ and then recompile DPDK:
- # Compile the application.
- cd ${RTE_SDK}/examples/ptpclient
- make
+ .. code-block:: console
+
+ CONFIG_RTE_LIBRTE_IEEE1588=y
Running the Application
-----------------------
@@ -257,7 +238,7 @@ PTP IEEE1588 L2 functionality.
.. code-block:: c
void
- parse_ptp_frames(uint8_t portid, struct rte_mbuf *m) {
+ parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
struct ptp_header *ptp_hdr;
struct ether_hdr *eth_hdr;
uint16_t eth_type;
diff --git a/doc/guides/sample_app_ug/qos_metering.rst b/doc/guides/sample_app_ug/qos_metering.rst
index e1a6ac7a..067de9e1 100644
--- a/doc/guides/sample_app_ug/qos_metering.rst
+++ b/doc/guides/sample_app_ug/qos_metering.rst
@@ -64,29 +64,9 @@ all the incoming packets are colored as green.
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/qos_meter
-
-#. Set the target
- (a default target is used if not specified):
-
- .. note::
-
- This application is intended as a linuxapp only.
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``qos_meter`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/qos_scheduler.rst b/doc/guides/sample_app_ug/qos_scheduler.rst
index a6654cb5..1c38d191 100644
--- a/doc/guides/sample_app_ug/qos_scheduler.rst
+++ b/doc/guides/sample_app_ug/qos_scheduler.rst
@@ -58,31 +58,14 @@ The TX thread, if present, reads from the TX ring and write the packets to the T
Compiling the Application
-------------------------
-To compile the application:
+To compile the sample application see :doc:`compiling`.
-#. Go to the sample application directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/qos_sched
-
-#. Set the target (a default target is used if not specified). For example:
+The application is located in the ``qos_sched`` sub-directory.
.. note::
This application is intended as a linuxapp only.
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application:
-
- .. code-block:: console
-
- make
-
.. note::
To get statistics on the sample app using the command line interface as described in the next section,
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index 2c3a4320..32bd78d7 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -88,30 +88,12 @@ as shown in :numref:`figure_ring_pipeline_perf_setup`.
Ring-based Processing Pipeline Performance Setup
-
Compiling the Application
-------------------------
-#. Go to the example directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/quota_watermark
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
+To compile the sample application see :doc:`compiling`.
- make
+The application is located in the ``quota_watermark`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/rxtx_callbacks.rst b/doc/guides/sample_app_ug/rxtx_callbacks.rst
index 8d1bb86f..928c9635 100644
--- a/doc/guides/sample_app_ug/rxtx_callbacks.rst
+++ b/doc/guides/sample_app_ug/rxtx_callbacks.rst
@@ -45,23 +45,9 @@ prior to transmission to calculate the elapsed time, in CPU cycles.
Compiling the Application
-------------------------
-To compile the application export the path to the DPDK source tree and go to
-the example directory:
+To compile the sample application see :doc:`compiling`.
-.. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
-
- cd ${RTE_SDK}/examples/rxtx_callbacks
-
-
-Set the target, for example:
-
-.. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started* Guide for possible ``RTE_TARGET`` values.
+The application is located in the ``rxtx_callbacks`` sub-directory.
The callbacks feature requires that the ``CONFIG_RTE_ETHDEV_RXTX_CALLBACKS``
setting is on in the ``config/common_`` config file that applies to the
@@ -71,13 +57,6 @@ target. This is generally on by default:
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
-Build the application as follows:
-
-.. code-block:: console
-
- make
-
-
Running the Application
-----------------------
@@ -124,7 +103,7 @@ comments:
.. code-block:: c
static inline int
- port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -196,7 +175,7 @@ all packets received:
.. code-block:: c
static uint16_t
- add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+ add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
unsigned i;
@@ -222,7 +201,7 @@ packets prior to transmission:
.. code-block:: c
static uint16_t
- calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+ calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
uint64_t cycles = 0;
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
index c2a5f20a..8891f3b3 100644
--- a/doc/guides/sample_app_ug/server_node_efd.rst
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -108,26 +108,9 @@ that this is a new flow, which is dropped.
Compiling the Application
-------------------------
-The sequence of steps used to build the application is:
-
-#. Export the required environment variables:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application executable file:
-
- .. code-block:: console
-
- cd ${RTE_SDK}/examples/server_node_efd/
- make
-
- For more details on how to build the DPDK libraries and sample
- applications,
- please refer to the *DPDK Getting Started Guide.*
+To compile the sample application see :doc:`compiling`.
+The application is located in the ``server_node_efd`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/service_cores.rst b/doc/guides/sample_app_ug/service_cores.rst
new file mode 100644
index 00000000..36c1b3c5
--- /dev/null
+++ b/doc/guides/sample_app_ug/service_cores.rst
@@ -0,0 +1,172 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Service Cores Sample Application
+================================
+
+The service cores sample application demonstrates the service cores capabilities
+of DPDK. The service cores infrastructure is part of the DPDK EAL, and allows
+any DPDK component to register a service. A service is a work item or task that
+requires CPU time to perform its duty.
+
+This sample application registers 5 dummy services. These 5 services are used
+to show how the service_cores API can be used to orchestrate these services to
+run on different service lcores. This orchestration is done by calling the
+service cores APIs, however the sample application introduces a "profile"
+concept to contain the service mapping details. Note that the profile concept
+is application specific, and not a part of the service cores API.
+
+
+Compiling the Application
+-------------------------
+
+#. Go to the example directory:
+
+ .. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+ cd ${RTE_SDK}/examples/service_cores
+
+#. Set the target (a default target is used if not specified). For example:
+
+ .. code-block:: console
+
+ export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+ See the *DPDK Getting Started* Guide for possible RTE_TARGET values.
+
+#. Build the application:
+
+ .. code-block:: console
+
+ make
+
+Running the Application
+-----------------------
+
+To run the example, just execute the binary. Since the application dynamically
+adds service cores in the application code itself, there is no requirement to
+pass a service core-mask as an EAL argument at startup time.
+
+.. code-block:: console
+
+ $ ./build/service_cores
+
+
+Explanation
+-----------
+
+The following sections provide some explanation of the code, focusing on
+registering services from an application's point of view, and on modifying the
+service core counts and mappings at runtime.
+
+
+Registering a Service
+~~~~~~~~~~~~~~~~~~~~~
+
+The following code section shows how to register a service as an application.
+Note that the service component header must be included by the application in
+order to register services: ``rte_service_component.h``, in addition
+to the ordinary service cores header ``rte_service.h`` which provides
+the runtime functions to add, remove and remap service cores.
+
+.. code-block:: c
+
+ struct rte_service_spec service = {
+ .name = "service_name",
+ };
+ uint32_t id;
+ int ret = rte_service_component_register(&service, &id);
+ if (ret)
+ return -1;
+
+ /* set the service itself to be ready to run. In the case of
+ * ethdev, eventdev etc PMDs, this will be set when the
+ * appropriate configure or setup function is called.
+ */
+ rte_service_component_runstate_set(id, 1);
+
+ /* Collect statistics for the service */
+ rte_service_set_stats_enable(id, 1);
+
+ /* The application sets the service to running state. Note that this
+ * function enables the service to run - while the 'component' version
+ * of this function (as above) marks the service itself as ready */
+ ret = rte_service_runstate_set(id, 1);
+
+
+Controlling A Service Core
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section demonstrates how to add a service core. The ``rte_service.h``
+header file provides the functions for dynamically adding and removing cores.
+The APIs to add and remove cores use lcore IDs similar to existing DPDK
+functions.
+
+These are the functions to start a service core, and have it run a service:
+
+.. code-block:: c
+
+ /* the lcore ID to use as a service core */
+ uint32_t service_core_id = 7;
+ ret = rte_service_lcore_add(service_core_id);
+ if(ret)
+ return -1;
+
+ /* service cores are in "stopped" state when added, so start it */
+ ret = rte_service_lcore_start(service_core_id);
+ if(ret)
+ return -1;
+
+ /* map a service to the service core, causing it to run the service */
+ uint32_t service_id; /* ID of a registered service */
+ uint32_t enable = 1; /* 1 maps the service, 0 unmaps */
+ ret = rte_service_map_lcore_set(service_id, service_core_id, enable);
+ if(ret)
+ return -1;
+
+
+Removing A Service Core
+~~~~~~~~~~~~~~~~~~~~~~~
+
+To remove a service core, the steps are similar to adding but in reverse order.
+Note that it is not allowed to remove a service core if the service is running,
+and the service-core is the only core running that service (see documentation
+for ``rte_service_lcore_stop`` function for details).
+
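+A minimal sketch of the reverse sequence, assuming the ``service_id`` and
+``service_core_id`` variables from the previous sections:
+
+.. code-block:: c
+
+ /* unmap the service from the core, so the core no longer runs it */
+ ret = rte_service_map_lcore_set(service_id, service_core_id, 0);
+ if (ret)
+ return -1;
+
+ /* stop the service core */
+ ret = rte_service_lcore_stop(service_core_id);
+ if (ret)
+ return -1;
+
+ /* release the lcore from service core duty */
+ ret = rte_service_lcore_del(service_core_id);
+ if (ret)
+ return -1;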
+
+Conclusion
+~~~~~~~~~~
+
+The service cores infrastructure provides DPDK with two main features. The first
+is to abstract away hardware differences: the service core can provide CPU
+cycles to a software fallback implementation, allowing the application to be
+abstracted from the difference in HW / SW availability.
+method of registering functions to be run, allowing the running of the
+functions to be scaled across multiple CPUs.
diff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst
index c39b0332..9c78f494 100644
--- a/doc/guides/sample_app_ug/skeleton.rst
+++ b/doc/guides/sample_app_ug/skeleton.rst
@@ -39,33 +39,12 @@ It is intended as a demonstration of the basic components of a DPDK forwarding
application. For more detailed implementations see the L2 and L3 forwarding
sample applications.
-
Compiling the Application
-------------------------
-To compile the application export the path to the DPDK source tree and go to
-the example directory:
-
-.. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
-
- cd ${RTE_SDK}/examples/skeleton
-
-Set the target, for example:
-
-.. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-See the *DPDK Getting Started* Guide for possible ``RTE_TARGET`` values.
-
-Build the application as follows:
-
-.. code-block:: console
-
- make
+To compile the sample application see :doc:`compiling`.
+The application is located in the ``skeleton`` sub-directory.
Running the Application
-----------------------
@@ -160,7 +139,7 @@ Forwarding application is shown below:
.. code-block:: c
static inline int
- port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -241,8 +220,8 @@ looks like the following:
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
diff --git a/doc/guides/sample_app_ug/tep_termination.rst b/doc/guides/sample_app_ug/tep_termination.rst
index 087823b1..04bf8a2c 100644
--- a/doc/guides/sample_app_ug/tep_termination.rst
+++ b/doc/guides/sample_app_ug/tep_termination.rst
@@ -121,39 +121,16 @@ The example in this section have been validated with the following distributions
Compiling the Sample Code
-------------------------
-#. Compile vhost lib:
- To enable vhost, turn on vhost library in the configure file config/common_linuxapp.
+To enable vhost, turn on vhost library in the configure file
+``config/common_linuxapp``.
.. code-block:: console
CONFIG_RTE_LIBRTE_VHOST=y
-
-#. Go to the examples directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/tep_termination
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the DPDK Getting Started Guide for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- cd ${RTE_SDK}
- make config ${RTE_TARGET}
- make install ${RTE_TARGET}
- cd ${RTE_SDK}/examples/tep_termination
- make
+Then follow the instructions in :doc:`compiling` to compile the sample
+application.
Running the Sample Code
-----------------------
diff --git a/doc/guides/sample_app_ug/test_pipeline.rst b/doc/guides/sample_app_ug/test_pipeline.rst
index 95c7e0fc..f538cd05 100644
--- a/doc/guides/sample_app_ug/test_pipeline.rst
+++ b/doc/guides/sample_app_ug/test_pipeline.rst
@@ -55,28 +55,12 @@ The application uses three CPU cores:
Test Pipeline Application
-
Compiling the Application
-------------------------
+To compile the sample application see :doc:`compiling`.
-#. Go to the app/test directory:
-
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/app/test/test-pipeline
-
-#. Set the target (a default target is used if not specified):
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-#. Build the application:
-
- .. code-block:: console
+The application is located in the ``$RTE_SDK/test/test-pipeline`` directory.
- make
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/timer.rst b/doc/guides/sample_app_ug/timer.rst
index 00b69532..46a3a2fa 100644
--- a/doc/guides/sample_app_ug/timer.rst
+++ b/doc/guides/sample_app_ug/timer.rst
@@ -37,26 +37,9 @@ This application prints some messages from different lcores regularly, demonstra
Compiling the Application
-------------------------
-#. Go to the example directory:
+To compile the sample application see :doc:`compiling`.
- .. code-block:: console
-
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/timer
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible *RTE_TARGET* values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``timer`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index a2a3909e..5735adbb 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -55,20 +55,12 @@ puts back to the same physical NIC port.
Build
~~~~~
-Follow the *Getting Started Guide for Linux* on generic info about
-environment setup and building DPDK from source.
+To compile the sample application see :doc:`compiling`.
-In this example, you need build DPDK both on the host and inside guest.
-Also, you need build this example.
-
-.. code-block:: console
-
- export RTE_SDK=/path/to/dpdk_source
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- cd ${RTE_SDK}/examples/vhost
- make
+The application is located in the ``vhost`` sub-directory.
+
+.. note::
+
+ In this example, you need to build DPDK both on the host and inside the guest.
Start the vswitch example
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/vhost_scsi.rst b/doc/guides/sample_app_ug/vhost_scsi.rst
index 8be069e0..1b4dab32 100644
--- a/doc/guides/sample_app_ug/vhost_scsi.rst
+++ b/doc/guides/sample_app_ug/vhost_scsi.rst
@@ -51,23 +51,14 @@ Testing steps
This section shows the steps for starting a VM with the block device as
a fast data path for critical applications.
-Build
-~~~~~
+Compiling the Application
+-------------------------
-Follow the *Getting Started Guide for Linux* on generic info about
-environment setup and building DPDK from source.
+To compile the sample application see :doc:`compiling`.
-In this example, you need build DPDK both on the host and inside guest.
-Also, you need build this example.
-
-.. code-block:: console
-
- export RTE_SDK=/path/to/dpdk_source
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- cd ${RTE_SDK}/examples/vhost_scsi
- make
+The application is located in the ``vhost_scsi`` sub-directory.
+
+You will also need to build DPDK both on the host and inside the guest.
Start the vhost_scsi example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst
index 05c26b03..fe53706e 100644
--- a/doc/guides/sample_app_ug/vm_power_management.rst
+++ b/doc/guides/sample_app_ug/vm_power_management.rst
@@ -201,9 +201,12 @@ Compiling and Running the Host Application
Compiling
~~~~~~~~~
-#. export RTE_SDK=/path/to/rte_sdk
-#. cd ${RTE_SDK}/examples/vm_power_manager
-#. make
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``vm_power_manager`` sub-directory.
Running
~~~~~~~
diff --git a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
index 70e1d19e..4a6c70d2 100644
--- a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
@@ -86,26 +86,11 @@ No command-line options are taken by this application apart from the standard EA
Compiling the Application
-------------------------
-#. Go to the examples directory:
- .. code-block:: console
- export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SDK}/examples/vmdq_dcb
+To compile the sample application see :doc:`compiling`.
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- make
+The application is located in the ``vmdq_dcb`` sub-directory.
Running the Application
-----------------------
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index e8303f3b..4c0d2ced 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -94,10 +94,6 @@ See the DPDK Getting Started Guides for more information on these options.
Display the version information on startup.
-* ``--xen-dom0``
-
- Support application running on Xen Domain0 without hugetlbfs.
-
* ``--syslog``
Set the syslog facility.
@@ -110,6 +106,10 @@ See the DPDK Getting Started Guides for more information on these options.
Specify the directory where the hugetlbfs is mounted.
+* ``--mbuf-pool-ops-name``:
+
+ Name of the mbuf pool ops to use.
+
* ``--proc-type``
Set the type of the current process.
@@ -362,6 +362,7 @@ The commandline options are:
csum
icmpecho
ieee1588
+ tm
* ``--rss-ip``
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index 2ed62f5b..9789139a 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -347,6 +347,13 @@ The available information categories are:
* ``ieee1588``: Demonstrate L2 IEEE1588 V2 PTP timestamping for RX and TX. Requires ``CONFIG_RTE_LIBRTE_IEEE1588=y``.
+* ``tm``: Traffic Management forwarding mode.
+ Demonstrates the use of ethdev traffic management APIs and the softnic PMD for
+ QoS traffic management. In this mode, a 5-level hierarchical QoS scheduler is
+ available as a default option that can be enabled through the CLI. The user can
+ also modify the default hierarchy, or specify a new hierarchy, through the CLI to
+ implement a QoS scheduler. Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y`` and ``CONFIG_RTE_LIBRTE_SCHED=y``.
+
Note: TX timestamping is only available in the "Full Featured" TX path. To force ``testpmd`` into this mode set ``--txqflags=0``.
Example::
@@ -392,7 +399,7 @@ ddp get info
Display information about dynamic device personalization (DDP) profile::
- testpmd> ddp get info (profile_patch)
+ testpmd> ddp get info (profile_path)
show vf stats
~~~~~~~~~~~~~
@@ -408,6 +415,14 @@ Reset VF statistics::
testpmd> clear vf stats (port_id) (vf_id)
+show port pctype mapping
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+List all items from the pctype mapping table::
+
+ testpmd> show port (port_id) pctype mapping
+
+
Configuration Functions
-----------------------
@@ -842,6 +857,49 @@ Where:
Check the NIC Datasheet for hardware limits.
+RSS queue region
+~~~~~~~~~~~~~~~~
+
+Set RSS queue region span on a port::
+
+ testpmd> set port (port_id) queue-region region_id (value) \
+ queue_start_index (value) queue_num (value)
+
+Set flowtype mapping on a RSS queue region on a port::
+
+ testpmd> set port (port_id) queue-region region_id (value) flowtype (value)
+
+where:
+
+* For the flow type (pctype) of a packet, the specific index for each type is
+ defined in the file ``i40e_type.h`` as ``enum i40e_filter_pctype``.
+
+Set user priority mapping on a RSS queue region on a port::
+
+ testpmd> set port (port_id) queue-region UP (value) region_id (value)
+
+Flush all queue region related configuration on a port::
+
+ testpmd> set port (port_id) queue-region flush (on|off)
+
+where:
+
+* "on"is just an enable function which server for other configuration,
+ it is for all configuration about queue region from up layer,
+ at first will only keep in DPDK softwarestored in driver,
+ only after "flush on", it commit all configuration to HW.
+ "off" is just clean all configuration about queue region just now,
+ and restore all to DPDK i40e driver default config when start up.
+
+Show all queue region related configuration info on a port::
+
+ testpmd> show port (port_id) queue-region
+
+.. note::
+
+ Queue regions are currently only supported on the PF, so these commands
+ only configure queue regions on PF ports.
+
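+A hypothetical configuration sequence (illustrative IDs and values), creating
+region 0 spanning queues 0 to 3, mapping a flow type to it and committing the
+configuration to hardware::
+
+   testpmd> set port 0 queue-region region_id 0 queue_start_index 0 queue_num 4
+   testpmd> set port 0 queue-region region_id 0 flowtype 31
+   testpmd> set port 0 queue-region flush on
+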
csum parse-tunnel
~~~~~~~~~~~~~~~~~
@@ -898,12 +956,12 @@ Display the status of TCP Segmentation Offload::
testpmd> tso show (port_id)
-gro
-~~~
+set port - gro
+~~~~~~~~~~~~~~
Enable or disable GRO in ``csum`` forwarding engine::
- testpmd> gro (on|off) (port_id)
+ testpmd> set port <port_id> gro on|off
If enabled, the csum forwarding engine will perform GRO on the TCP/IPv4
packets received from the given port.
@@ -914,23 +972,89 @@ GRO. By default, GRO is disabled for all ports.
.. note::
When GRO is enabled for a port, TCP/IPv4 packets received from the port
- will be performed GRO. After GRO, the merged packets are multi-segments.
- But csum forwarding engine doesn't support to calculate TCP checksum
- for multi-segment packets in SW. So please select TCP HW checksum
- calculation for the port which GROed packets are transmitted to.
+ will undergo GRO. After GRO, all merged packets have bad
+ checksums, since the GRO library doesn't re-calculate checksums for
+ the merged packets. Therefore, if users want the merged packets to
+ have correct checksums, please select HW IP checksum calculation and
+ HW TCP checksum calculation for the port which the merged packets are
+ transmitted to.
+
+show port - gro
+~~~~~~~~~~~~~~~
-gro set
-~~~~~~~
+Display GRO configuration for a given port::
-Set max flow number and max packet number per-flow for GRO::
+ testpmd> show port <port_id> gro
+
+set gro flush
+~~~~~~~~~~~~~
- testpmd> gro set (max_flow_num) (max_item_num_per_flow) (port_id)
+Set the cycle to flush the GROed packets from reassembly tables::
-The product of ``max_flow_num`` and ``max_item_num_per_flow`` is the max
-number of packets a GRO table can store.
+ testpmd> set gro flush <cycles>
-If current packet number is greater than or equal to the max value, GRO
-will stop processing incoming packets.
+When GRO is enabled, the csum forwarding engine performs GRO on received
+packets, and the GROed packets are stored in reassembly tables. Users
+can use this command to determine when the GROed packets are flushed
+from the reassembly tables.
+
+The ``cycles`` is measured in GRO operation times. The csum forwarding
+engine flushes the GROed packets from the tables every ``cycles`` GRO
+operations.
+
+By default, the value of ``cycles`` is 1, which means the GROed packets are
+flushed from the reassembly tables as soon as one GRO operation finishes. The value
+of ``cycles`` should be in the range of 1 to ``GRO_MAX_FLUSH_CYCLES``.
+
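+For example, to flush the GROed packets from the reassembly tables every two
+GRO operations::
+
+   testpmd> set gro flush 2
+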
+Please note that a large value of ``cycles`` may degrade TCP/IP stack
+performance, because the GROed packets are delayed before they reach the
+stack, which causes more duplicated ACKs and TCP retransmissions.
+
+set port - gso
+~~~~~~~~~~~~~~
+
+Toggle per-port GSO support in ``csum`` forwarding engine::
+
+ testpmd> set port <port_id> gso on|off
+
+If enabled, the csum forwarding engine will perform GSO on supported IPv4
+packets, transmitted on the given port.
+
+If disabled, packets transmitted on the given port will not undergo GSO.
+By default, GSO is disabled for all ports.
+
+.. note::
+
+ When GSO is enabled on a port, supported IPv4 packets transmitted on that
+ port undergo GSO. Afterwards, the segmented packets are represented by
+ multi-segment mbufs; however, the csum forwarding engine doesn't calculate
+ checksums for GSO'd segments in SW. As a result, if users want correct
+ checksums in GSO segments, they should enable HW checksum calculation for
+ GSO-enabled ports.
+
+ For example, HW checksum calculation for VxLAN GSO'd packets may be enabled
+ by setting the following options in the csum forwarding engine::
+
+ testpmd> csum set outer_ip hw <port_id>
+
+ testpmd> csum set ip hw <port_id>
+
+ testpmd> csum set tcp hw <port_id>
+
+set gso segsz
+~~~~~~~~~~~~~
+
+Set the maximum GSO segment size (measured in bytes), which includes the
+packet header and the packet payload for GSO-enabled ports (global)::
+
+ testpmd> set gso segsz <length>
+
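+For example, to cap GSO output segments at 1518 bytes (an illustrative value
+matching a standard Ethernet frame)::
+
+   testpmd> set gso segsz 1518
+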
+show port - gso
+~~~~~~~~~~~~~~~
+
+Display the status of Generic Segmentation Offload for a given port::
+
+ testpmd> show port <port_id> gso
mac_addr add
~~~~~~~~~~~~
@@ -1096,6 +1220,17 @@ For example, to set rx queue 2 on port 0 to mapping 5::
testpmd>set stat_qmap rx 0 2 5
+set xstats-hide-zero
+~~~~~~~~~~~~~~~~~~~~
+
+Set the option to hide zero values for xstats display::
+
+ testpmd> set xstats-hide-zero on|off
+
+.. note::
+
+ By default, the zero values are displayed for xstats.
+
set port - rx/tx (for VF)
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1697,6 +1832,23 @@ Enable/disable the E-tag support::
testpmd> port config (port_id|all) l2-tunnel E-tag (enable|disable)
+port config pctype mapping
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Reset pctype mapping table::
+
+ testpmd> port config (port_id) pctype mapping reset
+
+Update hardware defined pctype to software defined flow type mapping table::
+
+ testpmd> port config (port_id) pctype mapping update (pctype_id_0[,pctype_id_1]*) (flow_type_id)
+
+where:
+
+* ``pctype_id_x``: hardware pctype id as index of bit in bitmask value of the pctype mapping table.
+
+* ``flow_type_id``: software flow type id as the index of the pctype mapping table.
+
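+A hypothetical example (illustrative IDs), mapping hardware pctype 31 to
+software flow type 4 on port 0::
+
+   testpmd> port config 0 pctype mapping update 31 4
+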
Link Bonding Functions
----------------------
@@ -1917,6 +2069,261 @@ For example, to set the high bit in the register from the example above::
port 0 PCI register at offset 0xEE00: 0x8000000A (2147483658)
+Traffic Management
+------------------
+
+The following section shows functions for configuring traffic management on
+on the ethernet device through the use of generic TM API.
+
+show port traffic management capability
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Show traffic management capability of the port::
+
+ testpmd> show port tm cap (port_id)
+
+show port traffic management capability (hierarchy level)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Show traffic management hierarchy level capability of the port::
+
+ testpmd> show port tm cap (port_id) (level_id)
+
+show port traffic management capability (hierarchy node level)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Show the traffic management hierarchy node capability of the port::
+
+ testpmd> show port tm cap (port_id) (node_id)
+
+show port traffic management hierarchy node type
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Show the port traffic management hierarchy node type::
+
+ testpmd> show port tm node type (port_id) (node_id)
+
+show port traffic management hierarchy node stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Show the port traffic management hierarchy node statistics::
+
+ testpmd> show port tm node stats (port_id) (node_id) (clear)
+
+where:
+
+* ``clear``: When this parameter has a non-zero value, the statistics counters
+ are cleared (i.e. set to zero) immediately after they have been read,
+ otherwise the statistics counters are left untouched.
+
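+For example, to read and then clear the statistics of node 5 on port 0 (the
+node ID is illustrative)::
+
+   testpmd> show port tm node stats 0 5 1
+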
+Add port traffic management private shaper profile
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add the port traffic management private shaper profile::
+
+ testpmd> add port tm node shaper profile (port_id) (shaper_profile_id) \
+ (tb_rate) (tb_size) (packet_length_adjust)
+
+where:
+
+* ``shaper_profile_id``: Shaper profile ID for the new profile.
+* ``tb_rate``: Token bucket rate (bytes per second).
+* ``tb_size``: Token bucket size (bytes).
+* ``packet_length_adjust``: The value (bytes) to be added to the length of
+ each packet for the purpose of shaping. This parameter value can be used to
+ correct the packet length with the framing overhead bytes that are consumed
+ on the wire.
+
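+For example, a 10 Mbps shaper (1250000 bytes per second) with a 4 KB token
+bucket and a 24-byte framing overhead adjustment might be added as follows
+(all values are illustrative)::
+
+   testpmd> add port tm node shaper profile 0 1 1250000 4096 24
+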
+Delete port traffic management private shaper profile
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Delete the port traffic management private shaper profile::
+
+ testpmd> del port tm node shaper profile (port_id) (shaper_profile_id)
+
+where:
+
+* ``shaper_profile_id``: Shaper profile ID that needs to be deleted.
+
+Add port traffic management shared shaper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create the port traffic management shared shaper::
+
+ testpmd> add port tm node shared shaper (port_id) (shared_shaper_id) \
+ (shaper_profile_id)
+
+where:
+
+* ``shared_shaper_id``: Shared shaper ID to be created.
+* ``shaper_profile_id``: Shaper profile ID for the shared shaper.
+
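+For example, to create shared shaper 5 on port 0 from shaper profile 1 (both
+IDs are illustrative)::
+
+   testpmd> add port tm node shared shaper 0 5 1
+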
+Set port traffic management shared shaper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Update the port traffic management shared shaper::
+
+ testpmd> set port tm node shared shaper (port_id) (shared_shaper_id) \
+ (shaper_profile_id)
+
+where:
+
+* ``shared_shaper_id``: Shared shaper ID to be updated.
+* ``shaper_profile_id``: Shaper profile ID for the shared shaper.
+
+Delete port traffic management shared shaper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Delete the port traffic management shared shaper::
+
+ testpmd> del port tm node shared shaper (port_id) (shared_shaper_id)
+
+where:
+
+* ``shared_shaper_id``: Shared shaper ID to be deleted.
+
+Set port traffic management hierarchy node private shaper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the port traffic management hierarchy node private shaper::
+
+ testpmd> set port tm node shaper profile (port_id) (node_id) \
+ (shaper_profile_id)
+
+where:
+
+* ``shaper_profile_id``: Private shaper profile ID to be enabled on the
+ hierarchy node.
+
+Add port traffic management WRED profile
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create a new WRED profile::
+
+ testpmd> add port tm node wred profile (port_id) (wred_profile_id) \
+ (color_g) (min_th_g) (max_th_g) (maxp_inv_g) (wq_log2_g) \
+ (color_y) (min_th_y) (max_th_y) (maxp_inv_y) (wq_log2_y) \
+ (color_r) (min_th_r) (max_th_r) (maxp_inv_r) (wq_log2_r)
+
+where:
+
+* ``wred_profile_id``: Identifier for the newly created WRED profile
+* ``color_g``: Packet color (green)
+* ``min_th_g``: Minimum queue threshold for packet with green color
+* ``max_th_g``: Maximum queue threshold for packet with green color
+* ``maxp_inv_g``: Inverse of packet marking probability maximum value (maxp)
+* ``wq_log2_g``: Negated log2 of queue weight (wq)
+* ``color_y``: Packet color (yellow)
+* ``min_th_y``: Minimum queue threshold for packet with yellow color
+* ``max_th_y``: Maximum queue threshold for packet with yellow color
+* ``maxp_inv_y``: Inverse of packet marking probability maximum value (maxp)
+* ``wq_log2_y``: Negated log2 of queue weight (wq)
+* ``color_r``: Packet color (red)
+* ``min_th_r``: Minimum queue threshold for packet with red color
+* ``max_th_r``: Maximum queue threshold for packet with red color
+* ``maxp_inv_r``: Inverse of packet marking probability maximum value (maxp)
+* ``wq_log2_r``: Negated log2 of queue weight (wq)
+
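+For example, assuming packet colors are written as ``g``, ``y`` and ``r``
+(the exact token format depends on the implementation), a WRED profile with
+illustrative thresholds might be added as follows::
+
+   testpmd> add port tm node wred profile 0 1 g 20 40 10 9 y 20 40 10 9 r 20 40 10 9
+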
+Delete port traffic management WRED profile
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Delete the WRED profile::
+
+ testpmd> del port tm node wred profile (port_id) (wred_profile_id)
+
+Add port traffic management hierarchy nonleaf node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add nonleaf node to port traffic management hierarchy::
+
+ testpmd> add port tm nonleaf node (port_id) (node_id) (parent_node_id) \
+ (priority) (weight) (level_id) (shaper_profile_id) \
+ (n_sp_priorities) (stats_mask) (n_shared_shapers) \
+        [(shared_shaper_0) (shared_shaper_1) ...]
+
+where:
+
+* ``parent_node_id``: Node ID of the parent.
+* ``priority``: Node priority (highest node priority is zero). This is used by
+ the SP algorithm running on the parent node for scheduling this node.
+* ``weight``: Node weight (lowest weight is one). The node weight is relative
+ to the weight sum of all siblings that have the same priority. It is used by
+ the WFQ algorithm running on the parent node for scheduling this node.
+* ``level_id``: Hierarchy level of the node.
+* ``shaper_profile_id``: Shaper profile ID of the private shaper to be used by
+ the node.
+* ``n_sp_priorities``: Number of strict priorities.
+* ``stats_mask``: Mask of statistics counter types to be enabled for this node.
+* ``n_shared_shapers``: Number of shared shapers.
+* ``shared_shaper_id``: Shared shaper ID.
+
+Add port traffic management hierarchy leaf node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add leaf node to port traffic management hierarchy::
+
+ testpmd> add port tm leaf node (port_id) (node_id) (parent_node_id) \
+ (priority) (weight) (level_id) (shaper_profile_id) \
+ (cman_mode) (wred_profile_id) (stats_mask) (n_shared_shapers) \
+        [(shared_shaper_id) (shared_shaper_id) ...]
+
+where:
+
+* ``parent_node_id``: Node ID of the parent.
+* ``priority``: Node priority (highest node priority is zero). This is used by
+ the SP algorithm running on the parent node for scheduling this node.
+* ``weight``: Node weight (lowest weight is one). The node weight is relative
+ to the weight sum of all siblings that have the same priority. It is used by
+ the WFQ algorithm running on the parent node for scheduling this node.
+* ``level_id``: Hierarchy level of the node.
+* ``shaper_profile_id``: Shaper profile ID of the private shaper to be used by
+ the node.
+* ``cman_mode``: Congestion management mode to be enabled for this node.
+* ``wred_profile_id``: WRED profile id to be enabled for this node.
+* ``stats_mask``: Mask of statistics counter types to be enabled for this node.
+* ``n_shared_shapers``: Number of shared shapers.
+* ``shared_shaper_id``: Shared shaper ID.
+
+Delete port traffic management hierarchy node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Delete node from port traffic management hierarchy::
+
+ testpmd> del port tm node (port_id) (node_id)
+
+Update port traffic management hierarchy parent node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Update port traffic management hierarchy parent node::
+
+ testpmd> set port tm node parent (port_id) (node_id) (parent_node_id) \
+ (priority) (weight)
+
+This function can only be called after the hierarchy commit invocation. Its
+success depends on whether the port supports this operation, as advertised
+through the port capability set. This function is valid for all nodes of the
+traffic management hierarchy except the root node.
+
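+For example, to move node 21 on port 0 under parent node 30 with priority 0
+and weight 1 (all IDs are illustrative)::
+
+   testpmd> set port tm node parent 0 21 30 0 1
+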
+Commit port traffic management hierarchy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Commit the traffic management hierarchy on the port::
+
+ testpmd> port tm hierarchy commit (port_id) (clean_on_fail)
+
+where:
+
+* ``clean_on_fail``: When set to non-zero, the hierarchy is cleared if the
+  function call fails; when set to zero, the hierarchy is preserved.
+
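+For example, to commit the hierarchy on port 0, clearing it if the commit
+fails::
+
+   testpmd> port tm hierarchy commit 0 1
+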
+Set port traffic management default hierarchy (tm forwarding mode)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the traffic management default hierarchy on the port::
+
+ testpmd> set port tm hierarchy default (port_id)
+
Filter Functions
----------------
@@ -2696,6 +3103,10 @@ This section lists supported pattern items and their attributes, if any.
- ``thresh {unsigned}``: accuracy threshold.
+- ``gtp``, ``gtpc``, ``gtpu``: match GTPv1 header.
+
+ - ``teid {unsigned}``: tunnel endpoint identifier.
+
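+For example, a flow rule matching GTP-U packets by TEID might be written as
+follows (an illustrative sketch; the TEID and queue index are arbitrary)::
+
+   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 1234 / end actions queue index 1 / end
+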
Actions list
^^^^^^^^^^^^
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 457f817c..7e126770 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -50,7 +50,8 @@ offload are still consumed by the test tool and included in the cycle-count.
These cycles are consumed by retries and inefficient API calls enqueuing and
dequeuing smaller bursts than specified by the cmdline parameter. This results
in a larger cycle-count measurement and should not be interpreted as an offload
-cost measurement.
+cost measurement. Using "pmd-cyclecount" mode will give a better idea of
+actual costs of hardware acceleration.
On hardware devices the throughput measurement is not necessarily the maximum
possible for the device, e.g. it may be necessary to use multiple cores to keep
@@ -134,6 +135,7 @@ The following are the appication command-line options:
throughput
latency
verify
+ pmd-cyclecount
* ``--silent``
@@ -170,9 +172,11 @@ The following are the appication command-line options:
* List of values, up to 32 values, separated in commas (i.e. ``--buffer-sz 32,64,128``)
-* ``--segments-nb <n>``
+* ``--segment-sz <n>``
- Set the number of segments per packet.
+  Set the size of each segment, for scatter-gather list testing.
+  By default, it is set to the maximum buffer size, including the digest size,
+  so that a single segment is created.
* ``--devtype <name>``
@@ -186,9 +190,11 @@ The following are the appication command-line options:
crypto_snow3g
crypto_kasumi
crypto_zuc
+ crypto_dpaa_sec
crypto_dpaa2_sec
crypto_armv8
crypto_scheduler
+ crypto_mrvl
* ``--optype <name>``
@@ -325,6 +331,16 @@ The following are the appication command-line options:
Set the size of digest.
+* ``--desc-nb <n>``
+
+ Set number of descriptors for each crypto device.
+
+* ``--pmd-cyclecount-delay-ms <n>``
+
+ Add a delay (in milliseconds) between enqueue and dequeue in
+ pmd-cyclecount benchmarking mode (useful when benchmarking
+ hardware acceleration).
+
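+  For example, a pmd-cyclecount run against a hardware device might add a
+  small delay between enqueue and dequeue (this command line is an
+  illustrative sketch)::
+
+     dpdk-test-crypto-perf -l 0-3 -- --ptest pmd-cyclecount \
+        --devtype crypto_dpaa2_sec --pmd-cyclecount-delay-ms 5
+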
* ``--csv-friendly``
Enable test result output CSV friendly rather than human friendly.
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
index 34b1c318..5aa2237b 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -106,10 +106,6 @@ The following are the application command-line options:
Set the number of mbufs to be allocated from the mempool.
-* ``--slcore <n>``
-
- Set the scheduler lcore id.(Valid when eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable)
-
* ``--plcores <CORELIST>``
Set the list of cores to be used as producers.
@@ -362,7 +358,6 @@ Supported application command line options are following::
--test
--socket_id
--pool_sz
- --slcore (Valid when eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable)
--plcores
--wlcores
--stlist
@@ -379,8 +374,8 @@ Example command to run perf queue test:
.. code-block:: console
- sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
- --test=perf_queue --slcore=1 --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0
+ sudo build/app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \
+ --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0
PERF_ATQ Test
@@ -441,7 +436,6 @@ Supported application command line options are following::
--test
--socket_id
--pool_sz
- --slcore (Valid when eventdev is not RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capable)
--plcores
--wlcores
--stlist
diff --git a/doc/guides/xen/img/dpdk_xen_pkt_switch.png b/doc/guides/xen/img/dpdk_xen_pkt_switch.png
deleted file mode 100644
index 32a6d161..00000000
--- a/doc/guides/xen/img/dpdk_xen_pkt_switch.png
+++ /dev/null
Binary files differ
diff --git a/doc/guides/xen/img/grant_refs.png b/doc/guides/xen/img/grant_refs.png
deleted file mode 100644
index baa34e1e..00000000
--- a/doc/guides/xen/img/grant_refs.png
+++ /dev/null
Binary files differ
diff --git a/doc/guides/xen/img/grant_table.png b/doc/guides/xen/img/grant_table.png
deleted file mode 100644
index c23e5fa7..00000000
--- a/doc/guides/xen/img/grant_table.png
+++ /dev/null
Binary files differ
diff --git a/doc/guides/xen/index.rst b/doc/guides/xen/index.rst
deleted file mode 100644
index cb43cd2f..00000000
--- a/doc/guides/xen/index.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Xen Guide
-=========
-
-.. toctree::
- :maxdepth: 2
- :numbered:
-
- pkt_switch
diff --git a/doc/guides/xen/pkt_switch.rst b/doc/guides/xen/pkt_switch.rst
deleted file mode 100644
index 717a04b2..00000000
--- a/doc/guides/xen/pkt_switch.rst
+++ /dev/null
@@ -1,470 +0,0 @@
-.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-DPDK Xen Based Packet-Switching Solution
-========================================
-
-Introduction
-------------
-
-DPDK provides a para-virtualization packet switching solution, based on the Xen hypervisor's Grant Table, Note 1,
-which provides simple and fast packet switching capability between guest domains and host domain based on MAC address or VLAN tag.
-
-This solution is comprised of two components;
-a Poll Mode Driver (PMD) as the front end in the guest domain and a switching back end in the host domain.
-XenStore is used to exchange configure information between the PMD front end and switching back end,
-including grant reference IDs for shared Virtio RX/TX rings,
-MAC address, device state, and so on. XenStore is an information storage space shared between domains,
-see further information on XenStore below.
-
-The front end PMD can be found in the DPDK directory lib/ librte_pmd_xenvirt and back end example in examples/vhost_xen.
-
-The PMD front end and switching back end use shared Virtio RX/TX rings as para- virtualized interface.
-The Virtio ring is created by the front end, and Grant table references for the ring are passed to host.
-The switching back end maps those grant table references and creates shared rings in a mapped address space.
-
-The following diagram describes the functionality of the DPDK Xen Packet- Switching Solution.
-
-
-.. _figure_dpdk_xen_pkt_switch:
-
-.. figure:: img/dpdk_xen_pkt_switch.*
-
- Functionality of the DPDK Xen Packet Switching Solution.
-
-
-Note 1 The Xen hypervisor uses a mechanism called a Grant Table to share memory between domains
-(`http://wiki.xen.org/wiki/Grant Table <http://wiki.xen.org/wiki/Grant%20Table>`_).
-
-A diagram of the design is shown below, where "gva" is the Guest Virtual Address,
-which is the data pointer of the mbuf, and "hva" is the Host Virtual Address:
-
-
-.. _figure_grant_table:
-
-.. figure:: img/grant_table.*
-
- DPDK Xen Layout
-
-
-In this design, a Virtio ring is used as a para-virtualized interface for better performance over a Xen private ring
-when packet switching to and from a VM.
-The additional performance is gained by avoiding a system call and memory map in each memory copy with a XEN private ring.
-
-Device Creation
----------------
-
-Poll Mode Driver Front End
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Mbuf pool allocation:
-
- To use a Xen switching solution, the DPDK application should use rte_mempool_gntalloc_create()
- to reserve mbuf pools during initialization.
- rte_mempool_gntalloc_create() creates a mempool with objects from memory allocated and managed via gntalloc/gntdev.
-
- The DPDK now supports construction of mempools from allocated virtual memory through the rte_mempool_xmem_create() API.
-
- This front end constructs mempools based on memory allocated through the xen_gntalloc driver.
- rte_mempool_gntalloc_create() allocates Grant pages, maps them to continuous virtual address space,
- and calls rte_mempool_xmem_create() to build mempools.
- The Grant IDs for all Grant pages are passed to the host through XenStore.
-
-* Virtio Ring Creation:
-
- The Virtio queue size is defined as 256 by default in the VQ_DESC_NUM macro.
- Using the queue setup function,
- Grant pages are allocated based on ring size and are mapped to continuous virtual address space to form the Virtio ring.
- Normally, one ring is comprised of several pages.
- Their Grant IDs are passed to the host through XenStore.
-
- There is no requirement that this memory be physically continuous.
-
-* Interrupt and Kick:
-
- There are no interrupts in DPDK Xen Switching as both front and back ends work in polling mode.
- There is no requirement for notification.
-
-* Feature Negotiation:
-
- Currently, feature negotiation through XenStore is not supported.
-
-* Packet Reception & Transmission:
-
- With mempools and Virtio rings created, the front end can operate Virtio devices,
- as it does in Virtio PMD for KVM Virtio devices with the exception that the host
- does not require notifications or deal with interrupts.
-
-XenStore is a database that stores guest and host information in the form of (key, value) pairs.
-The following is an example of the information generated during the startup of the front end PMD in a guest VM (domain ID 1):
-
-.. code-block:: console
-
- xenstore -ls /local/domain/1/control/dpdk
- 0_mempool_gref="3042,3043,3044,3045"
- 0_mempool_va="0x7fcbc6881000"
- 0_tx_vring_gref="3049"
- 0_rx_vring_gref="3053"
- 0_ether_addr="4e:0b:d0:4e:aa:f1"
- 0_vring_flag="3054"
- ...
-
-Multiple mempools and multiple Virtios may exist in the guest domain, the first number is the index, starting from zero.
-
-The idx#_mempool_va stores the guest virtual address for mempool idx#.
-
-The idx#_ether_adder stores the MAC address of the guest Virtio device.
-
-For idx#_rx_ring_gref, idx#_tx_ring_gref, and idx#_mempool_gref, the value is a list of Grant references.
-Take idx#_mempool_gref node for example, the host maps those Grant references to a continuous virtual address space.
-The real Grant reference information is stored in this virtual address space,
-where (gref, pfn) pairs follow each other with -1 as the terminator.
-
-
-.. _figure_grant_refs:
-
-.. figure:: img/grant_refs.*
-
- Mapping Grant references to a continuous virtual address space
-
-
-After all gref# IDs are retrieved, the host maps them to a continuous virtual address space.
-With the guest mempool virtual address, the host establishes 1:1 address mapping.
-With multiple guest mempools, the host establishes multiple address translation regions.
-
-Switching Back End
-~~~~~~~~~~~~~~~~~~
-
-The switching back end monitors changes in XenStore.
-When the back end detects that a new Virtio device has been created in a guest domain, it will:
-
-#. Retrieve Grant and configuration information from XenStore.
-
-#. Map and create a Virtio ring.
-
-#. Map mempools in the host and establish address translation between the guest address and host address.
-
-#. Select a free VMDQ pool, set its affinity with the Virtio device, and set the MAC/ VLAN filter.
-
-Packet Reception
-~~~~~~~~~~~~~~~~
-
-When packets arrive from an external network, the MAC?VLAN filter classifies packets into queues in one VMDQ pool.
-As each pool is bonded to a Virtio device in some guest domain, the switching back end will:
-
-#. Fetch an available entry from the Virtio RX ring.
-
-#. Get gva, and translate it to hva.
-
-#. Copy the contents of the packet to the memory buffer pointed to by gva.
-
-The DPDK application in the guest domain, based on the PMD front end,
-is polling the shared Virtio RX ring for available packets and receives them on arrival.
-
-Packet Transmission
-~~~~~~~~~~~~~~~~~~~
-
-When a Virtio device in one guest domain is to transmit a packet,
-it puts the virtual address of the packet's data area into the shared Virtio TX ring.
-
-The packet switching back end is continuously polling the Virtio TX ring.
-When new packets are available for transmission from a guest, it will:
-
-#. Fetch an available entry from the Virtio TX ring.
-
-#. Get gva, and translate it to hva.
-
-#. Copy the packet from hva to the host mbuf's data area.
-
-#. Compare the destination MAC address with all the MAC addresses of the Virtio devices it manages.
- If a match exists, it directly copies the packet to the matched VIrtio RX ring.
- Otherwise, it sends the packet out through hardware.
-
-.. note::
-
- The packet switching back end is for demonstration purposes only.
- The user could implement their switching logic based on this example.
- In this example, only one physical port on the host is supported.
- Multiple segments are not supported. The biggest mbuf supported is 4KB.
- When the back end is restarted, all front ends must also be restarted.
-
-Running the Application
------------------------
-
-The following describes the steps required to run the application.
-
-Validated Environment
-~~~~~~~~~~~~~~~~~~~~~
-
-Host:
-
- Xen-hypervisor: 4.2.2
-
- Distribution: Fedora release 18
-
- Kernel: 3.10.0
-
- Xen development package (including Xen, Xen-libs, xen-devel): 4.2.3
-
-Guest:
-
- Distribution: Fedora 16 and 18
-
- Kernel: 3.6.11
-
-Xen Host Prerequisites
-~~~~~~~~~~~~~~~~~~~~~~
-
-Note that the following commands might not be the same on different Linux* distributions.
-
-* Install xen-devel package:
-
- .. code-block:: console
-
- yum install xen-devel.x86_64
-
-* Start xend if not already started:
-
- .. code-block:: console
-
- /etc/init.d/xend start
-
-* Mount xenfs if not already mounted:
-
- .. code-block:: console
-
- mount -t xenfs none /proc/xen
-
-* Enlarge the limit for xen_gntdev driver:
-
- .. code-block:: console
-
- modprobe -r xen_gntdev
- modprobe xen_gntdev limit=1000000
-
-.. note::
-
- The default limit for earlier versions of the xen_gntdev driver is 1024.
- That is insufficient to support the mapping of multiple Virtio devices into multiple VMs,
- so it is necessary to enlarge the limit by reloading this module.
- The default limit of recent versions of xen_gntdev is 1048576.
- The rough calculation of this limit is:
-
- limit=nb_mbuf# * VM#.
-
- In DPDK examples, nb_mbuf# is normally 8192.
-
-Building and Running the Switching Backend
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Edit config/common_linuxapp, and change the default configuration value for the following two items:
-
- .. code-block:: console
-
- CONFIG_RTE_LIBRTE_XEN_DOM0=y
- CONFIG RTE_LIBRTE_PMD_XENVIRT=n
-
-#. Build the target:
-
- .. code-block:: console
-
- make install T=x86_64-native-linuxapp-gcc
-
-#. Ensure that RTE_SDK and RTE_TARGET are correctly set. Build the switching example:
-
- .. code-block:: console
-
- make -C examples/vhost_xen/
-
-#. Load the Xen DPDK memory management module and preallocate memory:
-
- .. code-block:: console
-
- insmod ./x86_64-native-linuxapp-gcc/build/lib/librte_eal/linuxapp/xen_dom0/rte_dom0_mm.ko
- echo 2048> /sys/kernel/mm/dom0-mm/memsize-mB/memsize
-
- .. note::
-
- On Xen Dom0, there is no hugepage support.
- Under Xen Dom0, the DPDK uses a special memory management kernel module
- to allocate chunks of physically continuous memory.
- Refer to the *DPDK Getting Started Guide* for more information on memory management in the DPDK.
- In the above command, 4 GB memory is reserved (2048 of 2 MB pages) for DPDK.
-
-#. Load uio_pci_generic and bind one Intel NIC controller to it:
-
- .. code-block:: console
-
- modprobe uio_pci_generic
- python usertools/dpdk-devbind.py -b uio_pci_generic 0000:09:00:00.0
-
- In this case, 0000:09:00.0 is the PCI address for the NIC controller.
-
-#. Run the switching back end example:
-
- .. code-block:: console
-
- examples/vhost_xen/build/vhost-switch -l 0-3 -n 3 --xen-dom0 -- -p1
-
-.. note::
-
- The -xen-dom0 option instructs the DPDK to use the Xen kernel module to allocate memory.
-
-Other Parameters:
-
-* -vm2vm
-
- The vm2vm parameter enables/disables packet switching in software.
- Disabling vm2vm implies that on a VM packet transmission will always go to the Ethernet port
- and will not be switched to another VM
-
-* -Stats
-
- The Stats parameter controls the printing of Virtio-net device statistics.
- The parameter specifies the interval (in seconds) at which to print statistics,
- an interval of 0 seconds will disable printing statistics.
-
-Xen PMD Frontend Prerequisites
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Install xen-devel package for accessing XenStore:
-
- .. code-block:: console
-
- yum install xen-devel.x86_64
-
-#. Mount xenfs, if it is not already mounted:
-
- .. code-block:: console
-
- mount -t xenfs none /proc/xen
-
-#. Enlarge the default limit for xen_gntalloc driver:
-
- .. code-block:: console
-
- modprobe -r xen_gntalloc
- modprobe xen_gntalloc limit=6000
-
-.. note::
-
- Before the Linux kernel version 3.8-rc5, Jan 15th 2013,
- a critical defect occurs when a guest is heavily allocating Grant pages.
- The Grant driver allocates fewer pages than expected which causes kernel memory corruption.
- This happens, for example, when a guest uses the v1 format of a Grant table entry and allocates
- more than 8192 Grant pages (this number might be different on different hypervisor versions).
- To work around this issue, set the limit for gntalloc driver to 6000.
- (The kernel normally allocates hundreds of Grant pages with one Xen front end per virtualized device).
- If the kernel allocates a lot of Grant pages, for example, if the user uses multiple net front devices,
- it is best to upgrade the Grant alloc driver.
- This defect has been fixed in kernel version 3.8-rc5 and later.
-
-Building and Running the Front End
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Edit config/common_linuxapp, and change the default configuration value:
-
- .. code-block:: console
-
- CONFIG_RTE_LIBRTE_XEN_DOM0=n
- CONFIG_RTE_LIBRTE_PMD_XENVIRT=y
-
-#. Build the package:
-
- .. code-block:: console
-
- make install T=x86_64-native-linuxapp-gcc
-
-#. Enable hugepages. Refer to the *DPDK Getting Started Guide* for instructions on
- how to use hugepages in the DPDK.
-
-#. Run TestPMD. Refer to *DPDK TestPMD Application User Guide* for detailed parameter usage.
-
- .. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11"
- testpmd>set fwd mac
- testpmd>start
-
- As an example to run two TestPMD instances over 2 Xen Virtio devices:
-
- .. code-block:: console
-
- --vdev="net_xenvirt0,mac=00:00:00:00:00:11" --vdev="net_xenvirt1;mac=00:00:00:00:00:22"
-
-
-Usage Examples: Injecting a Packet Stream Using a Packet Generator
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Loopback Mode
-^^^^^^^^^^^^^
-
-Run TestPMD in a guest VM:
-
-.. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22
- testpmd> set fwd mac
- testpmd> start
-
-Example output of the vhost_switch would be:
-
-.. code-block:: console
-
- DATA:(0) MAC_ADDRESS 00:00:00:00:00:11 and VLAN_TAG 1000 registered.
-
-The above message indicates that device 0 has been registered with MAC address 00:00:00:00:00:11 and VLAN tag 1000.
-Any packets received on the NIC with these values is placed on the device's receive queue.
-
-Configure a packet stream in the packet generator, set the destination MAC address to 00:00:00:00:00:11, and VLAN to 1000,
-the guest Virtio receives these packets and sends them out with destination MAC address 00:00:00:00:00:22.
-
-Inter-VM Mode
-^^^^^^^^^^^^^
-
-Run TestPMD in guest VM1:
-
-.. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22 -- -i
-
-Run TestPMD in guest VM2:
-
-.. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:22" -- -i --eth-peer=0,00:00:00:00:00:33
-
-Configure a packet stream in the packet generator, and set the destination MAC address to 00:00:00:00:00:11 and VLAN to 1000.
-The packets received in Virtio in guest VM1 will be forwarded to Virtio in guest VM2 and
-then sent out through hardware with destination MAC address 00:00:00:00:00:33.
-
-The packet flow is:
-
-packet generator->Virtio in guest VM1->switching backend->Virtio in guest VM2->switching backend->wire
diff --git a/drivers/Makefile b/drivers/Makefile
index 7fef66d7..db0cd76e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -37,8 +37,8 @@ DEPDIRS-mempool := bus
DIRS-y += net
DEPDIRS-net := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
-DEPDIRS-crypto := mempool
+DEPDIRS-crypto := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
-DEPDIRS-event := bus
+DEPDIRS-event := bus mempool net
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 02242148..c20beb43 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -30,9 +30,9 @@
include $(RTE_SDK)/mk/rte.vars.mk
-core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
-
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
-DEPDIRS-fslmc = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
new file mode 100644
index 00000000..f672f540
--- /dev/null
+++ b/drivers/bus/dpaa/Makefile
@@ -0,0 +1,78 @@
+# BSD LICENSE
+#
+# Copyright 2016 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
+
+#
+# library name
+#
+LIB = librte_bus_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -Wno-cast-qual
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -I$(RTE_BUS_DPAA)/
+CFLAGS += -I$(RTE_BUS_DPAA)/include
+CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+
+# versioning export map
+EXPORT_MAP := rte_bus_dpaa_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ dpaa_bus.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \
+ base/fman/fman.c \
+ base/fman/fman_hw.c \
+ base/fman/of.c \
+ base/fman/netcfg_layer.c \
+ base/qbman/process.c \
+ base/qbman/bman.c \
+ base/qbman/bman_driver.c \
+ base/qbman/qman.c \
+ base/qbman/qman_driver.c \
+ base/qbman/dpaa_alloc.c \
+ base/qbman/dpaa_sys.c
+
+# Link Pthread
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
new file mode 100644
index 00000000..3816dba5
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -0,0 +1,612 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+
+/* This header declares the driver interface we implement */
+#include <fman.h>
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+#define QMI_PORT_REGS_OFFSET 0x400
+
+/* CCSR map address to access ccsr based register */
+void *fman_ccsr_map;
+/* fman version info */
+u16 fman_ip_rev;
+static int get_once;
+u32 fman_dealloc_bufs_mask_hi;
+u32 fman_dealloc_bufs_mask_lo;
+
+int fman_ccsr_map_fd = -1;
+static COMPAT_LIST_HEAD(__ifs);
+
+/* This is the (const) global variable that callers have read-only access to.
+ * Internally, we have read-write access directly to __ifs.
+ */
+const struct list_head *fman_if_list = &__ifs;
+
+static void
+if_destructor(struct __fman_if *__if)
+{
+ struct fman_if_bpool *bp, *tmpbp;
+
+ if (!__if)
+ return;
+
+ if (__if->__if.mac_type == fman_offline)
+ goto cleanup;
+
+ list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+cleanup:
+ free(__if);
+}
+
+static int
+fman_get_ip_rev(const struct device_node *fman_node)
+{
+ const uint32_t *fman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ uint32_t ip_rev_1;
+ int _errno;
+
+ fman_addr = of_get_address(fman_node, 0, &regs_size, NULL);
+ if (!fman_addr) {
+ pr_err("of_get_address cannot return fman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(fman_node, fman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+ fman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fman_ccsr_map_fd, phys_addr);
+ if (fman_ccsr_map == MAP_FAILED) {
+ pr_err("Can not map FMan ccsr base");
+ return -EINVAL;
+ }
+
+ ip_rev_1 = in_be32(fman_ccsr_map + FMAN_IP_REV_1);
+ fman_ip_rev = (ip_rev_1 & FMAN_IP_REV_1_MAJOR_MASK) >>
+ FMAN_IP_REV_1_MAJOR_SHIFT;
+
+ _errno = munmap(fman_ccsr_map, regs_size);
+ if (_errno)
+ pr_err("munmap() of FMan ccsr failed");
+
+ return 0;
+}
+
+static int
+fman_get_mac_index(uint64_t regs_addr_host, uint8_t *mac_idx)
+{
+ int ret = 0;
+
+ /*
+ * MAC1 : E_0000h
+ * MAC2 : E_2000h
+ * MAC3 : E_4000h
+ * MAC4 : E_6000h
+ * MAC5 : E_8000h
+ * MAC6 : E_A000h
+ * MAC7 : E_C000h
+ * MAC8 : E_E000h
+ * MAC9 : F_0000h
+ * MAC10: F_2000h
+ */
+ switch (regs_addr_host) {
+ case 0xE0000:
+ *mac_idx = 1;
+ break;
+ case 0xE2000:
+ *mac_idx = 2;
+ break;
+ case 0xE4000:
+ *mac_idx = 3;
+ break;
+ case 0xE6000:
+ *mac_idx = 4;
+ break;
+ case 0xE8000:
+ *mac_idx = 5;
+ break;
+ case 0xEA000:
+ *mac_idx = 6;
+ break;
+ case 0xEC000:
+ *mac_idx = 7;
+ break;
+ case 0xEE000:
+ *mac_idx = 8;
+ break;
+ case 0xF0000:
+ *mac_idx = 9;
+ break;
+ case 0xF2000:
+ *mac_idx = 10;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+fman_if_init(const struct device_node *dpa_node)
+{
+ const char *rprop, *mprop;
+ uint64_t phys_addr;
+ struct __fman_if *__if;
+ struct fman_if_bpool *bpool;
+
+ const phandle *mac_phandle, *ports_phandle, *pools_phandle;
+ const phandle *tx_channel_id = NULL, *mac_addr, *cell_idx;
+ const phandle *rx_phandle, *tx_phandle;
+ uint64_t tx_phandle_host[4] = {0};
+ uint64_t rx_phandle_host[4] = {0};
+ uint64_t regs_addr_host = 0;
+ uint64_t cell_idx_host = 0;
+
+ const struct device_node *mac_node = NULL, *tx_node;
+ const struct device_node *pool_node, *fman_node, *rx_node;
+ const uint32_t *regs_addr = NULL;
+ const char *mname, *fname;
+ const char *dname = dpa_node->full_name;
+ size_t lenp;
+ int _errno;
+ const char *char_prop;
+ uint32_t na;
+
+ if (of_device_is_available(dpa_node) == false)
+ return 0;
+
+ rprop = "fsl,qman-frame-queues-rx";
+ mprop = "fsl,fman-mac";
+
+ /* Allocate an object for this network interface */
+ __if = malloc(sizeof(*__if));
+ if (!__if) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*__if));
+ goto err;
+ }
+ memset(__if, 0, sizeof(*__if));
+ INIT_LIST_HEAD(&__if->__if.bpool_list);
+ strncpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
+ __if->node_path[PATH_MAX - 1] = '\0';
+
+ /* Obtain the MAC node used by this interface except macless */
+ mac_phandle = of_get_property(dpa_node, mprop, &lenp);
+ if (!mac_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no %s\n", dname, mprop);
+ goto err;
+ }
+ assert(lenp == sizeof(phandle));
+ mac_node = of_find_node_by_phandle(*mac_phandle);
+ if (!mac_node) {
+ FMAN_ERR(-ENXIO, "%s: bad 'fsl,fman-mac\n", dname);
+ goto err;
+ }
+ mname = mac_node->full_name;
+
+ /* Map the CCSR regs for the MAC node */
+ regs_addr = of_get_address(mac_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(mac_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->ccsr_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->ccsr_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ regs_addr_host = of_read_number(regs_addr, na);
+
+
+ /* Get the index of the Fman this i/f belongs to */
+ fman_node = of_get_parent(mac_node);
+ na = of_n_addr_cells(mac_node);
+ if (!fman_node) {
+ FMAN_ERR(-ENXIO, "of_get_parent(%s)\n", mname);
+ goto err;
+ }
+ fname = fman_node->full_name;
+ cell_idx = of_get_property(fman_node, "cell-index", &lenp);
+ if (!cell_idx) {
+ FMAN_ERR(-ENXIO, "%s: no cell-index)\n", fname);
+ goto err;
+ }
+ assert(lenp == sizeof(*cell_idx));
+ cell_idx_host = of_read_number(cell_idx, lenp / sizeof(phandle));
+ __if->__if.fman_idx = cell_idx_host;
+ if (!get_once) {
+ _errno = fman_get_ip_rev(fman_node);
+ if (_errno) {
+ FMAN_ERR(-ENXIO, "%s: ip_rev is not available\n",
+ fname);
+ goto err;
+ }
+ }
+
+ if (fman_ip_rev >= FMAN_V3) {
+ /*
+ * Set A2V, OVOM, EBD bits in contextA to allow external
+ * buffer deallocation by fman.
+ */
+ fman_dealloc_bufs_mask_hi = FMAN_V3_CONTEXTA_EN_A2V |
+ FMAN_V3_CONTEXTA_EN_OVOM;
+ fman_dealloc_bufs_mask_lo = FMAN_V3_CONTEXTA_EN_EBD;
+ } else {
+ fman_dealloc_bufs_mask_hi = 0;
+ fman_dealloc_bufs_mask_lo = 0;
+ }
+ /* Is the MAC node 1G, 10G? */
+ __if->__if.is_memac = 0;
+
+ if (of_device_is_compatible(mac_node, "fsl,fman-1g-mac"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-10g-mac"))
+ __if->__if.mac_type = fman_mac_10g;
+ else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+ __if->__if.is_memac = 1;
+ char_prop = of_get_property(mac_node, "phy-connection-type",
+ NULL);
+ if (!char_prop) {
+ printf("memac: unknown MII type assuming 1G\n");
+ /* Right now forcing memac to 1g in case of error*/
+ __if->__if.mac_type = fman_mac_1g;
+ } else {
+ if (strstr(char_prop, "sgmii"))
+ __if->__if.mac_type = fman_mac_1g;
+ else if (strstr(char_prop, "rgmii")) {
+ __if->__if.mac_type = fman_mac_1g;
+ __if->__if.is_rgmii = 1;
+ } else if (strstr(char_prop, "xgmii"))
+ __if->__if.mac_type = fman_mac_10g;
+ }
+ } else {
+ FMAN_ERR(-EINVAL, "%s: unknown MAC type\n", mname);
+ goto err;
+ }
+
+ /*
+ * For MAC ports, we cannot rely on cell-index. In
+ * T2080, two of the 10G ports on single FMAN have same
+ * duplicate cell-indexes as the other two 10G ports on
+ * same FMAN. Hence, we now rely upon addresses of the
+ * ports from device tree to deduce the index.
+ */
+
+ _errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
+ if (_errno) {
+ FMAN_ERR(-EINVAL, "Invalid register address: %lu",
+ regs_addr_host);
+ goto err;
+ }
+
+ /* Extract the MAC address for private and shared interfaces */
+ mac_addr = of_get_property(mac_node, "local-mac-address",
+ &lenp);
+ if (!mac_addr) {
+ FMAN_ERR(-EINVAL, "%s: no local-mac-address\n",
+ mname);
+ goto err;
+ }
+ memcpy(&__if->__if.mac_addr, mac_addr, ETHER_ADDR_LEN);
+
+ /* Extract the Tx port (it's the second of the two port handles)
+ * and get its channel ID
+ */
+ ports_phandle = of_get_property(mac_node, "fsl,port-handles",
+ &lenp);
+ if (!ports_phandle)
+ ports_phandle = of_get_property(mac_node, "fsl,fman-ports",
+ &lenp);
+ if (!ports_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,port-handles\n",
+ mname);
+ goto err;
+ }
+ assert(lenp == (2 * sizeof(phandle)));
+ tx_node = of_find_node_by_phandle(ports_phandle[1]);
+ if (!tx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[1]\n", mname);
+ goto err;
+ }
+ /* Extract the channel ID (from tx-port-handle) */
+ tx_channel_id = of_get_property(tx_node, "fsl,qman-channel-id",
+ &lenp);
+ if (!tx_channel_id) {
+ FMAN_ERR(-EINVAL, "%s: no fsl-qman-channel-id\n",
+ tx_node->full_name);
+ goto err;
+ }
+
+ rx_node = of_find_node_by_phandle(ports_phandle[0]);
+ if (!rx_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,port-handle[0]\n", mname);
+ goto err;
+ }
+ regs_addr = of_get_address(rx_node, 0, &__if->regs_size, NULL);
+ if (!regs_addr) {
+ FMAN_ERR(-EINVAL, "of_get_address(%s)\n", mname);
+ goto err;
+ }
+ phys_addr = of_translate_address(rx_node, regs_addr);
+ if (!phys_addr) {
+ FMAN_ERR(-EINVAL, "of_translate_address(%s, %p)\n",
+ mname, regs_addr);
+ goto err;
+ }
+ __if->bmi_map = mmap(NULL, __if->regs_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ fman_ccsr_map_fd, phys_addr);
+ if (__if->bmi_map == MAP_FAILED) {
+ FMAN_ERR(-errno, "mmap(0x%"PRIx64")\n", phys_addr);
+ goto err;
+ }
+
+ /* No channel ID for MAC-less */
+ assert(lenp == sizeof(*tx_channel_id));
+ na = of_n_addr_cells(mac_node);
+ __if->__if.tx_channel_id = of_read_number(tx_channel_id, na);
+
+ /* Extract the Rx FQIDs. (Note, the device representation is silly,
+ * there are "counts" that must always be 1.)
+ */
+ rx_phandle = of_get_property(dpa_node, rprop, &lenp);
+ if (!rx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-rx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ rx_phandle_host[0] = of_read_number(&rx_phandle[0], na);
+ rx_phandle_host[1] = of_read_number(&rx_phandle[1], na);
+ rx_phandle_host[2] = of_read_number(&rx_phandle[2], na);
+ rx_phandle_host[3] = of_read_number(&rx_phandle[3], na);
+
+ assert((rx_phandle_host[1] == 1) && (rx_phandle_host[3] == 1));
+ __if->__if.fqid_rx_err = rx_phandle_host[0];
+ __if->__if.fqid_rx_def = rx_phandle_host[2];
+
+ /* Extract the Tx FQIDs */
+ tx_phandle = of_get_property(dpa_node,
+ "fsl,qman-frame-queues-tx", &lenp);
+ if (!tx_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,qman-frame-queues-tx\n", dname);
+ goto err;
+ }
+
+ assert(lenp == (4 * sizeof(phandle)));
+ /*TODO: Fix for other cases also */
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues). Convert to host byte order */
+ tx_phandle_host[0] = of_read_number(&tx_phandle[0], na);
+ tx_phandle_host[1] = of_read_number(&tx_phandle[1], na);
+ tx_phandle_host[2] = of_read_number(&tx_phandle[2], na);
+ tx_phandle_host[3] = of_read_number(&tx_phandle[3], na);
+ assert((tx_phandle_host[1] == 1) && (tx_phandle_host[3] == 1));
+ __if->__if.fqid_tx_err = tx_phandle_host[0];
+ __if->__if.fqid_tx_confirm = tx_phandle_host[2];
+
+ /* Obtain the buffer pool nodes used by this interface */
+ pools_phandle = of_get_property(dpa_node, "fsl,bman-buffer-pools",
+ &lenp);
+ if (!pools_phandle) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bman-buffer-pools\n", dname);
+ goto err;
+ }
+ /* For each pool, parse the corresponding node and add a pool object
+ * to the interface's "bpool_list"
+ */
+ assert(lenp && !(lenp % sizeof(phandle)));
+ while (lenp) {
+ size_t proplen;
+ const phandle *prop;
+ uint64_t bpid_host = 0;
+ uint64_t bpool_host[6] = {0};
+ const char *pname;
+ /* Allocate an object for the pool */
+ bpool = malloc(sizeof(*bpool));
+ if (!bpool) {
+ FMAN_ERR(-ENOMEM, "malloc(%zu)\n", sizeof(*bpool));
+ goto err;
+ }
+ /* Find the pool node */
+ pool_node = of_find_node_by_phandle(*pools_phandle);
+ if (!pool_node) {
+ FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
+ dname);
+ goto err;
+ }
+ pname = pool_node->full_name;
+ /* Extract the BPID property */
+ prop = of_get_property(pool_node, "fsl,bpid", &proplen);
+ if (!prop) {
+ FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ goto err;
+ }
+ assert(proplen == sizeof(*prop));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte-order
+ */
+ bpid_host = of_read_number(prop, na);
+ bpool->bpid = bpid_host;
+ /* Extract the cfg property (count/size/addr). "fsl,bpool-cfg"
+ * indicates for the Bman driver to seed the pool.
+ * "fsl,bpool-ethernet-cfg" is used by the network driver. The
+ * two are mutually exclusive, so check for either of them.
+ */
+ prop = of_get_property(pool_node, "fsl,bpool-cfg",
+ &proplen);
+ if (!prop)
+ prop = of_get_property(pool_node,
+ "fsl,bpool-ethernet-cfg",
+ &proplen);
+ if (!prop) {
+ /* It's OK for there to be no bpool-cfg */
+ bpool->count = bpool->size = bpool->addr = 0;
+ } else {
+ assert(proplen == (6 * sizeof(*prop)));
+ na = of_n_addr_cells(mac_node);
+ /* Get rid of endianness (issues).
+ * Convert to host byte order
+ */
+ bpool_host[0] = of_read_number(&prop[0], na);
+ bpool_host[1] = of_read_number(&prop[1], na);
+ bpool_host[2] = of_read_number(&prop[2], na);
+ bpool_host[3] = of_read_number(&prop[3], na);
+ bpool_host[4] = of_read_number(&prop[4], na);
+ bpool_host[5] = of_read_number(&prop[5], na);
+
+ bpool->count = ((uint64_t)bpool_host[0] << 32) |
+ bpool_host[1];
+ bpool->size = ((uint64_t)bpool_host[2] << 32) |
+ bpool_host[3];
+ bpool->addr = ((uint64_t)bpool_host[4] << 32) |
+ bpool_host[5];
+ }
+ /* Parsing of the pool is complete, add it to the interface
+ * list.
+ */
+ list_add_tail(&bpool->node, &__if->__if.bpool_list);
+ lenp -= sizeof(phandle);
+ pools_phandle++;
+ }
+
+ /* Parsing of the network interface is complete, add it to the list */
+ DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x,"
+ "Port ID = %x\n",
+ dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
+ __if->__if.mac_idx);
+
+ list_add_tail(&__if->__if.node, &__ifs);
+ return 0;
+err:
+ if_destructor(__if);
+ return _errno;
+}
+
+int
+fman_init(void)
+{
+ const struct device_node *dpa_node;
+ int _errno;
+
+ /* If multiple dependencies try to initialise the Fman driver, don't
+ * panic.
+ */
+ if (fman_ccsr_map_fd != -1)
+ return 0;
+
+ fman_ccsr_map_fd = open(FMAN_DEVICE_PATH, O_RDWR);
+ if (unlikely(fman_ccsr_map_fd < 0)) {
+ DPAA_BUS_LOG(ERR, "Unable to open (/dev/mem)");
+ return fman_ccsr_map_fd;
+ }
+
+ for_each_compatible_node(dpa_node, NULL, "fsl,dpa-ethernet-init") {
+ _errno = fman_if_init(dpa_node);
+ if (_errno) {
+ FMAN_ERR(_errno, "if_init(%s)\n", dpa_node->full_name);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ fman_finish();
+ return _errno;
+}
+
+void
+fman_finish(void)
+{
+ struct __fman_if *__if, *tmpif;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ list_for_each_entry_safe(__if, tmpif, &__ifs, __if.node) {
+ int _errno;
+
+ /* disable Rx and Tx */
+ if ((__if->__if.mac_type == fman_mac_1g) &&
+ (!__if->__if.is_memac))
+ out_be32(__if->ccsr_map + 0x100,
+ in_be32(__if->ccsr_map + 0x100) & ~(u32)0x5);
+ else
+ out_be32(__if->ccsr_map + 8,
+ in_be32(__if->ccsr_map + 8) & ~(u32)3);
+ /* release the mapping */
+ _errno = munmap(__if->ccsr_map, __if->regs_size);
+ if (unlikely(_errno < 0))
+ fprintf(stderr, "%s:%hu:%s(): munmap() = %d (%s)\n",
+ __FILE__, __LINE__, __func__,
+ -errno, strerror(errno));
+ printf("Tearing down %s\n", __if->node_path);
+ list_del(&__if->__if.node);
+ free(__if);
+ }
+
+ close(fman_ccsr_map_fd);
+ fman_ccsr_map_fd = -1;
+}
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
new file mode 100644
index 00000000..077c17c0
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -0,0 +1,590 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <ifaddrs.h>
+#include <fman.h>
+/* This header declares things about Fman hardware itself (the format of status
+ * words and an inline implementation of CRC64). We include it only in order to
+ * instantiate the one global variable it depends on.
+ */
+#include <fsl_fman.h>
+#include <fsl_fman_crc64.h>
+#include <fsl_bman.h>
+
+/* Instantiate the global variable that the inline CRC64 implementation (in
+ * <fsl_fman.h>) depends on.
+ */
+DECLARE_FMAN_CRC64_TABLE();
+
+#define ETH_ADDR_TO_UINT64(eth_addr) \
+ (uint64_t)(((uint64_t)(eth_addr)[0] << 40) | \
+ ((uint64_t)(eth_addr)[1] << 32) | \
+ ((uint64_t)(eth_addr)[2] << 24) | \
+ ((uint64_t)(eth_addr)[3] << 16) | \
+ ((uint64_t)(eth_addr)[4] << 8) | \
+ ((uint64_t)(eth_addr)[5]))
+
+void
+fman_if_set_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+		out_be32(hashtable_ctrl, i | HASH_CTRL_MCAST_EN);
+}
+
+void
+fman_if_reset_mcast_filter_table(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *hashtable_ctrl;
+ uint32_t i;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ for (i = 0; i < 64; i++)
+ out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
+}
+
+static uint32_t
+get_mac_hash_code(uint64_t eth_addr)
+{
+	uint64_t mask1, mask2;
+	uint32_t xor_val = 0;
+	uint8_t i, j;
+
+	for (i = 0; i < 6; i++) {
+		mask1 = eth_addr & (uint64_t)0x01;
+		eth_addr >>= 1;
+
+		for (j = 0; j < 7; j++) {
+			mask2 = eth_addr & (uint64_t)0x01;
+			mask1 ^= mask2;
+			eth_addr >>= 1;
+		}
+
+		xor_val |= (mask1 << (5 - i));
+	}
+
+	return xor_val;
+}
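+
+/* Illustrative note (not part of the driver): the loop above folds the
+ * 48-bit address into a 6-bit hash, one parity bit per byte; hash bit
+ * (5 - i) is the XOR of all eight bits of the i-th least-significant byte.
+ * A compact equivalent, under the same bit ordering, would be:
+ *
+ *	uint32_t hash = 0;
+ *	int k;
+ *
+ *	for (k = 0; k < 48; k++)
+ *		hash ^= ((eth_addr >> k) & 1) << (5 - k / 8);
+ */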
+
+int
+fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ uint64_t eth_addr;
+ void *hashtable_ctrl;
+ uint32_t hash;
+
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ eth_addr = ETH_ADDR_TO_UINT64(eth);
+
+ if (!(eth_addr & GROUP_ADDRESS))
+ return -1;
+
+ hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
+ hash = hash | HASH_CTRL_MCAST_EN;
+
+ hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
+ out_be32(hashtable_ctrl, hash);
+
+ return 0;
+}
+
+int
+fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *mac_reg =
+ &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
+ u32 val = in_be32(mac_reg);
+
+ eth[0] = (val & 0x000000ff) >> 0;
+ eth[1] = (val & 0x0000ff00) >> 8;
+ eth[2] = (val & 0x00ff0000) >> 16;
+ eth[3] = (val & 0xff000000) >> 24;
+
+ mac_reg = &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
+ val = in_be32(mac_reg);
+
+ eth[4] = (val & 0x000000ff) >> 0;
+ eth[5] = (val & 0x0000ff00) >> 8;
+
+ return 0;
+}
+
+void
+fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ void *reg;
+
+ if (addr_num) {
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ out_be32(reg, 0x0);
+ } else {
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+ out_be32(reg, 0x0);
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+ out_be32(reg, 0x0);
+ }
+}
+
+int
+fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+
+ void *reg;
+ u32 val;
+
+ memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_l;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
+
+ val = (m->__if.mac_addr.addr_bytes[0] |
+ (m->__if.mac_addr.addr_bytes[1] << 8) |
+ (m->__if.mac_addr.addr_bytes[2] << 16) |
+ (m->__if.mac_addr.addr_bytes[3] << 24));
+ out_be32(reg, val);
+
+ if (addr_num)
+ reg = &((struct memac_regs *)m->ccsr_map)->
+ mac_addr[addr_num-1].mac_addr_u;
+ else
+ reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
+
+ val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
+ (m->__if.mac_addr.addr_bytes[5] << 8));
+ out_be32(reg, val);
+
+ return 0;
+}
+
+void
+fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ u32 value = 0;
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Rx Ignore Pause Frames */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ if (enable)
+ value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
+ else
+ value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
+
+ out_be32(cmdcfg, value);
+}
+
+void
+fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ unsigned int *maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Set Max frame length */
+ maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+ out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
+}
+
+void
+fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+
+	/* read received packet count */
+ stats->ipackets = ((u64)in_be32(&regs->rfrm_u)) << 32 |
+ in_be32(&regs->rfrm_l);
+ stats->ibytes = ((u64)in_be32(&regs->roct_u)) << 32 |
+ in_be32(&regs->roct_l);
+ stats->ierrors = ((u64)in_be32(&regs->rerr_u)) << 32 |
+ in_be32(&regs->rerr_l);
+
+	/* read transmitted packet count */
+ stats->opackets = ((u64)in_be32(&regs->tfrm_u)) << 32 |
+ in_be32(&regs->tfrm_l);
+ stats->obytes = ((u64)in_be32(&regs->toct_u)) << 32 |
+ in_be32(&regs->toct_l);
+ stats->oerrors = ((u64)in_be32(&regs->terr_u)) << 32 |
+ in_be32(&regs->terr_l);
+}
+
+void
+fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ int i;
+ uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
+
+ for (i = 0; i < n; i++)
+ value[i] = ((u64)in_be32((char *)regs
+ + base_offset + 8 * i + 4)) << 32 |
+ ((u64)in_be32((char *)regs
+ + base_offset + 8 * i));
+}
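+
+/* Usage sketch (illustrative, not part of the driver): each statistic is a
+ * 64-bit counter stored as two big-endian 32-bit words, low word first and
+ * the high word 4 bytes above it, in a contiguous block starting at
+ * reoct_l; entry i therefore lives at base_offset + 8 * i. For example:
+ *
+ *	uint64_t vals[4];
+ *
+ *	fman_if_stats_get_all(p, vals, 4);	// first 4 counters, from reoct
+ */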
+
+void
+fman_if_stats_reset(struct fman_if *p)
+{
+ struct __fman_if *m = container_of(p, struct __fman_if, __if);
+ struct memac_regs *regs = m->ccsr_map;
+ uint32_t tmp;
+
+ tmp = in_be32(&regs->statn_config);
+
+ tmp |= STATS_CFG_CLR;
+
+ out_be32(&regs->statn_config, tmp);
+
+ while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
+ ;
+}
+
+void
+fman_if_promiscuous_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
+}
+
+void
+fman_if_promiscuous_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+ void *cmdcfg;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Disable Rx promiscuous mode */
+ cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
+}
+
+void
+fman_if_enable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* enable Rx and Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
+}
+
+void
+fman_if_disable_rx(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* only disable Rx, not Tx */
+ out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
+}
+
+void
+fman_if_loopback_enable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* Enable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
+	} else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_loopback_disable(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+ /* Disable loopback mode */
+ if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
+ unsigned int *ifmode =
+ &((struct memac_regs *)__if->ccsr_map)->if_mode;
+ out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
+ } else {
+ unsigned int *cmdcfg =
+ &((struct memac_regs *)__if->ccsr_map)->command_config;
+ out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
+ }
+}
+
+void
+fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
+ int bpid, size_t bufsize)
+{
+ u32 fmbm_ebmpi;
+ u32 ebmpi_val_ace = 0xc0000000;
+ u32 ebmpi_mask = 0xffc00000;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_ebmpi =
+ in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
+ fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
+ (bufsize);
+
+ out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
+ fmbm_ebmpi);
+}
+
+int
+fman_if_get_fc_threshold(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ return in_be32(fmbm_mpd);
+}
+
+int
+fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
+ u32 low_water, u32 bpid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_mpd;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
+ out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
+	return bm_pool_set_hw_threshold(bpid, low_water, high_water);
+}
+
+int
+fman_if_get_fc_quanta(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
+}
+
+int
+fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
+ pause_quanta);
+ return 0;
+}
+
+int
+fman_if_get_fdoff(struct fman_if *fm_if)
+{
+ u32 fmbm_ricp;
+ int fdoff;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_ricp =
+ in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp);
+ /*iceof + icsz*/
+ fdoff = ((fmbm_ricp & iceof_mask) >> 16) * 16 +
+ (fmbm_ricp & icsz_mask) * 16;
+
+ return fdoff;
+}
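+
+/* Worked example (illustrative): fmbm_ricp packs the internal-context
+ * extra offset (ICEOF, bits 16-20) and size (ICSZ, bits 0-4), both counted
+ * in 16-byte units. With fmbm_ricp = 0x00020008, ICEOF = 2 and ICSZ = 8,
+ * so fdoff = 2 * 16 + 8 * 16 = 160 bytes.
+ */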
+
+void
+fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_refqid =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
+ out_be32(fmbm_refqid, err_fqid);
+}
+
+int
+fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ val = in_be32(fmbm_ricp);
+
+ icp->iceof = (val & iceof_mask) >> 12;
+ icp->iciof = (val & iciof_mask) >> 4;
+ icp->icsz = (val & icsz_mask) << 4;
+
+ return 0;
+}
+
+int
+fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ int val = 0;
+ int iceof_mask = 0x001f0000;
+ int icsz_mask = 0x0000001f;
+ int iciof_mask = 0x00000f00;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ val |= (icp->iceof << 12) & iceof_mask;
+ val |= (icp->iciof << 4) & iciof_mask;
+ val |= (icp->icsz >> 4) & icsz_mask;
+
+ unsigned int *fmbm_ricp =
+ &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
+ out_be32(fmbm_ricp, val);
+
+ return 0;
+}
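+
+/* Note on the shift amounts above (illustrative): the fman_if_ic_params
+ * fields are byte counts while the register fields are in 16-byte units.
+ * ICEOF sits at bits 16-20, so "<< 12" combines "<< 16" (field position)
+ * with ">> 4" (bytes to 16-byte units); likewise ICIOF at bits 8-11 uses
+ * "<< 4" and ICSZ at bits 0-4 uses ">> 4". fman_if_get_ic_params() applies
+ * the inverse shifts.
+ */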
+
+void
+fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, in_be32(fmbm_rebm) | (fd_offset << 16));
+}
+
+void
+fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+ out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
+}
+
+uint16_t
+fman_if_get_maxfrm(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *reg_maxfrm;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
+
+	/* MAXFRM is the low 16 bits of the register */
+	return in_be32(reg_maxfrm) & 0x0000FFFF;
+}
+
+void
+fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmqm_pndn;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
+
+ out_be32(fmqm_pndn, nia);
+}
+
+void
+fman_if_discard_rx_errors(struct fman_if *fm_if)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rfsdm, *fmbm_rfsem;
+
+ fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
+ out_be32(fmbm_rfsem, 0);
+
+	/* Configure the discard mask to drop error packets such as those
+	 * with DMA errors, frame-size errors, header errors, etc. The mask
+	 * 0x010CE3F0 discards all the errors reported in FD[STATUS].
+	 */
+ fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
+ out_be32(fmbm_rfsdm, 0x010CE3F0);
+}
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
new file mode 100644
index 00000000..26cff84a
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -0,0 +1,214 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <inttypes.h>
+#include <of.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <error.h>
+#include <net/if_arp.h>
+#include <assert.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+
+#include <rte_dpaa_logs.h>
+#include <netcfg.h>
+
+/* This structure contains information about all the interfaces given by the
+ * user on the command line.
+ */
+struct netcfg_interface *netcfg_interface;
+
+/* This data structure contains all the configuration information related
+ * to the usage of DPAA devices.
+ */
+struct netcfg_info *netcfg;
+/* fd of the socket used to make ioctl requests to disable/enable shared
+ * interfaces.
+ */
+static int skfd = -1;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+void
+dump_netcfg(struct netcfg_info *cfg_ptr)
+{
+ int i;
+
+ printf(".......... DPAA Configuration ..........\n\n");
+
+ /* Network interfaces */
+ printf("Network interfaces: %d\n", cfg_ptr->num_ethports);
+ for (i = 0; i < cfg_ptr->num_ethports; i++) {
+ struct fman_if_bpool *bpool;
+ struct fm_eth_port_cfg *p_cfg = &cfg_ptr->port_cfg[i];
+ struct fman_if *__if = p_cfg->fman_if;
+
+ printf("\n+ Fman %d, MAC %d (%s);\n",
+ __if->fman_idx, __if->mac_idx,
+ (__if->mac_type == fman_mac_1g) ? "1G" : "10G");
+
+ printf("\tmac_addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (&__if->mac_addr)->addr_bytes[0],
+ (&__if->mac_addr)->addr_bytes[1],
+ (&__if->mac_addr)->addr_bytes[2],
+ (&__if->mac_addr)->addr_bytes[3],
+ (&__if->mac_addr)->addr_bytes[4],
+ (&__if->mac_addr)->addr_bytes[5]);
+
+ printf("\ttx_channel_id: 0x%02x\n",
+ __if->tx_channel_id);
+
+ printf("\tfqid_rx_def: 0x%x\n", p_cfg->rx_def);
+ printf("\tfqid_rx_err: 0x%x\n", __if->fqid_rx_err);
+
+ printf("\tfqid_tx_err: 0x%x\n", __if->fqid_tx_err);
+ printf("\tfqid_tx_confirm: 0x%x\n", __if->fqid_tx_confirm);
+ fman_if_for_each_bpool(bpool, __if)
+ printf("\tbuffer pool: (bpid=%d, count=%"PRId64
+ " size=%"PRId64", addr=0x%"PRIx64")\n",
+ bpool->bpid, bpool->count, bpool->size,
+ bpool->addr);
+ }
+}
+#endif /* RTE_LIBRTE_DPAA_DEBUG_DRIVER */
+
+static inline int
+get_num_netcfg_interfaces(char *str)
+{
+ char *pch;
+ uint8_t count = 0;
+
+ if (str == NULL)
+ return -EINVAL;
+ pch = strtok(str, ",");
+ while (pch != NULL) {
+ count++;
+ pch = strtok(NULL, ",");
+ }
+ return count;
+}
+
+struct netcfg_info *
+netcfg_acquire(void)
+{
+ struct fman_if *__if;
+ int _errno, idx = 0;
+ uint8_t num_ports = 0;
+ uint8_t num_cfg_ports = 0;
+ size_t size;
+
+	/* Extract the DPAA configuration from the fman driver, and the FMC
+	 * configuration for command-line interfaces.
+	 */
+
+ /* Open a basic socket to enable/disable shared
+ * interfaces.
+ */
+ skfd = socket(AF_PACKET, SOCK_RAW, 0);
+ if (unlikely(skfd < 0)) {
+		error(0, errno, "%s(): socket(SOCK_RAW)", __func__);
+ return NULL;
+ }
+
+ /* Initialise the Fman driver */
+ _errno = fman_init();
+ if (_errno) {
+		DPAA_BUS_LOG(ERR, "FMAN driver init failed (%d)", _errno);
+ close(skfd);
+ skfd = -1;
+ return NULL;
+ }
+
+ /* Number of MAC ports */
+ list_for_each_entry(__if, fman_if_list, node)
+ num_ports++;
+
+	if (!num_ports) {
+		DPAA_BUS_LOG(ERR, "FMAN ports not available");
+		goto error;
+	}
+ /* Allocate space for all enabled mac ports */
+ size = sizeof(*netcfg) +
+ (num_ports * sizeof(struct fm_eth_port_cfg));
+
+ netcfg = calloc(1, size);
+ if (unlikely(netcfg == NULL)) {
+		DPAA_BUS_LOG(ERR, "Unable to allocate memory for netcfg");
+ goto error;
+ }
+
+ netcfg->num_ethports = num_ports;
+
+ list_for_each_entry(__if, fman_if_list, node) {
+ struct fm_eth_port_cfg *cfg = &netcfg->port_cfg[idx];
+ /* Hook in the fman driver interface */
+ cfg->fman_if = __if;
+ cfg->rx_def = __if->fqid_rx_def;
+ num_cfg_ports++;
+ idx++;
+ }
+
+	if (!num_cfg_ports) {
+		DPAA_BUS_LOG(ERR, "No FMAN ports found");
+		goto error;
+	} else if (num_ports != num_cfg_ports) {
+		netcfg->num_ethports = num_cfg_ports;
+	}
+
+ return netcfg;
+
+error:
+	if (netcfg) {
+		free(netcfg);
+		netcfg = NULL;
+	}
+	/* Also close the socket opened above; nothing else will */
+	if (skfd >= 0) {
+		close(skfd);
+		skfd = -1;
+	}
+
+	return NULL;
+}
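+
+/* Usage sketch (illustrative, not part of the driver): a typical caller
+ * acquires the configuration once, walks the discovered ports and releases
+ * it on shutdown; the loop body is hypothetical.
+ *
+ *	struct netcfg_info *cfg = netcfg_acquire();
+ *	int i;
+ *
+ *	if (cfg) {
+ *		for (i = 0; i < cfg->num_ethports; i++) {
+ *			struct fm_eth_port_cfg *port = &cfg->port_cfg[i];
+ *			// e.g. bind port->fman_if to an ethdev here
+ *		}
+ *		netcfg_release(cfg);
+ *	}
+ */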
+
+void
+netcfg_release(struct netcfg_info *cfg_ptr)
+{
+ free(cfg_ptr);
+ /* Close socket for shared interfaces */
+ if (skfd >= 0) {
+ close(skfd);
+ skfd = -1;
+ }
+}
diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c
new file mode 100644
index 00000000..b2d7c020
--- /dev/null
+++ b/drivers/bus/dpaa/base/fman/of.c
@@ -0,0 +1,576 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <of.h>
+#include <rte_dpaa_logs.h>
+
+static int alive;
+static struct dt_dir root_dir;
+static const char *base_dir;
+static COMPAT_LIST_HEAD(linear);
+
+static int
+of_open_dir(const char *relative_path, struct dirent ***d)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = scandir(full_path, d, 0, versionsort);
+ if (ret < 0)
+ DPAA_BUS_LOG(ERR, "Failed to open directory %s",
+ full_path);
+ return ret;
+}
+
+static void
+of_close_dir(struct dirent **d, int num)
+{
+ while (num--)
+ free(d[num]);
+ free(d);
+}
+
+static int
+of_open_file(const char *relative_path)
+{
+ int ret;
+ char full_path[PATH_MAX];
+
+ snprintf(full_path, PATH_MAX, "%s/%s", base_dir, relative_path);
+ ret = open(full_path, O_RDONLY);
+ if (ret < 0)
+		DPAA_BUS_LOG(ERR, "Failed to open file %s",
+			     full_path);
+ return ret;
+}
+
+static void
+process_file(struct dirent *dent, struct dt_dir *parent)
+{
+ int fd;
+ struct dt_file *f = malloc(sizeof(*f));
+
+ if (!f) {
+ DPAA_BUS_LOG(DEBUG, "Unable to allocate memory for file node");
+ return;
+ }
+ f->node.is_file = 1;
+ snprintf(f->node.node.name, NAME_MAX, "%s", dent->d_name);
+ snprintf(f->node.node.full_name, PATH_MAX, "%s/%s",
+ parent->node.node.full_name, dent->d_name);
+ f->parent = parent;
+ fd = of_open_file(f->node.node.full_name);
+ if (fd < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to open file node");
+ free(f);
+ return;
+ }
+ f->len = read(fd, f->buf, OF_FILE_BUF_MAX);
+ close(fd);
+ if (f->len < 0) {
+ DPAA_BUS_LOG(DEBUG, "Unable to read file node");
+ free(f);
+ return;
+ }
+ list_add_tail(&f->node.list, &parent->files);
+}
+
+static const struct dt_dir *
+node2dir(const struct device_node *n)
+{
+ struct dt_node *dn = container_of((struct device_node *)n,
+ struct dt_node, node);
+ const struct dt_dir *d = container_of(dn, struct dt_dir, node);
+
+ assert(!dn->is_file);
+ return d;
+}
+
+/* process_dir() calls iterate_dir(), but the latter will also call the former
+ * when recursing into sub-directories, so a predeclaration is needed.
+ */
+static int process_dir(const char *relative_path, struct dt_dir *dt);
+
+static int
+iterate_dir(struct dirent **d, int num, struct dt_dir *dt)
+{
+ int loop;
+ /* Iterate the directory contents */
+ for (loop = 0; loop < num; loop++) {
+ struct dt_dir *subdir;
+ int ret;
+ /* Ignore dot files of all types (especially "..") */
+ if (d[loop]->d_name[0] == '.')
+ continue;
+ switch (d[loop]->d_type) {
+ case DT_REG:
+ process_file(d[loop], dt);
+ break;
+ case DT_DIR:
+ subdir = malloc(sizeof(*subdir));
+ if (!subdir) {
+ perror("malloc");
+ return -ENOMEM;
+ }
+ snprintf(subdir->node.node.name, NAME_MAX, "%s",
+ d[loop]->d_name);
+ snprintf(subdir->node.node.full_name, PATH_MAX,
+ "%s/%s", dt->node.node.full_name,
+ d[loop]->d_name);
+ subdir->parent = dt;
+			ret = process_dir(subdir->node.node.full_name, subdir);
+			if (ret) {
+				free(subdir);
+				return ret;
+			}
+ list_add_tail(&subdir->node.list, &dt->subdirs);
+ break;
+ default:
+ DPAA_BUS_LOG(DEBUG, "Ignoring invalid dt entry %s/%s",
+ dt->node.node.full_name, d[loop]->d_name);
+ }
+ }
+ return 0;
+}
+
+static int
+process_dir(const char *relative_path, struct dt_dir *dt)
+{
+ struct dirent **d;
+ int ret, num;
+
+ dt->node.is_file = 0;
+ INIT_LIST_HEAD(&dt->subdirs);
+ INIT_LIST_HEAD(&dt->files);
+ ret = of_open_dir(relative_path, &d);
+ if (ret < 0)
+ return ret;
+ num = ret;
+ ret = iterate_dir(d, num, dt);
+ of_close_dir(d, num);
+ return (ret < 0) ? ret : 0;
+}
+
+static void
+linear_dir(struct dt_dir *d)
+{
+ struct dt_file *f;
+ struct dt_dir *dd;
+
+ d->compatible = NULL;
+ d->status = NULL;
+ d->lphandle = NULL;
+ d->a_cells = NULL;
+ d->s_cells = NULL;
+ d->reg = NULL;
+ list_for_each_entry(f, &d->files, node.list) {
+ if (!strcmp(f->node.node.name, "compatible")) {
+ if (d->compatible)
+ DPAA_BUS_LOG(DEBUG, "Duplicate compatible in"
+ " %s", d->node.node.full_name);
+ d->compatible = f;
+ } else if (!strcmp(f->node.node.name, "status")) {
+ if (d->status)
+ DPAA_BUS_LOG(DEBUG, "Duplicate status in %s",
+ d->node.node.full_name);
+ d->status = f;
+ } else if (!strcmp(f->node.node.name, "linux,phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "#address-cells")) {
+ if (d->a_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
+ d->node.node.full_name);
+ d->a_cells = f;
+ } else if (!strcmp(f->node.node.name, "#size-cells")) {
+ if (d->s_cells)
+ DPAA_BUS_LOG(DEBUG, "Duplicate s_cells in %s",
+ d->node.node.full_name);
+ d->s_cells = f;
+ } else if (!strcmp(f->node.node.name, "reg")) {
+ if (d->reg)
+ DPAA_BUS_LOG(DEBUG, "Duplicate reg in %s",
+ d->node.node.full_name);
+ d->reg = f;
+ }
+ }
+
+ list_for_each_entry(dd, &d->subdirs, node.list) {
+ list_add_tail(&dd->linear, &linear);
+ linear_dir(dd);
+ }
+}
+
+int
+of_init_path(const char *dt_path)
+{
+ int ret;
+
+ base_dir = dt_path;
+
+ /* This needs to be singleton initialization */
+ DPAA_BUS_HWWARN(alive, "Double-init of device-tree driver!");
+
+ /* Prepare root node (the remaining fields are set in process_dir()) */
+ root_dir.node.node.name[0] = '\0';
+ root_dir.node.node.full_name[0] = '\0';
+ INIT_LIST_HEAD(&root_dir.node.list);
+ root_dir.parent = NULL;
+
+ /* Kick things off... */
+ ret = process_dir("", &root_dir);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to parse device tree");
+ return ret;
+ }
+
+ /* Now make a flat, linear list of directories */
+ linear_dir(&root_dir);
+ alive = 1;
+ return 0;
+}
+
+static void
+destroy_dir(struct dt_dir *d)
+{
+ struct dt_file *f, *tmpf;
+ struct dt_dir *dd, *tmpd;
+
+ list_for_each_entry_safe(f, tmpf, &d->files, node.list) {
+ list_del(&f->node.list);
+ free(f);
+ }
+ list_for_each_entry_safe(dd, tmpd, &d->subdirs, node.list) {
+ destroy_dir(dd);
+ list_del(&dd->node.list);
+ free(dd);
+ }
+}
+
+void
+of_finish(void)
+{
+ DPAA_BUS_HWWARN(!alive, "Double-finish of device-tree driver!");
+
+ destroy_dir(&root_dir);
+ INIT_LIST_HEAD(&linear);
+ alive = 0;
+}
+
+static const struct dt_dir *
+next_linear(const struct dt_dir *f)
+{
+ if (f->linear.next == &linear)
+ return NULL;
+ return list_entry(f->linear.next, struct dt_dir, linear);
+}
+
+static int
+check_compatible(const struct dt_file *f, const char *compatible)
+{
+ const char *c = (char *)f->buf;
+ unsigned int len, remains = f->len;
+
+ while (remains) {
+ len = strlen(c);
+ if (!strcmp(c, compatible))
+ return 1;
+
+ if (remains < len + 1)
+ break;
+
+ c += (len + 1);
+ remains -= (len + 1);
+ }
+ return 0;
+}
+
+const struct device_node *
+of_find_compatible_node(const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (list_empty(&linear))
+ return NULL;
+ if (!from)
+ d = list_entry(linear.next, struct dt_dir, linear);
+ else
+ d = node2dir(from);
+ for (d = next_linear(d); d && (!d->compatible ||
+ !check_compatible(d->compatible,
+ compatible));
+ d = next_linear(d))
+ ;
+ if (d)
+ return &d->node.node;
+ return NULL;
+}
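+
+/* Usage sketch (illustrative): iterate every node matching a compatible
+ * string, in the spirit of the Linux for_each_compatible_node() idiom; the
+ * "fsl,fman" string is only an example.
+ *
+ *	const struct device_node *dn = NULL;
+ *	size_t len;
+ *
+ *	while ((dn = of_find_compatible_node(dn, NULL, "fsl,fman")) != NULL) {
+ *		const void *prop = of_get_property(dn, "cell-index", &len);
+ *		// use prop/len here
+ *	}
+ */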
+
+const void *
+of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp)
+{
+ const struct dt_dir *d;
+ const struct dt_file *f;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ d = node2dir(from);
+ list_for_each_entry(f, &d->files, node.list)
+ if (!strcmp(f->node.node.name, name)) {
+ if (lenp)
+ *lenp = f->len;
+ return f->buf;
+ }
+ return NULL;
+}
+
+bool
+of_device_is_available(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ d = node2dir(dev_node);
+ if (!d->status)
+ return true;
+ if (!strcmp((char *)d->status->buf, "okay"))
+ return true;
+ if (!strcmp((char *)d->status->buf, "ok"))
+ return true;
+ return false;
+}
+
+const struct device_node *
+of_find_node_by_phandle(phandle ph)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ list_for_each_entry(d, &linear, linear)
+ if (d->lphandle && (d->lphandle->len == 4) &&
+ !memcmp(d->lphandle->buf, &ph, 4))
+ return &d->node.node;
+ return NULL;
+}
+
+const struct device_node *
+of_get_parent(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ d = node2dir(dev_node);
+ if (!d->parent)
+ return NULL;
+ return &d->parent->node.node;
+}
+
+const struct device_node *
+of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev)
+{
+ const struct dt_dir *p, *c;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+
+ if (!dev_node)
+ return NULL;
+ p = node2dir(dev_node);
+ if (prev) {
+ c = node2dir(prev);
+ DPAA_BUS_HWWARN((c->parent != p), "Parent/child mismatch");
+ if (c->parent != p)
+ return NULL;
+ if (c->node.list.next == &p->subdirs)
+ /* prev was the last child */
+ return NULL;
+ c = list_entry(c->node.list.next, struct dt_dir, node.list);
+ return &c->node.node;
+ }
+ /* Return first child */
+ if (list_empty(&p->subdirs))
+ return NULL;
+ c = list_entry(p->subdirs.next, struct dt_dir, node.list);
+ return &c->node.node;
+}
+
+uint32_t
+of_n_addr_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->a_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->a_cells->buf[0];
+ assert(d->a_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NA;
+}
+
+uint32_t
+of_n_size_cells(const struct device_node *dev_node)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ return OF_DEFAULT_NA;
+ d = node2dir(dev_node);
+ while ((d = d->parent))
+ if (d->s_cells) {
+ unsigned char *buf =
+ (unsigned char *)&d->s_cells->buf[0];
+ assert(d->s_cells->len == 4);
+ return ((uint32_t)buf[0] << 24) |
+ ((uint32_t)buf[1] << 16) |
+ ((uint32_t)buf[2] << 8) |
+ (uint32_t)buf[3];
+ }
+ return OF_DEFAULT_NS;
+}
+
+const uint32_t *
+of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags __rte_unused)
+{
+ const struct dt_dir *d;
+ const unsigned char *buf;
+ uint32_t na = of_n_addr_cells(dev_node);
+ uint32_t ns = of_n_size_cells(dev_node);
+
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (!d->reg)
+ return NULL;
+ assert(d->reg->len % ((na + ns) * 4) == 0);
+ assert(d->reg->len / ((na + ns) * 4) > (unsigned int) idx);
+ buf = (const unsigned char *)&d->reg->buf[0];
+ buf += (na + ns) * idx * 4;
+ if (size)
+ for (*size = 0; ns > 0; ns--, na++)
+ *size = (*size << 32) +
+ (((uint32_t)buf[4 * na] << 24) |
+ ((uint32_t)buf[4 * na + 1] << 16) |
+ ((uint32_t)buf[4 * na + 2] << 8) |
+ (uint32_t)buf[4 * na + 3]);
+ return (const uint32_t *)buf;
+}
+
+uint64_t
+of_translate_address(const struct device_node *dev_node,
+ const uint32_t *addr)
+{
+ uint64_t phys_addr, tmp_addr;
+ const struct device_node *parent;
+ const uint32_t *ranges;
+ size_t rlen;
+ uint32_t na, pna;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ assert(dev_node != NULL);
+
+ na = of_n_addr_cells(dev_node);
+ phys_addr = of_read_number(addr, na);
+
+ dev_node = of_get_parent(dev_node);
+ if (!dev_node)
+ return 0;
+ else if (node2dir(dev_node) == &root_dir)
+ return phys_addr;
+
+ do {
+ pna = of_n_addr_cells(dev_node);
+ parent = of_get_parent(dev_node);
+ if (!parent)
+ return 0;
+
+		ranges = of_get_property(dev_node, "ranges", &rlen);
+		/* "ranges" property is missing. Translation breaks */
+		if (!ranges) {
+			return 0;
+		/* "ranges" property is empty. Do 1:1 translation */
+		} else if (rlen == 0) {
+			/* Still advance to the parent, else this node would
+			 * be re-examined forever.
+			 */
+			na = pna;
+			dev_node = parent;
+			continue;
+		} else {
+			tmp_addr = of_read_number(ranges + na, pna);
+		}
+
+ na = pna;
+ dev_node = parent;
+ phys_addr += tmp_addr;
+ } while (node2dir(parent) != &root_dir);
+
+ return phys_addr;
+}
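+
+/* Worked example (illustrative): for a node whose "reg" is <0x1000> under a
+ * parent whose "ranges" maps child address 0x0 onto parent address
+ * 0xffe000000, the walk above accumulates
+ * phys_addr = 0x1000 + 0xffe000000 = 0xffe001000. A missing "ranges" stops
+ * translation (returns 0); an empty one is a 1:1 mapping.
+ */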
+
+bool
+of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible)
+{
+ const struct dt_dir *d;
+
+ DPAA_BUS_HWWARN(!alive, "Device-tree driver not initialised!");
+ if (!dev_node)
+ d = &root_dir;
+ else
+ d = node2dir(dev_node);
+ if (d->compatible && check_compatible(d->compatible, compatible))
+ return true;
+ return false;
+}
diff --git a/drivers/bus/dpaa/base/qbman/bman.c b/drivers/bus/dpaa/base/qbman/bman.c
new file mode 100644
index 00000000..0480caa9
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -0,0 +1,394 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+#include <rte_branch_prediction.h>
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static RTE_DEFINE_PER_LCORE(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_t in_use;
+#endif
+};
+
+static inline
+struct bman_portal *bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ const struct bman_depletion *pools = &c->mask;
+ int ret;
+ u8 bpid = 0;
+
+ p = &portal->p;
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference...
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ return NULL;
+ }
+ if (bm_mc_init(p)) {
+ pr_err("Bman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_mask(p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+ /* Write-to-clear any stale interrupt status bits */
+ bm_isr_disable_write(p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(p, portal->irq_sources);
+ bm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, NULL, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ pr_err("Bman RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_isr_disable_write(p, 0);
+ bm_isr_uninhibit(p);
+ return portal;
+fail_rcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->pools);
+fail_pools:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+ return NULL;
+}
+
+struct bman_portal *
+bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal = get_affine_portal();
+
+	/* This function is called from a context that is already affine to a
+	 * CPU, i.e. it is non-migratable to other CPUs.
+	 */
+ portal = bman_create_portal(portal, c);
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+ return portal;
+}
+
+static inline
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->irq, bm);
+
+ kfree(bm->pools);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+}
+
+const struct
+bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_affine_portal();
+ const struct bm_portal_config *pcfg;
+
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(pcfg->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ return pcfg;
+}
+
+int
+bman_get_portal_index(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ int ret = bman_alloc_bpid(&bpid);
+
+ if (ret)
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ int ret = bm_pool_set(bpid, params->thresholds);
+
+ if (ret)
+ goto err;
+ }
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->params = *params;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+
+ return pool;
+err:
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ kfree(pool);
+
+ return NULL;
+}
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+#define BMAN_BUF_MASK 0x0000fffffffffffful
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ u32 i = num - 1;
+ u8 avail;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+
+ p = get_affine_portal();
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ if (unlikely(!r))
+ return -EBUSY;
+
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ r->bufs[0].opaque =
+ cpu_to_be64(((u64)pool->params.bpid << 48) |
+ (bufs[0].opaque & BMAN_BUF_MASK));
+ if (i) {
+ for (i = 1; i < num; i++)
+ r->bufs[i].opaque =
+ cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);
+ }
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ return 0;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags __maybe_unused)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ int ret, i;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs) {
+ for (i = 0; i < num; i++)
+ bufs[i].opaque =
+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
+ }
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
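+
+/* Usage sketch (illustrative, not part of the driver): a minimal
+ * release/acquire round trip on a pool with a dynamically allocated BPID;
+ * buf_phys_addr is hypothetical and error handling is elided.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *	struct bm_buffer buf;
+ *
+ *	bm_buffer_set64(&buf, buf_phys_addr);
+ *	bman_release(pool, &buf, 1, 0);
+ *	bman_acquire(pool, &buf, 1, 0);
+ *	bman_free_pool(pool);
+ */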
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) ==
+ BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ state->as.state.state[0] = be32_to_cpu(state->as.state.state[0]);
+ state->as.state.state[1] = be32_to_cpu(state->as.state.state[1]);
+ state->ds.state.state[0] = be32_to_cpu(state->ds.state.state[0]);
+ state->ds.state.state[1] = be32_to_cpu(state->ds.state.state[1]);
+ return 0;
+}
+
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ return bm_shutdown_pool(&p->p, bpid);
+}
diff --git a/drivers/bus/dpaa/base/qbman/bman.h b/drivers/bus/dpaa/base/qbman/bman.h
new file mode 100644
index 00000000..4b088da9
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman.h
@@ -0,0 +1,550 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __BMAN_H
+#define __BMAN_H
+
+#include "bman_priv.h"
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->ci + (o)))
+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
+ (bm)->ci + (o))
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->ce + (o))
+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->ce + (o)))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dccivac((bm)->ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
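+
+/* Example (illustrative): with ringsize = 8, first = 6 and last = 2 the
+ * difference is 8 + 2 - 6 = 4, i.e. entries 6, 7, 0 and 1.
+ */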
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode;
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'.
+ */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
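+
+/* Note (illustrative): RCR entries are 64 bytes, so an 8-entry ring spans
+ * 512 bytes and BM_RCR_SIZE << 6 (0x200) is exactly the "carry" bit set by
+ * incrementing past the last entry; RCR_CARRYCLEAR() masks it off, wrapping
+ * the cursor back to the (suitably aligned) ring base:
+ *
+ *	base + 8 * 64 == base + 0x200  -->  RCR_CARRYCLEAR()  -->  base
+ */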
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ DPAA_ASSERT(!rcr->busy);
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 1;
+#endif
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->busy);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+ dcbz_64(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
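+
+/* Note (illustrative, not from the original driver): the ring reserves one
+ * entry so that "full" and "empty" remain distinguishable, which gives the
+ * invariant relating the two accessors above:
+ *
+ *   bm_rcr_get_avail(p) + bm_rcr_get_fill(p) == BM_RCR_SIZE - 1
+ *
+ * e.g. assuming BM_RCR_SIZE is 8, a freshly initialised ring reports
+ * avail == 7 and fill == 0.
+ */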
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_user);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
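+
+/* A minimal sketch of the management-command handshake (illustrative only;
+ * bm_shutdown_pool() below is a real in-tree user):
+ *
+ *   struct bm_mc_command *cmd = bm_mc_start(portal);
+ *   struct bm_mc_result *res;
+ *   cmd->acquire.bpid = bpid;
+ *   bm_mc_commit(portal, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ *   while (!(res = bm_mc_result(portal)))
+ *       cpu_relax();
+ *
+ * rridx and vbit toggle on each completion, so successive commands use the
+ * two response registers alternately, with opposite valid-bit polarity.
+ */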
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+
+ DPAA_ASSERT(bpid < bman_pool_max);
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
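+
+/* Worked example (illustrative): for bpid 35, SCN_REG(35) selects
+ * BM_REG_SCN(1), i.e. the REG_SCN+4 register mentioned above, and
+ * SCN_BIT(35) is 0x80000000 >> 3 == 0x10000000, so enabling depletion
+ * notifications for pool 35 sets bit 28 of that register.
+ */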
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
+#else
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
+#else
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+
+ int aq_count = 0;
+ bool stop = false;
+
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ stop = true;
+ } else
+ ++aq_count;
+	}
+ return 0;
+}
+
+#endif /* __BMAN_H */
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
new file mode 100644
index 00000000..5c13a803
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -0,0 +1,323 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_branch_prediction.h>
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "bman_priv.h"
+#include <sys/ioctl.h>
+
+/*
+ * Global variables for the max portal/pool numbers supported by this BMan
+ * version
+ */
+u16 bman_ip_rev;
+u16 bman_pool_max;
+void *bman_ccsr_map;
+
+/*****************/
+/* Portal driver */
+/*****************/
+
+static __thread int fd = -1;
+static __thread struct bm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_bman
+};
+
+static int fsl_bman_portal_init(uint32_t idx, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct bman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ pcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (pcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu");
+ return -EINVAL;
+ }
+ pcfg.cpu = loop;
+ }
+ if (pcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!");
+ return -EINVAL;
+ }
+ /* Allocate and map a bman portal */
+ map.index = idx;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+ pcfg.is_shared = is_shared;
+ pcfg.index = map.index;
+ bman_depletion_fill(&pcfg.mask);
+
+ fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (fd == -1) {
+ pr_err("BMan irq init failed");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+ /* Use the IRQ FD as a unique IRQ number */
+ pcfg.irq = fd;
+
+ portal = bman_create_affine_portal(&pcfg);
+ if (!portal) {
+ pr_err("Bman portal initialisation failed (%d)",
+ pcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ /* Set the IRQ number */
+ irq_map.type = dpaa_portal_bman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(fd, &irq_map);
+ return 0;
+}
+
+static int fsl_bman_portal_finish(void)
+{
+ __maybe_unused const struct bm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(fd);
+
+ cfg = bman_destroy_affine_portal();
+ DPAA_BUG_ON(cfg != &pcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int bman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int bman_thread_finish(void)
+{
+ return fsl_bman_portal_finish();
+}
+
+void bman_thread_irq(void)
+{
+ qbman_invoke_irq(pcfg.irq);
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+int bman_init_ccsr(const struct device_node *node)
+{
+ static int ccsr_map_fd;
+ uint64_t phys_addr;
+ const uint32_t *bman_addr;
+ uint64_t regs_size;
+
+ bman_addr = of_get_address(node, 0, &regs_size, NULL);
+ if (!bman_addr) {
+ pr_err("of_get_address cannot return BMan address");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(node, bman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+		pr_err("Cannot open /dev/mem for BMan CCSR map");
+ return ccsr_map_fd;
+ }
+
+ bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (bman_ccsr_map == MAP_FAILED) {
+		pr_err("Cannot map BMan CCSR base; BMan: 0x%x, "
+			"phys: 0x%lx, size: 0x%lx",
+ *bman_addr, phys_addr, regs_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bman_global_init(void)
+{
+ const struct device_node *dt_node;
+ static int done;
+
+ if (done)
+ return -EBUSY;
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
+ if (!dt_node) {
+ pr_err("No bman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ } else {
+		pr_warn("unknown BMan version in portal node, defaulting "
+			"to rev1.0");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ }
+
+ if (!bman_ip_rev) {
+ pr_err("Unknown bman portal version\n");
+ return -ENODEV;
+ }
+ {
+ const struct device_node *dn = of_find_compatible_node(NULL,
+ NULL, "fsl,bman");
+ if (!dn)
+ pr_err("No bman device node available");
+
+ if (bman_init_ccsr(dn))
+ pr_err("BMan CCSR map failed.");
+ }
+
+ done = 1;
+ return 0;
+}
+
+#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+	u32 e = 0; /* exponent; 'val' becomes the 8-bit coefficient */
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPAA_ASSERT(e < 0x10);
+ return (val | (e << 8));
+}
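+
+/* Worked example (illustrative): the encoding is an 8-bit coefficient with a
+ * 4-bit exponent, i.e. threshold ~= val * 2^e. For an input of 5000:
+ *
+ *   __generate_thresh(5000, 0) == (156 | (5 << 8))  ->  156 * 32 == 4992
+ *   __generate_thresh(5000, 1) == (157 | (5 << 8))  ->  157 * 32 == 5024
+ *
+ * so the round-down form never exceeds the requested value and the round-up
+ * form never falls short of it.
+ */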
+
+#define POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ out_be32(bman_ccsr_map + POOL_SWDET(bpid),
+ __generate_thresh(thresholds[0], 0));
+ out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
+ __generate_thresh(thresholds[1], 1));
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(thresholds[2], 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(thresholds[3], 1));
+ return 0;
+}
+
+#define BMAN_LOW_DEFAULT_THRESH 0x40
+#define BMAN_HIGH_DEFAULT_THRESH 0x80
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh)
+{
+ if (!bman_ccsr_map)
+ return -ENODEV;
+ if (bpid >= bman_pool_max)
+ return -EINVAL;
+ if (low_thresh && high_thresh) {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(low_thresh, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(high_thresh, 1));
+ } else {
+ out_be32(bman_ccsr_map + POOL_HWDET(bpid),
+ __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
+ out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
+ __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
+ }
+ return 0;
+}
diff --git a/drivers/bus/dpaa/base/qbman/bman_priv.h b/drivers/bus/dpaa/base/qbman/bman_priv.h
new file mode 100644
index 00000000..07d9cec6
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/bman_priv.h
@@ -0,0 +1,125 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __BMAN_PRIV_H
+#define __BMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+
+#define BMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+#define BMAN_CCSR_MAP "/dev/mem"
+
+/* This mask contains all the "irqsource" bits visible to API users */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write".
+ */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+
+/*
+ * Global variables for the max portal/pool numbers supported by this BMan
+ * version
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+	/* This is used for any "core-affine" portals, i.e. default portals
+	 * associated with the corresponding cpu. -1 implies that no core
+	 * affinity is configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.)
+ */
+ int is_shared;
+ /* These are the buffer pool IDs that may be used via this portal. */
+ struct bman_depletion mask;
+};
+
+int bman_init_ccsr(const struct device_node *node);
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree).
+ */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* __BMAN_PRIV_H */
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_alloc.c b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
new file mode 100644
index 00000000..35dba7f7
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -0,0 +1,104 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2009-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+#include <process.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_bpid, result, count, align, partial);
+}
+
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ process_release(dpaa_id_bpid, bpid, count);
+}
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return process_reserve(dpaa_id_bpid, bpid, count);
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_fqid, result, count, align, partial);
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ process_release(dpaa_id_fqid, fqid, count);
+}
+
+int qman_reserve_fqid_range(u32 fqid, unsigned int count)
+{
+ return process_reserve(dpaa_id_fqid, fqid, count);
+}
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_qpool, result, count, align, partial);
+}
+
+void qman_release_pool_range(u32 pool, u32 count)
+{
+ process_release(dpaa_id_qpool, pool, count);
+}
+
+int qman_reserve_pool_range(u32 pool, u32 count)
+{
+ return process_reserve(dpaa_id_qpool, pool, count);
+}
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return process_alloc(dpaa_id_cgrid, result, count, align, partial);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ process_release(dpaa_id_cgrid, cgrid, count);
+}
+
+int qman_reserve_cgrid_range(u32 cgrid, u32 count)
+{
+ return process_reserve(dpaa_id_cgrid, cgrid, count);
+}
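+
+/* Usage sketch (illustrative, not part of this file): every wrapper above
+ * shares the process_alloc()/process_release() semantics, e.g. to take one
+ * buffer-pool id and hand it back later:
+ *
+ *   u32 bpid;
+ *   if (bman_alloc_bpid_range(&bpid, 1, 0, 0) != 1)
+ *       return -ENODEV;
+ *   ...
+ *   bman_release_bpid_range(bpid, 1);
+ */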
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
new file mode 100644
index 00000000..0017da52
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -0,0 +1,136 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <process.h>
+#include "dpaa_sys.h"
+
+struct process_interrupt {
+ int irq;
+ irqreturn_t (*isr)(int irq, void *arg);
+ unsigned long flags;
+ const char *name;
+ void *arg;
+ struct list_head node;
+};
+
+static COMPAT_LIST_HEAD(process_irq_list);
+static pthread_mutex_t process_irq_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void process_interrupt_install(struct process_interrupt *irq)
+{
+ int ret;
+ /* Add the irq to the end of the list */
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_add_tail(&irq->node, &process_irq_list);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static void process_interrupt_remove(struct process_interrupt *irq)
+{
+ int ret;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_del(&irq->node);
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+}
+
+static struct process_interrupt *process_interrupt_find(int irq_num)
+{
+ int ret;
+ struct process_interrupt *i = NULL;
+
+ ret = pthread_mutex_lock(&process_irq_lock);
+ assert(!ret);
+ list_for_each_entry(i, &process_irq_list, node) {
+ if (i->irq == irq_num)
+ goto done;
+ }
+done:
+ ret = pthread_mutex_unlock(&process_irq_lock);
+ assert(!ret);
+ return i;
+}
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name,
+ void *arg __maybe_unused)
+{
+ struct process_interrupt *irq_node =
+ kmalloc(sizeof(*irq_node), GFP_KERNEL);
+
+ if (!irq_node)
+ return -ENOMEM;
+ irq_node->irq = irq;
+ irq_node->isr = isr;
+ irq_node->flags = flags;
+ irq_node->name = name;
+ irq_node->arg = arg;
+ process_interrupt_install(irq_node);
+ return 0;
+}
+
+int qbman_free_irq(int irq, __maybe_unused void *arg)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (!irq_node)
+ return -EINVAL;
+ process_interrupt_remove(irq_node);
+ kfree(irq_node);
+ return 0;
+}
+
+/* This is the interface from the platform-specific driver code to obtain
+ * interrupt handlers that have been registered.
+ */
+void qbman_invoke_irq(int irq)
+{
+ struct process_interrupt *irq_node = process_interrupt_find(irq);
+
+ if (irq_node)
+ irq_node->isr(irq, irq_node->arg);
+}
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.h b/drivers/bus/dpaa/base/qbman/dpaa_sys.h
new file mode 100644
index 00000000..bee9fe57
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.h
@@ -0,0 +1,61 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <of.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#define DPAA_ASSERT(x) RTE_ASSERT(x)
+
+/* This is the interface from the platform-agnostic driver code to (de)register
+ * interrupt handlers. We simply create/destroy corresponding structs.
+ */
+int qbman_request_irq(int irq, irqreturn_t (*isr)(int irq, void *arg),
+ unsigned long flags, const char *name, void *arg);
+int qbman_free_irq(int irq, void *arg);
+
+void qbman_invoke_irq(int irq);
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c
new file mode 100644
index 00000000..b8ec5398
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -0,0 +1,331 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#include "process.h"
+
+#include <fsl_usd.h>
+
+/* As higher-level drivers will be built on top of this (dma_mem, qbman, ...),
+ * it's preferable that the process driver itself not provide any exported API.
+ * As such, combined with the fact that none of these operations are
+ * performance critical, it is justified to use lazy initialisation, so that's
+ * what the lock is for.
+ */
+static int fd = -1;
+static pthread_mutex_t fd_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int check_fd(void)
+{
+ int ret;
+
+ if (fd >= 0)
+ return 0;
+ ret = pthread_mutex_lock(&fd_init_lock);
+ assert(!ret);
+ /* check again with the lock held */
+ if (fd < 0)
+ fd = open(PROCESS_PATH, O_RDWR);
+ ret = pthread_mutex_unlock(&fd_init_lock);
+ assert(!ret);
+ return (fd >= 0) ? 0 : -ENODEV;
+}
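+
+/* Note (illustrative): this is the usual check/lock/re-check pattern -- the
+ * unlocked fast path skips the mutex once the fd is open, while re-checking
+ * under the lock stops two racing threads from opening PROCESS_PATH twice.
+ */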
+
+#define DPAA_IOCTL_MAGIC 'u'
+struct dpaa_ioctl_id_alloc {
+ uint32_t base; /* Return value, the start of the allocated range */
+ enum dpaa_id_type id_type; /* what kind of resource(s) to allocate */
+ uint32_t num; /* how many IDs to allocate (and return value) */
+ uint32_t align; /* must be a power of 2, 0 is treated like 1 */
+ int partial; /* whether to allow less than 'num' */
+};
+
+struct dpaa_ioctl_id_release {
+	/* Inputs */
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+struct dpaa_ioctl_id_reserve {
+ enum dpaa_id_type id_type;
+ uint32_t base;
+ uint32_t num;
+};
+
+#define DPAA_IOCTL_ID_ALLOC \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x01, struct dpaa_ioctl_id_alloc)
+#define DPAA_IOCTL_ID_RELEASE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x02, struct dpaa_ioctl_id_release)
+#define DPAA_IOCTL_ID_RESERVE \
+ _IOW(DPAA_IOCTL_MAGIC, 0x0A, struct dpaa_ioctl_id_reserve)
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial)
+{
+ struct dpaa_ioctl_id_alloc id = {
+ .id_type = id_type,
+ .num = num,
+ .align = align,
+ .partial = partial
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ ret = ioctl(fd, DPAA_IOCTL_ID_ALLOC, &id);
+ if (ret)
+ return ret;
+ for (ret = 0; ret < (int)id.num; ret++)
+ base[ret] = id.base + ret;
+ return id.num;
+}
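+
+/* Note (illustrative): the ioctl returns the base of a contiguous range and
+ * the loop above fans it out into the caller's array. E.g. a partial request
+ * for 8 FQIDs that is granted 4 fills base[0..3] with id.base..id.base+3 and
+ * returns 4.
+ */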
+
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_release id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret) {
+ fprintf(stderr, "Process FD failure\n");
+ return;
+ }
+ ret = ioctl(fd, DPAA_IOCTL_ID_RELEASE, &id);
+ if (ret)
+ fprintf(stderr, "Process FD ioctl failure type %d base 0x%x num %d\n",
+ id_type, base, num);
+}
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num)
+{
+ struct dpaa_ioctl_id_reserve id = {
+ .id_type = id_type,
+ .base = base,
+ .num = num
+ };
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+ return ioctl(fd, DPAA_IOCTL_ID_RESERVE, &id);
+}
+
+/***************************************/
+/* Mapping and using QMan/BMan portals */
+/***************************************/
+
+#define DPAA_IOCTL_PORTAL_MAP \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x07, struct dpaa_ioctl_portal_map)
+#define DPAA_IOCTL_PORTAL_UNMAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x08, struct dpaa_portal_map)
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_MAP, params);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_MAP)");
+ return ret;
+ }
+ return 0;
+}
+
+int process_portal_unmap(struct dpaa_portal_map *map)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_PORTAL_UNMAP, map);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_PORTAL_UNMAP)");
+ return ret;
+ }
+ return 0;
+}
+
+#define DPAA_IOCTL_PORTAL_IRQ_MAP \
+ _IOW(DPAA_IOCTL_MAGIC, 0x09, struct dpaa_ioctl_irq_map)
+
+int process_portal_irq_map(int ifd, struct dpaa_ioctl_irq_map *map)
+{
+ map->fd = fd;
+ return ioctl(ifd, DPAA_IOCTL_PORTAL_IRQ_MAP, map);
+}
+
+int process_portal_irq_unmap(int ifd)
+{
+ return close(ifd);
+}
+
+struct dpaa_ioctl_raw_portal {
+ /* inputs */
+ enum dpaa_portal_type type; /* Type of portal to allocate */
+
+	uint8_t enable_stash; /* set to non-zero to turn on stashing */
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+	/* Specifies a particular portal index to map, or QBMAN_ANY_PORTAL_IDX
+	 * for don't-care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+#define DPAA_IOCTL_ALLOC_RAW_PORTAL \
+ _IOWR(DPAA_IOCTL_MAGIC, 0x0C, struct dpaa_ioctl_raw_portal)
+
+#define DPAA_IOCTL_FREE_RAW_PORTAL \
+ _IOR(DPAA_IOCTL_MAGIC, 0x0D, struct dpaa_ioctl_raw_portal)
+
+static int process_portal_allocate(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_ALLOC_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_ALLOC_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+static int process_portal_free(struct dpaa_ioctl_raw_portal *portal)
+{
+ int ret = check_fd();
+
+ if (ret)
+ return ret;
+
+ ret = ioctl(fd, DPAA_IOCTL_FREE_RAW_PORTAL, portal);
+ if (ret) {
+ perror("ioctl(DPAA_IOCTL_FREE_RAW_PORTAL)");
+ return ret;
+ }
+ return 0;
+}
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.enable_stash = portal->enable_stash;
+ input.cpu = portal->cpu;
+ input.cache = portal->cache;
+ input.window = portal->window;
+ input.sdest = portal->sdest;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int qman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_qman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+ int ret;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.enable_stash = 0;
+
+ ret = process_portal_allocate(&input);
+ if (ret)
+ return ret;
+ portal->index = input.index;
+ portal->cinh = input.cinh;
+ portal->cena = input.cena;
+ return 0;
+}
+
+int bman_free_raw_portal(struct dpaa_raw_portal *portal)
+{
+ struct dpaa_ioctl_raw_portal input;
+
+ input.type = dpaa_portal_bman;
+ input.index = portal->index;
+ input.cinh = portal->cinh;
+ input.cena = portal->cena;
+
+ return process_portal_free(&input);
+}
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
new file mode 100644
index 00000000..87fec60d
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -0,0 +1,2497 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman.h"
+#include <rte_branch_prediction.h>
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* interrupt threshold used if the EQCR congests */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+/* maximum number of DQRR entries to process in qman_poll() */
+#define FSL_QMAN_POLL_LIMIT 8
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...).
+ */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
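+
+/* Note (illustrative): locking is opt-in per frame queue, so a critical
+ * section such as
+ *
+ *   FQLOCK(fq);
+ *   ...update retirement state...
+ *   FQUNLOCK(fq);
+ *
+ * only takes the spinlock for FQs created with QMAN_FQ_FLAG_LOCKED; for all
+ * other FQs both macros reduce to a flag test.
+ */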
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ dpaa_set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ dpaa_clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead.
+ */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ /* track if memory was allocated by the driver */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	/* Keep a shadow copy of the DQRR on LE systems, as the SW needs to
+	 * byte-swap the DQRR's read-only memory. The first entry must be
+	 * aligned to 2^10 (1024) so that DQRR index calculations can be based
+	 * on the shadow-copy address (6 bits for the address shift + 4 bits
+	 * for the DQRR size).
+	 */
+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
+ __attribute__((aligned(1024)));
+#endif
+};
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler.
+ */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &RTE_PER_LCORE(qman_affine_portal);
+}
+
+/* This gives an FQID->FQ lookup to cover the fact that we can't directly
+ * demux retirement notifications (the fact that they are sometimes
+ * h/w-consumed means that contextB isn't always a s/w demux - and as we
+ * can't know which case it is when looking at the notification, we have to
+ * use the slow lookup for all of them). NB, it's possible to have multiple
+ * FQ objects refer to the same FQID (though at most one of them should be
+ * the consumer), so this table isn't for all FQs - FQs are added when
+ * retirement commands are issued, and removed when they complete, which
+ * also massively reduces the size of this table.
+ */
+IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * from the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+	/* Allocate 1 more entry, since the first entry is not used */
+	num_entries++;
+ qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table) {
+ pr_err("QMan: Could not allocate fq lookup table\n");
+ return -ENOMEM;
+ }
+ memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
+ qman_fq_lookup_table_size = num_entries;
+ pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table,
+ (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+	/* Can't use index zero, because it has a special meaning
+	 * in the context_b field.
+ */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
+#endif
+
+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to HW format */
+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
+ fqd->context_b = cpu_to_be32(fqd->context_b);
+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
+ fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
+}
+
+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to CPU format */
+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
+ fqd->context_b = be32_to_cpu(fqd->context_b);
+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
+}
+
+static inline void cpu_to_hw_fd(struct qm_fd *fd)
+{
+ fd->addr = cpu_to_be40(fd->addr);
+ fd->status = cpu_to_be32(fd->status);
+ fd->opaque = cpu_to_be32(fd->opaque);
+}
+
+static inline void hw_fd_to_cpu(struct qm_fd *fd)
+{
+ fd->addr = be40_to_cpu(fd->addr);
+ fd->status = be32_to_cpu(fd->status);
+ fd->opaque = be32_to_cpu(fd->opaque);
+}
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do.
+ */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
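+
+/* A sketch of how these constants are meant to be consumed (illustrative
+ * only; the driver's actual poll path may differ):
+ *
+ *   if (!--p->slowpoll) {
+ *       u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ *       p->slowpoll = __poll_portal_slow(p, is) ? SLOW_POLL_BUSY
+ *                                               : SLOW_POLL_IDLE;
+ *   }
+ *   __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+ */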
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues().
+ */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = mfatb();
+
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case.
+ */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi, ci;
+ u32 cfg;
+
+ /*
+ * Disable EQCI stashing because the QMan only
+ * presents the value it previously stashed to
+ * maintain coherency. Setting the stash threshold
+	 * to 1 then 0 ensures that QMan has resynchronized
+ * its internal copy so that the portal is clean
+ * when it is reinitialized in the future
+ */
+ cfg = (qm_in(CFG) & 0x0fffffff) |
+ (1 << 28); /* QCSP_CFG: EST */
+ qm_out(CFG, cfg);
+ cfg &= 0x0fffffff; /* stash threshold = 0 */
+ qm_out(CFG, cfg);
+
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ /* Refresh EQCR CI cache value */
+ qm_cl_invalidate(EQCR_CI);
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ __maybe_unused const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dccivac(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline int qm_mr_init(struct qm_portal *portal,
+ __maybe_unused enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
+
+static inline
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+ /*
+ * Prep the low-level portal struct with the mapped addresses from
+ * the config; everything that follows depends on it, and "config"
+ * is kept mostly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("Qman DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ pr_err("Qman MC initialisation failed\n");
+ goto fail_mc;
+ }
+
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, 0);
+ qm_mr_set_ithresh(p, 0);
+ qm_isr_set_iperiod(p, 0);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", c->channel);
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(p, portal->irq_sources);
+ qm_isr_status_clear(p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_isr_disable_write(p, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ pr_err("Qman EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(p, isdr);
+ if (qm_dqrr_current(p)) {
+ pr_err("Qman DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(p))
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_isr_disable_write(p, 0);
+ qm_isr_uninhibit(p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+ spin_lock_destroy(&portal->cgr_lock);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return NULL;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal = get_affine_portal();
+ /* A criterion for calling this function (from qman_driver.c) is that
+ * we're already affine to the cpu and won't schedule onto another cpu.
+ */
+
+ res = qman_create_portal(portal, c, cgrs);
+ if (res) {
+ spin_lock(&affine_mask_lock);
+ CPU_SET(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ spin_unlock(&affine_mask_lock);
+ }
+ return res;
+}
+
+static inline
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+
+ spin_lock_destroy(&qm->cgr_lock);
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ CPU_CLR(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ return pcfg;
+}
+
+int qman_get_portal_index(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ return p->config->index;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+ struct qm_mr_entry swapped_msg;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ swapped_msg = *msg;
+ hw_fd_to_cpu(&swapped_msg.ern.fd);
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p,
+ be32_to_cpu(msg->fq.fqid));
+ DPAA_BUG_ON(!fq);
+ fq_state_change(p, fq, &swapped_msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(
+ be32_to_cpu(msg->fq.contextB));
+#else
+ fq = (void *)(uintptr_t)
+ be32_to_cpu(msg->fq.contextB);
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else {
+ static int warn_once;
+
+ if (!warn_once) {
+ pr_crit("Leaking DCP ERNs!\n");
+ warn_once = 1;
+ }
+ }
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
+ fq->cb.ern(p, fq, &swapped_msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe because:
+ *
+ * (i) this clearing can only occur after qman_set_vdq() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_set_vdq() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on an LE system the fields of the
+ * dequeue entry must be swapped. Because the
+ * QMan HW will ignore writes, the DQRR entry is
+ * copied and the index stored within the copy.
+ */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
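+
+/*
+ * Illustrative sketch, not part of this driver: a minimal DQRR callback
+ * obeying the contract described above. Returning qman_cb_dqrr_consume
+ * lets the poller CDC-consume the entry on our behalf, and the callback
+ * may safely call back into any QMan API. "app_handle_frame" is a
+ * hypothetical application hook, so the block is guarded out of the build.
+ */
+#if 0
+static enum qman_cb_dqrr_result
+example_dqrr_cb(struct qman_portal *portal __maybe_unused,
+ struct qman_fq *fq __maybe_unused,
+ const struct qm_dqrr_entry *dq)
+{
+ /* On LE systems the poller has already swapped the entry to CPU order */
+ app_handle_frame(&dq->fd);
+ return qman_cb_dqrr_consume;
+}
+#endif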
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ }
+ DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qm_dqrr_entry *dq;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+#endif
+
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ return NULL;
+
+ if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
+ /* Invalid DQRR entry - consume it and return NULL
+ * to the user as no valid packet is seen.
+ */
+ qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
+ return NULL;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+
+ return (struct qm_dqrr_entry *)dq;
+}
+
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
+ qm_dqrr_next(&p->p);
+}
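+
+/*
+ * Illustrative sketch, not part of this driver: pull-mode reception built
+ * on qman_dequeue()/qman_dqrr_consume() above. Dequeues must already have
+ * been triggered for 'fq' (e.g. via SDQCR or VDQCR); "process_fd" is a
+ * hypothetical application hook, so the block is guarded out of the build.
+ */
+#if 0
+static void example_pull_loop(struct qman_fq *fq)
+{
+ struct qm_dqrr_entry *dq;
+
+ while ((dq = qman_dequeue(fq)) != NULL) {
+ process_fd(&dq->fd); /* fields already in CPU endianness */
+ qman_dqrr_consume(fq, dq);
+ }
+}
+#endif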
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_affine_portal();
+ int ret;
+
+ ret = __poll_portal_fast(p, limit);
+ return ret;
+}
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
+}
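+
+/*
+ * Illustrative sketch, not part of this driver: a run-to-completion loop
+ * for a core whose portal has no interrupt sources enabled, so both the
+ * slow path (CSCI/MR, rate-limited via slowpoll) and the fast path (DQRR)
+ * are serviced by qman_poll(). "app_should_stop" is a hypothetical
+ * application hook, so the block is guarded out of the build.
+ */
+#if 0
+static void example_poll_loop(void)
+{
+ while (!app_should_stop())
+ qman_poll();
+}
+#endif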
+
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qman_stop_dequeues_ex(p);
+}
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ DPAA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+}
+
+void qman_static_dequeue_add(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+void qman_static_dequeue_del(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ pools &= p->config->pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+}
+
+u32 qman_static_dequeue_get(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ return p->sdqcr;
+}
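+
+/*
+ * Illustrative sketch, not part of this driver: temporarily adding a pool
+ * channel to this portal's static dequeue command.
+ * QM_SDQCR_CHANNELS_POOL_CONV() converts a pool channel id to its SDQCR
+ * bit, and qman_static_dequeue_add() masks the request against the pools
+ * this portal is allowed to serve. Guarded out of the build as
+ * example-only.
+ */
+#if 0
+static void example_pool_dequeue(u16 pool_channel)
+{
+ u32 bit = QM_SDQCR_CHANNELS_POOL_CONV(pool_channel);
+
+ qman_static_dequeue_add(bit);
+ /* ... frames now arrive through the portal's DQRR callbacks ... */
+ qman_static_dequeue_del(bit);
+}
+#endif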
+
+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+
+/* Frame queue API */
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+ pr_info("Find empty table entry failed\n");
+ return -ENOMEM;
+ }
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(&fqd);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those.
+ */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPAA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ return 0;
+err:
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
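+
+/*
+ * Illustrative sketch, not part of this driver: creating an FQ object with
+ * a dynamically allocated FQID. Because neither AS_IS nor NO_MODIFY is
+ * passed, qman_create_fq() returns before the QUERYFQ path above; with
+ * QMAN_FQ_FLAG_AS_IS on an existing FQID it would instead import the
+ * hardware state. Guarded out of the build as it is example-only.
+ */
+#if 0
+static int example_create(struct qman_fq *fq)
+{
+ /* the fqid argument (0) is ignored when DYNAMIC_FQID is set */
+ return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+}
+#endif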
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
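+ /* fall through - a parked FQ is released the same way as an OOS one */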
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
+}
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* OAC can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = fq->key;
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = rte_mem_virt2iova(fq);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
+ cpu_to_hw_fqd(&mcc->initfq.fqd);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ return 0;
+}
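+
+/*
+ * Illustrative sketch, not part of this driver: scheduling an FQ on this
+ * portal's own channel with congestion-group accounting enabled. Only
+ * field and flag names used by qman_init_fq() above appear here; the CGR
+ * 'cgid' is assumed to exist already, and the block is guarded out of the
+ * build as it is example-only.
+ */
+#if 0
+static int example_init(struct qman_fq *fq, u8 cgid)
+{
+ struct qm_mcc_initfq opts;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CGID;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CGE;
+ opts.fqd.cgid = cgid;
+ /* QMAN_INITFQ_FLAG_LOCAL fills in DESTWQ (work queue 4) for us */
+ return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
+ &opts);
+}
+#endif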
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+
+ return ret;
+}
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ struct qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ return rval;
+}
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
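+
+/*
+ * Illustrative sketch, not part of this driver: the retire -> OOS ->
+ * destroy teardown sequence. qman_retire_fq() returns 1 when retirement
+ * is pending (QM_MCR_RESULT_PENDING); the FQRN then arrives through the
+ * MR slow path, so the busy-wait below leans on qman_poll(). Guarded out
+ * of the build as it is example-only.
+ */
+#if 0
+static int example_teardown(struct qman_fq *fq)
+{
+ u32 flags;
+ int ret = qman_retire_fq(fq, &flags);
+
+ if (ret < 0)
+ return ret;
+ while (fq->state != qman_fq_state_retired)
+ qman_poll(); /* services the MR, which delivers the FQRN */
+ ret = qman_oos_fq(fq);
+ if (ret)
+ return ret;
+ qman_destroy_fq(fq, 0);
+ return 0;
+}
+#endif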
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ return ret;
+}
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ *fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(fqd);
+ return 0;
+}
+
+int qman_query_fq_has_pkts(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ int ret = 0;
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ ret = !!mcr->queryfq_np.frm_cnt;
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *np = mcr->queryfq_np;
+ np->fqd_link = be24_to_cpu(np->fqd_link);
+ np->odp_seq = be16_to_cpu(np->odp_seq);
+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
+ np->ics_surp = be16_to_cpu(np->ics_surp);
+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
+ }
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res, myverb;
+
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ int i, array_len;
+
+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ for (i = 0; i < array_len; i++)
+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
+ }
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ cgrd->cgr.wr_parm_g.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
+ cgrd->cgr.wr_parm_y.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
+ cgrd->cgr.wr_parm_r.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ cgrd->cscn_targ_swp[i] =
+ be32_to_cpu(cgrd->cscn_targ_swp[i]);
+ return 0;
+}
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 res;
+ unsigned int i;
+
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *congestion = mcr->querycongestion;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
+ congestion->state.state[i] =
+ be32_to_cpu(congestion->state.state[i]);
+ return 0;
+}
+
+int qman_set_vdq(struct qman_fq *fq, u16 num)
+{
+ struct qman_portal *p = get_affine_portal();
+ uint32_t vdqcr;
+ int ret = -EBUSY;
+
+ vdqcr = QM_VDQCR_EXACT;
+ vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ FQUNLOCK(fq);
+ goto escape;
+ }
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+
+out:
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret = -EBUSY;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+
+ p = get_affine_portal();
+
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
+ FQUNLOCK(fq);
+ goto escape;
+ }
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ if (ret)
+ return ret;
+
+ /* VDQCR is set */
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return 0;
+}
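+
+/*
+ * Illustrative sketch, not part of this driver: draining up to three
+ * frames from a retired FQ with a volatile dequeue. Only the NUMFRAMES/
+ * EXACT bits are passed in; qman_volatile_dequeue() rejects a vdqcr that
+ * already carries an FQID and ORs in fq->fqid itself. Completion is
+ * signalled through the DQRR path (QM_DQRR_STAT_DQCR_EXPIRED), which
+ * clears QMAN_FQ_STATE_VDQCR. Guarded out of the build as example-only.
+ */
+#if 0
+static int example_drain(struct qman_fq *fq)
+{
+ int ret = qman_volatile_dequeue(fq, 0,
+ QM_VDQCR_EXACT | QM_VDQCR_NUMFRAMES_SET(3));
+
+ if (ret)
+ return ret;
+ while (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ qman_poll_dqrr(16);
+ return 0;
+}
+#endif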
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ return (avail == 0);
+}
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ struct qman_portal *p = get_affine_portal();
+
+ p->cb_dc_ern = handler;
+ } else
+ cb_dc_ern = handler;
+}
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ return NULL;
+
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd = *fd;
+ cpu_to_hw_fd(&eq->fd);
+ return eq;
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ return 0;
+}
+
+int qman_enqueue_multi(struct qman_fq *fq,
+ const struct qm_fd *fd,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
+
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+ /* Write the verb for each recorded entry first; the cachelines
+ * are flushed afterwards in one pass so the flushes complete faster.
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
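+
+/*
+ * Illustrative sketch, not part of this driver: sending a burst with
+ * qman_enqueue_multi(), which returns how many frames it placed in the
+ * EQCR; the caller retries the remainder once ring entries are consumed.
+ * Guarded out of the build as it is example-only.
+ */
+#if 0
+static int example_send_burst(struct qman_fq *fq, const struct qm_fd *fds,
+ int n)
+{
+ int sent = 0;
+
+ while (sent < n) {
+ int ret = qman_enqueue_multi(fq, fds + sent, n - sent);
+
+ if (!ret)
+ cpu_relax(); /* EQCR full; wait for consumption */
+ sent += ret;
+ }
+ return sent;
+}
+#endif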
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_eqcr_entry *eq;
+
+ eq = try_p_eq_start(p, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+
+ return 0;
+}
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
+ mcc->initcgr.cgr.wr_parm_g.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
+ mcc->initcgr.cgr.wr_parm_y.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
+ mcc->initcgr.cgr.wr_parm_r.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
+
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ p = get_affine_portal();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success, but scream
+ * and wail to the log file.
+ */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+ return ret;
+}
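+
+/*
+ * Illustrative sketch, not part of this driver: registering a congestion
+ * state-change callback on the current portal. "example_cscn" matches the
+ * (portal, cgr, congested) invocation used above; passing
+ * QMAN_CGR_FLAG_USE_INIT with zeroed opts initialises the CGR while
+ * qman_create_cgr() fills in CSCN_TARG. Guarded out of the build as it is
+ * example-only.
+ */
+#if 0
+static void example_cscn(struct qman_portal *p __maybe_unused,
+ struct qman_cgr *cgr, int congested)
+{
+ pr_info("CGR %d %s congestion\n", cgr->cgrid,
+ congested ? "entered" : "exited");
+}
+
+static int example_add_cgr(struct qman_cgr *cgr, u32 cgrid)
+{
+ struct qm_mcc_initcgr opts;
+
+ memset(&opts, 0, sizeof(opts));
+ cgr->cgrid = cgrid;
+ cgr->cb = example_cscn;
+ return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+}
+#endif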
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcc_initcgr local_opts;
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("QMan version doesn't support CSCN => DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ return ret;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_DCP_MASK(dcp_portal);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ pr_crit("Attempting to delete cgr from different portal than"
+ " it was create: create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock(&p->cgr_lock);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock(&p->cgr_lock);
+put_portal:
+ return ret;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct qm_portal *low_p;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, drain = 0;
+ u32 result;
+ u32 channel, wq;
+ u16 dest_wq;
+
+ p = get_affine_portal();
+ low_p = &p->p;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(low_p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
+ channel = dest_wq & 0x7;
+ wq = dest_wq >> 3;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ __maybe_unused u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (u16)(qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1) << 4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x,"
+ " it is scheduled on channel 0x%x",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(low_p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+ /* Keep draining DQRR while checking the MR*/
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ low_p, dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x,"
+ " result=0x%x\n", fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ * ERNs come in.
+ */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ * to be drained.
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(low_p, vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ * empty the ring completely.
+ */
+ while (dqrr) {
+ if (dqrr->fqid == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(low_p,
+ dqrr, 0);
+ qm_dqrr_pvb_update(low_p);
+ qm_dqrr_next(low_p);
+ dqrr = qm_dqrr_current(low_p);
+ }
+ } while (fq_empty == 0);
+ }
+ qm_dqrr_sdqcr_set(low_p, 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(low_p);
+ qm_mr_cci_consume_to_current(low_p);
+ qm_mr_pvb_update(low_p);
+ msg = qm_mr_current(low_p);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err(
+ "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(low_p);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(low_p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+
+ }
+ return -1;
+}
diff --git a/drivers/bus/dpaa/base/qbman/qman.h b/drivers/bus/dpaa/base/qbman/qman.h
new file mode 100644
index 00000000..2c0f694c
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman.h
@@ -0,0 +1,888 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent).
+ */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->ci + (o)))
+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
+ (qm)->ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->ce + (o))
+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->ce + (o)))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dccivac((qm)->ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues.
+ */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
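+
+/* e.g. with QM_DQRR_SIZE == 16, qm_cyc_diff(16, 14, 2) == 4: the producer
+ * index wrapped past the end of the ring and entries 14, 15, 0 and 1 are the
+ * newly-valid ones.
+ */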
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
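+
+/* e.g. with QM_EQCR_SIZE == 8 and 64-byte entries, the EQCR ring spans 0x200
+ * bytes, so advancing a cursor past the last entry sets the 0x200 "carry"
+ * bit; EQCR_CARRYCLEAR() masks that bit off to wrap back to the ring base,
+ * and EQCR_PTR2IDX() recovers the 0..7 index from bits 6..8 of the pointer.
+ */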
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'.
+ */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 1;
+#endif
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(eqcr->busy);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPAA_ASSERT(eqcr->busy); \
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline const struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
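+
+/* Polling sketch (assumes pvb production mode and cci consumption mode, the
+ * combination used by the FQ drain logic elsewhere in this patch):
+ *
+ *	qm_dqrr_pvb_update(p);
+ *	dqrr = qm_dqrr_current(p);
+ *	while (dqrr != NULL) {
+ *		... process *dqrr ...
+ *		qm_dqrr_next(p);
+ *		qm_dqrr_cci_consume_to_current(p);
+ *		qm_dqrr_pvb_update(p);
+ *		dqrr = qm_dqrr_current(p);
+ *	}
+ */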
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* -------------- */
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+/* ------------------------------ */
+/* --- Management command API --- */
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ QM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_user;
+#endif
+ dcbz_64(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering.
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
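+
+/* A complete command/response exchange, as used by the FQ cleanup code in
+ * this patch, pairs the three calls above:
+ *
+ *	mcc = qm_mc_start(p);
+ *	mcc->alterfq.fqid = cpu_to_be32(fqid);
+ *	qm_mc_commit(p, QM_MCC_VERB_ALTER_OOS);
+ *	while (!(mcr = qm_mc_result(p)))
+ *		cpu_relax();
+ *	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ */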
+
+/* Portal interrupt register API */
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+#if defined(RTE_ARCH_ARM64)
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
+#else
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+#if defined(RTE_ARCH_ARM64)
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
+#else
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+#endif
+}
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
new file mode 100644
index 00000000..7a688967
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -0,0 +1,288 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fsl_usd.h>
+#include <process.h>
+#include "qman_priv.h"
+#include <sys/ioctl.h>
+#include <rte_branch_prediction.h>
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available).
+ */
+u16 qman_ip_rev;
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+
+/* CCSR map address to access CCSR-based registers */
+void *qman_ccsr_map;
+/* The qman clock frequency */
+u32 qman_clk;
+
+static __thread int fd = -1;
+static __thread struct qm_portal_config pcfg;
+static __thread struct dpaa_ioctl_portal_map map = {
+ .type = dpaa_portal_qman
+};
+
+static int fsl_qman_portal_init(uint32_t index, int is_shared)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *portal;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (ret) {
+ error(0, ret, "pthread_getaffinity_np()");
+ return ret;
+ }
+ pcfg.cpu = -1;
+ for (loop = 0; loop < CPU_SETSIZE; loop++)
+ if (CPU_ISSET(loop, &cpuset)) {
+ if (pcfg.cpu != -1) {
+ pr_err("Thread is not affine to 1 cpu\n");
+ return -EINVAL;
+ }
+ pcfg.cpu = loop;
+ }
+ if (pcfg.cpu == -1) {
+ pr_err("Bug in getaffinity handling!\n");
+ return -EINVAL;
+ }
+
+ /* Allocate and map a qman portal */
+ map.index = index;
+ ret = process_portal_map(&map);
+ if (ret) {
+ error(0, ret, "process_portal_map()");
+ return ret;
+ }
+ pcfg.channel = map.channel;
+ pcfg.pools = map.pools;
+ pcfg.index = map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+
+ fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (fd == -1) {
+ pr_err("QMan irq init failed\n");
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ pcfg.is_shared = is_shared;
+ pcfg.node = NULL;
+ pcfg.irq = fd;
+
+ portal = qman_create_affine_portal(&pcfg, NULL);
+ if (!portal) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ pcfg.cpu);
+ process_portal_unmap(&map.addr);
+ return -EBUSY;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = map.addr.cinh;
+ process_portal_irq_map(fd, &irq_map);
+ return 0;
+}
+
+static int fsl_qman_portal_finish(void)
+{
+ __maybe_unused const struct qm_portal_config *cfg;
+ int ret;
+
+ process_portal_irq_unmap(fd);
+
+ cfg = qman_destroy_affine_portal();
+ DPAA_BUG_ON(cfg != &pcfg);
+ ret = process_portal_unmap(&map.addr);
+ if (ret)
+ error(0, ret, "process_portal_unmap()");
+ return ret;
+}
+
+int qman_thread_init(void)
+{
+ /* Convert from contiguous/virtual cpu numbering to real cpu when
+ * calling into the code that is dependent on the device naming.
+ */
+ return fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
+}
+
+int qman_thread_finish(void)
+{
+ return fsl_qman_portal_finish();
+}
+
+void qman_thread_irq(void)
+{
+ qbman_invoke_irq(pcfg.irq);
+
+ /* Now we need to uninhibit interrupts. This is the only code outside
+ * the regular portal driver that manipulates any portal register, so
+ * rather than breaking that encapsulation I am simply hard-coding the
+ * offset to the inhibit register here.
+ */
+ out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
+int qman_global_init(void)
+{
+ const struct device_node *dt_node;
+ int ret = 0;
+ size_t lenp;
+ const u32 *chanid;
+ static int ccsr_map_fd;
+ const uint32_t *qman_addr;
+ uint64_t phys_addr;
+ uint64_t regs_size;
+ const u32 *clk;
+
+ static int done;
+
+ if (done)
+ return -EBUSY;
+
+ /* Use the device-tree to determine IP revision until something better
+ * is devised.
+ */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman-portal");
+ if (!dt_node) {
+ pr_err("No qman portals available for any CPU\n");
+ return -ENODEV;
+ }
+ if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.0.0"))
+ pr_err("QMan rev1.0 on P4080 rev1 is not supported!\n");
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.1.0"))
+ qman_ip_rev = QMAN_REV11;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-1.2.0"))
+ qman_ip_rev = QMAN_REV12;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-2.0.0"))
+ qman_ip_rev = QMAN_REV20;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.0.1"))
+ qman_ip_rev = QMAN_REV30;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.1") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.2") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.1.3"))
+ qman_ip_rev = QMAN_REV31;
+ else if (of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.0") ||
+ of_device_is_compatible(dt_node, "fsl,qman-portal-3.2.1"))
+ qman_ip_rev = QMAN_REV32;
+ else
+ qman_ip_rev = QMAN_REV11;
+
+ if (!qman_ip_rev) {
+ pr_err("Unknown qman portal version\n");
+ return -ENODEV;
+ }
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,pool-channel-range");
+ if (!dt_node) {
+ pr_err("No qman pool channel range available\n");
+ return -ENODEV;
+ }
+ chanid = of_get_property(dt_node, "fsl,pool-channel-range", &lenp);
+ if (!chanid) {
+		pr_err("Cannot get pool-channel-range property\n");
+ return -EINVAL;
+ }
+
+ /* get ccsr base */
+ dt_node = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dt_node) {
+ pr_err("No qman device node available\n");
+ return -ENODEV;
+ }
+ qman_addr = of_get_address(dt_node, 0, &regs_size, NULL);
+ if (!qman_addr) {
+ pr_err("of_get_address cannot return qman address\n");
+ return -EINVAL;
+ }
+ phys_addr = of_translate_address(dt_node, qman_addr);
+ if (!phys_addr) {
+ pr_err("of_translate_address failed\n");
+ return -EINVAL;
+ }
+
+ ccsr_map_fd = open("/dev/mem", O_RDWR);
+ if (unlikely(ccsr_map_fd < 0)) {
+		pr_err("Cannot open /dev/mem for qman ccsr map\n");
+ return ccsr_map_fd;
+ }
+
+ qman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, ccsr_map_fd, phys_addr);
+ if (qman_ccsr_map == MAP_FAILED) {
+		pr_err("Cannot map qman ccsr base\n");
+ return -EINVAL;
+ }
+
+ clk = of_get_property(dt_node, "clock-frequency", NULL);
+ if (!clk)
+ pr_warn("Can't find Qman clock frequency\n");
+ else
+ qman_clk = be32_to_cpu(*clk);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ ret = qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
+ if (ret)
+ return ret;
+#endif
+ return 0;
+}
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
new file mode 100644
index 00000000..3e1d7f92
--- /dev/null
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -0,0 +1,310 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __QMAN_PRIV_H
+#define __QMAN_PRIV_H
+
+#include "dpaa_sys.h"
+#include <fsl_qman.h>
+
+/* Congestion Groups */
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, eg. so we can
+ * ignore those that don't concern us. We harness the structure and accessor
+ * details already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < (int)__CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a,
+ const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
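+
+/* Iteration sketch: qman_cgrs_next() returns the next set group above 'num',
+ * or __CGR_NUM when none remain, so all set groups can be walked with:
+ *
+ *	int i;
+ *	for (i = qman_cgrs_next(&c, -1); i < (int)__CGR_NUM;
+ *	     i = qman_cgrs_next(&c, i))
+ *		... congestion group 'i' is set ...
+ */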
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device_node *node;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* If the caller enables DQRR stashing (and thus wishes to operate the
+ * portal from only one cpu), this is the logical CPU that the portal
+ * will stash to. Whether stashing is enabled or not, this setting is
+ * also used for any "core-affine" portals, ie. default portals
+ * associated to the corresponding cpu. -1 implies that there is no
+ * core affinity configured.
+ */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /* the unique index of this portal */
+ u32 index;
+ /* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs).
+ */
+ int is_shared;
+ /* The portal's dedicated channel id, use this value for initialising
+ * frame queues to target this portal when scheduled.
+ */
+ u16 channel;
+ /* A mask of which pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).
+ */
+ u32 pools;
+
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+extern u32 qman_clk;
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg);
+void qm_set_liodns(struct qm_portal_config *pcfg);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally.
+ */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is larger than the size of the context_b field,
+ * then a lookup table is required.
+ */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(): choose one SOURCE, one COUNT, one dequeue TYPE,
+ * and a TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
+
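+/* e.g. a static dequeue command taking up to three frames per dequeue from
+ * the portal's dedicated channel, with active-FQ QoS ordering, could be
+ * composed as (an illustrative combination, not one mandated by this patch):
+ *
+ *	sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ *		QM_SDQCR_TYPE_ACTIVE_QOS | QM_SDQCR_CHANNELS_DEDICATED;
+ *	qm_dqrr_sdqcr_set(portal, sdqcr);
+ */
+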
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_VERB_COLOUR_MASK 0x18 /* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN 0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW 0x08
+#define QM_EQCR_VERB_COLOUR_RED 0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE 0x18
+#define QM_EQCR_VERB_INTERRUPT 0x04 /* on command consumption */
+#define QM_EQCR_VERB_ORP 0x02 /* enable order restoration */
+#define QM_EQCR_DCA_ENABLE 0x80
+#define QM_EQCR_DCA_PARK 0x40
+#define QM_EQCR_DCA_IDXMASK 0x0f /* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+#define QM_EQCR_FQID_NULL 0 /* eg. for an ORP seqnum hole */
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_isr_<reg>_<verb>(). So for example, qm_isr_disable_write()
+ * means "write the disable register" rather than "disable the ability to
+ * write".
+ */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#define QMAN_PORTAL_IRQ_PATH "/dev/fsl-usdpaa-irq"
+
+#endif /* __QMAN_PRIV_H */
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
new file mode 100644
index 00000000..1cc8c893
--- /dev/null
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -0,0 +1,525 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_bus.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+int dpaa_logtype_bus;
+int dpaa_logtype_mempool;
+int dpaa_logtype_pmd;
+
+struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
+
+/* define a variable to hold the portal_key, once created.*/
+pthread_key_t dpaa_portal_key;
+
+RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
+
+static inline void
+dpaa_add_to_device_list(struct rte_dpaa_device *dev)
+{
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
+}
+
+static inline void
+dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
+{
+	TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+}
+
+/*
+ * Looks for a SEC device node in the device tree (DTS).
+ * Returns 0 if SEC devices are available, -1 otherwise.
+ */
+static inline int
+dpaa_sec_available(void)
+{
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpaa_clean_device_list(void);
+
+static int
+dpaa_create_device_list(void)
+{
+ int i;
+ int ret;
+ struct rte_dpaa_device *dev;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+
+ /* Creating Ethernet Devices */
+ for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cfg = &dpaa_netcfg->port_cfg[i];
+ fman_intf = cfg->fman_if;
+
+ /* Device identifiers */
+ dev->id.fman_id = fman_intf->fman_idx + 1;
+ dev->id.mac_id = fman_intf->mac_idx;
+ dev->device_type = FSL_DPAA_ETH;
+ dev->id.dev_id = i;
+
+ /* Create device name */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
+ fman_intf->mac_idx);
+ DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+ dev->device.name = dev->name;
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count = i;
+
+	/* Unlike the ETH case, RTE_LIBRTE_DPAA_MAX_CRYPTODEV SEC devices are
+	 * created only if the "sec" property is found in the device tree.
+	 * Logically there is no limit on the number of devices (QI
+	 * interfaces) that can be created.
+	 */
+
+ if (dpaa_sec_available()) {
+ DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+ return 0;
+ }
+
+ /* Creating SEC Devices */
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
+ ret = -1;
+ goto cleanup;
+ }
+
+ dev->device_type = FSL_DPAA_CRYPTO;
+ dev->id.dev_id = rte_dpaa_bus.device_count + i;
+
+		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
+		 * of a crypto PMD name, use RTE_ETH_NAME_MAX_LEN as that is
+		 * the size allocated for dev->name.
+		 */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ sprintf(dev->name, "dpaa-sec%d", i);
+ DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count += i;
+
+ return 0;
+
+cleanup:
+ dpaa_clean_device_list();
+ return ret;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+/** XXX move this function into a separate file */
+static int
+_dpaa_portal_init(void *arg)
+{
+ cpu_set_t cpuset;
+ pthread_t id;
+ uint32_t cpu = rte_lcore_id();
+ int ret;
+ struct dpaa_portal *dpaa_io_portal;
+
+ BUS_INIT_FUNC_TRACE();
+
+	if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+		cpu = rte_get_master_lcore();
+	else if (cpu >= RTE_MAX_LCORE)
+		/* The core id is not supported */
+		return -1;
+
+ /* Set CPU affinity for this thread */
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ id = pthread_self();
+ ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
+				"core: %d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ /* Initialise bman thread portals */
+ ret = bman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
+
+ /* Initialise qman thread portals */
+ ret = qman_thread_init();
+ if (ret) {
+		DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ bman_thread_finish();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(ERR, "Unable to allocate memory");
+ bman_thread_finish();
+ qman_thread_finish();
+ return -ENOMEM;
+ }
+
+ dpaa_io_portal->qman_idx = qman_get_portal_index();
+ dpaa_io_portal->bman_idx = bman_get_portal_index();
+ dpaa_io_portal->tid = syscall(SYS_gettid);
+
+ ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
+ "core %d with ret: %d", cpu, ret);
+ dpaa_portal_finish(NULL);
+
+ return ret;
+ }
+
+ RTE_PER_LCORE(_dpaa_io) = true;
+
+	DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");
+
+ return 0;
+}
+
+/*
+ * rte_dpaa_portal_init - Wrapper over _dpaa_portal_init with thread level check
+ * XXX Complete this
+ */
+int
+rte_dpaa_portal_init(void *arg)
+{
+ if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
+ return _dpaa_portal_init(arg);
+
+ return 0;
+}
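+
+/* Usage sketch (illustrative): a data-path thread bound to a single lcore
+ * initialises its portal once before any QMan/BMan operations:
+ *
+ *	if (rte_dpaa_portal_init(NULL))
+ *		rte_exit(EXIT_FAILURE, "dpaa portal init failed\n");
+ */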
+
+void
+dpaa_portal_finish(void *arg)
+{
+ struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
+
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
+ return;
+ }
+
+ bman_thread_finish();
+ qman_thread_finish();
+
+ pthread_setspecific(dpaa_portal_key, NULL);
+
+ rte_free(dpaa_io_portal);
+ dpaa_io_portal = NULL;
+
+ RTE_PER_LCORE(_dpaa_io) = false;
+}
+
+#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
+#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
+
+static int
+rte_dpaa_bus_scan(void)
+{
+ int ret;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
+ return 0;
+ }
+
+ /* Load the device-tree driver */
+ ret = of_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
+ return -1;
+ }
+
+ /* Get the interface configurations from device-tree */
+ dpaa_netcfg = netcfg_acquire();
+ if (!dpaa_netcfg) {
+ DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
+ return -EINVAL;
+ }
+
+ RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
+
+ if (!dpaa_netcfg->num_ethports) {
+ DPAA_BUS_LOG(INFO, "no network interfaces available");
+ /* This is not an error */
+ return 0;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
+ dpaa_netcfg, dpaa_netcfg->num_ethports);
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dump_netcfg(dpaa_netcfg);
+#endif
+
+ DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
+ dpaa_netcfg->num_ethports);
+ ret = dpaa_create_device_list();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
+ return ret;
+ }
+
+	/* Create the key, supplying a function that is invoked
+	 * when a portal-affined thread is deleted.
+	 */
+ ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+ if (ret) {
+ DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+ dpaa_clean_device_list();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
+ (unsigned int)dpaa_portal_key, ret);
+
+ return 0;
+}
+
+/* register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ BUS_INIT_FUNC_TRACE();
+
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = &rte_dpaa_bus;
+}
+
+/* un-register a dpaa bus based dpaa driver */
+void
+rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
+{
+ struct rte_dpaa_bus *dpaa_bus;
+
+ BUS_INIT_FUNC_TRACE();
+
+ dpaa_bus = driver->dpaa_bus;
+
+ TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->dpaa_bus = NULL;
+}
+
+static int
+rte_dpaa_device_match(struct rte_dpaa_driver *drv,
+ struct rte_dpaa_device *dev)
+{
+ int ret = -1;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if (!drv || !dev) {
+ DPAA_BUS_DEBUG("Invalid drv or dev received.");
+ return ret;
+ }
+
+ if (drv->drv_type == dev->device_type) {
+ DPAA_BUS_INFO("Device: %s matches for driver: %s",
+ dev->name, drv->driver.name);
+ ret = 0; /* Found a match */
+ }
+
+ return ret;
+}
+
+static int
+rte_dpaa_bus_probe(void)
+{
+ int ret = -1;
+ struct rte_dpaa_device *dev;
+ struct rte_dpaa_driver *drv;
+
+ BUS_INIT_FUNC_TRACE();
+
+ /* For each registered driver, and device, call the driver->probe */
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
+ ret = rte_dpaa_device_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe)
+ continue;
+
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA_BUS_ERR("Unable to probe.\n");
+ break;
+ }
+ }
+ return 0;
+}
+
+static struct rte_device *
+rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_dpaa_device *dev;
+
+ TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
+ if (start && &dev->device == start) {
+ start = NULL; /* starting point found */
+ continue;
+ }
+
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get the iommu class of DPAA devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa_get_iommu_class(void)
+{
+ return RTE_IOVA_PA;
+}
+
+struct rte_dpaa_bus rte_dpaa_bus = {
+ .bus = {
+ .scan = rte_dpaa_bus_scan,
+ .probe = rte_dpaa_bus_probe,
+ .find_device = rte_dpaa_find_device,
+ .get_iommu_class = rte_dpaa_get_iommu_class,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
+ .device_count = 0,
+};
+
+RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
+
+RTE_INIT(dpaa_init_log);
+static void
+dpaa_init_log(void)
+{
+ dpaa_logtype_bus = rte_log_register("bus.dpaa");
+ if (dpaa_logtype_bus >= 0)
+ rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
+
+ dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
+ if (dpaa_logtype_mempool >= 0)
+ rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
+
+ dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
+ if (dpaa_logtype_pmd >= 0)
+ rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
+}
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
new file mode 100644
index 00000000..42733aeb
--- /dev/null
+++ b/drivers/bus/dpaa/include/compat.h
@@ -0,0 +1,385 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __COMPAT_H
+#define __COMPAT_H
+
+#include <sched.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <linux/types.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <assert.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <error.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. I.e. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#define __maybe_unused __rte_unused
+#define __always_unused __rte_unused
+#define __packed __rte_packed
+#define noinline __attribute__((noinline))
+
+#define L1_CACHE_BYTES 64
+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#else
+#define pr_debug(fmt, args...) do { } while (0)
+#endif
+
+#define DPAA_BUG_ON(x) RTE_ASSERT(x)
+
+/* Required types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+typedef cpu_set_t cpumask_t;
+typedef uint32_t phandle;
+typedef uint32_t gfp_t;
+typedef uint32_t irqreturn_t;
+
+#define IRQ_HANDLED 0
+#define request_irq qbman_request_irq
+#define free_irq qbman_free_irq
+
+#define __iomem
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+/* to be used as an upper-limit only */
+#define NR_CPUS 64
+
+/* Waitqueue stuff */
+typedef struct { } wait_queue_head_t;
+#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
+#define wake_up(x) do { } while (0)
+
+/* I/O operations */
+static inline u32 in_be32(volatile void *__p)
+{
+ volatile u32 *p = __p;
+ return rte_be_to_cpu_32(*p);
+}
+
+static inline void out_be32(volatile void *__p, u32 val)
+{
+ volatile u32 *p = __p;
+ *p = rte_cpu_to_be_32(val);
+}
+
+#define dcbt_ro(p) __builtin_prefetch(p, 0)
+#define dcbt_rw(p) __builtin_prefetch(p, 1)
+
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define dcbz_64(p) dcbz(p)
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+
+#define dcbit_ro(p) \
+ do { \
+ dccivac(p); \
+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
+ } while (0)
+
+#define barrier() { asm volatile ("" : : : "memory"); }
+#define cpu_relax barrier
+
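+/* Read the ARMv8 virtual counter (cntvct_el0), re-reading until two
+ * consecutive samples agree (or a small retry budget runs out) to guard
+ * against unstable reads; the result is scaled by 64, presumably to
+ * approximate core cycles from the slower generic timer.
+ */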
+static inline uint64_t mfatb(void)
+{
+ uint64_t ret, ret_new, timeout = 200;
+
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ while (ret != ret_new && timeout--) {
+ ret = ret_new;
+ asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
+ }
+ DPAA_BUG_ON(!timeout && (ret != ret_new));
+ return ret * 64;
+}
+
+/* Spin for a few cycles without bothering the bus */
+static inline void cpu_spin(int cycles)
+{
+ uint64_t now = mfatb();
+
+ while (mfatb() < (now + cycles))
+ ;
+}
+
+/* Qman/Bman API inlines and macros */
+#ifdef lower_32_bits
+#undef lower_32_bits
+#endif
+#define lower_32_bits(x) ((u32)(x))
+
+#ifdef upper_32_bits
+#undef upper_32_bits
+#endif
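+/* Note: the double 16-bit shift keeps this well-defined even when 'x' is a
+ * 32-bit type, where a single '>> 32' would be undefined behaviour in C.
+ */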
+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+
+/*
+ * Swap bytes of a 48-bit value.
+ */
+static inline uint64_t
+__bswap_48(uint64_t x)
+{
+ return ((x & 0x0000000000ffULL) << 40) |
+ ((x & 0x00000000ff00ULL) << 24) |
+ ((x & 0x000000ff0000ULL) << 8) |
+ ((x & 0x0000ff000000ULL) >> 8) |
+ ((x & 0x00ff00000000ULL) >> 24) |
+ ((x & 0xff0000000000ULL) >> 40);
+}
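+/* Example: __bswap_48(0x112233445566ULL) == 0x665544332211ULL. */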
+
+/*
+ * Swap bytes of a 40-bit value.
+ */
+static inline uint64_t
+__bswap_40(uint64_t x)
+{
+ return ((x & 0x00000000ffULL) << 32) |
+ ((x & 0x000000ff00ULL) << 16) |
+ ((x & 0x0000ff0000ULL)) |
+ ((x & 0x00ff000000ULL) >> 16) |
+ ((x & 0xff00000000ULL) >> 32);
+}
+
+/*
+ * Swap bytes of a 24-bit value.
+ */
+static inline uint32_t
+__bswap_24(uint32_t x)
+{
+ return ((x & 0x0000ffULL) << 16) |
+ ((x & 0x00ff00ULL)) |
+ ((x & 0xff0000ULL) >> 16);
+}
+
+#define be64_to_cpu(x) rte_be_to_cpu_64(x)
+#define be32_to_cpu(x) rte_be_to_cpu_32(x)
+#define be16_to_cpu(x) rte_be_to_cpu_16(x)
+
+#define cpu_to_be64(x) rte_cpu_to_be_64(x)
+#define cpu_to_be32(x) rte_cpu_to_be_32(x)
+#define cpu_to_be16(x) rte_cpu_to_be_16(x)
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define cpu_to_be48(x) __bswap_48(x)
+#define be48_to_cpu(x) __bswap_48(x)
+
+#define cpu_to_be40(x) __bswap_40(x)
+#define be40_to_cpu(x) __bswap_40(x)
+
+#define cpu_to_be24(x) __bswap_24(x)
+#define be24_to_cpu(x) __bswap_24(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define cpu_to_be48(x) (x)
+#define be48_to_cpu(x) (x)
+
+#define cpu_to_be40(x) (x)
+#define be40_to_cpu(x) (x)
+
+#define cpu_to_be24(x) (x)
+#define be24_to_cpu(x) (x)
+
+#endif /* RTE_BIG_ENDIAN */
+
+/* memcpy() alternatives - when alignments are known in advance, these
+ * helpers copy aligned words or shorts directly rather than calling
+ * memcpy().
+ */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x3);
+ DPAA_BUG_ON((unsigned long)src & 0x3);
+ DPAA_BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+
+ DPAA_BUG_ON((unsigned long)dest & 0x1);
+ DPAA_BUG_ON((unsigned long)src & 0x1);
+ DPAA_BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
+
+/* Allocator stuff */
+#define kmalloc(sz, t) malloc(sz)
+#define vmalloc(sz) malloc(sz)
+#define kfree(p) { if (p) free(p); }
+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
+{
+ void *ptr = malloc(sz);
+
+ if (ptr)
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
+{
+ void *p;
+
+ if (posix_memalign(&p, 4096, 4096))
+ return 0;
+ memset(p, 0, 4096);
+ return (unsigned long)p;
+}
+
+/* Spinlock stuff */
+#define spinlock_t rte_spinlock_t
+#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define spin_lock_init(x) rte_spinlock_init(x)
+#define spin_lock_destroy(x)
+#define spin_lock(x) rte_spinlock_lock(x)
+#define spin_unlock(x) rte_spinlock_unlock(x)
+#define spin_lock_irq(x) spin_lock(x)
+#define spin_unlock_irq(x) spin_unlock(x)
+#define spin_lock_irqsave(x, f) spin_lock_irq(x)
+#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
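+
+/* Usage sketch (illustrative): these wrappers let kernel-style callers run
+ * unmodified on top of DPDK spinlocks, e.g.:
+ *
+ *   static DEFINE_SPINLOCK(foo_lock);
+ *
+ *   spin_lock(&foo_lock);
+ *   ... critical section ...
+ *   spin_unlock(&foo_lock);
+ */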
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#include <dpaa_list.h>
+#include <dpaa_bits.h>
+
+#endif /* __COMPAT_H */
diff --git a/drivers/bus/dpaa/include/dpaa_bits.h b/drivers/bus/dpaa/include/dpaa_bits.h
new file mode 100644
index 00000000..71f2d809
--- /dev/null
+++ b/drivers/bus/dpaa/include/dpaa_bits.h
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_BITS_H
+#define __DPAA_BITS_H
+
+/* Bitfield stuff. */
+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
+#define BITS_MASK(idx) (1UL << ((idx) & (BITS_PER_ULONG - 1)))
+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
+
+static inline void dpaa_set_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p |= mask;
+}
+
+static inline void dpaa_set_bit(int idx, volatile unsigned long *bits)
+{
+ dpaa_set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void dpaa_clear_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void dpaa_clear_bit(int idx,
+ volatile unsigned long *bits)
+{
+ dpaa_clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
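+/* Usage sketch (illustrative): track up to 64 IDs in a flat bit-array:
+ *
+ *   unsigned long used[BITS_IDX(63) + 1] = { 0 };
+ *
+ *   dpaa_set_bit(5, used);    marks ID 5 as in use
+ *   dpaa_clear_bit(5, used);  releases it again
+ */
+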
+#endif /* __DPAA_BITS_H */
diff --git a/drivers/bus/dpaa/include/dpaa_list.h b/drivers/bus/dpaa/include/dpaa_list.h
new file mode 100644
index 00000000..871e6121
--- /dev/null
+++ b/drivers/bus/dpaa/include/dpaa_list.h
@@ -0,0 +1,101 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_LIST_H
+#define __DPAA_LIST_H
+
+/****************/
+/* Linked-lists */
+/****************/
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define COMPAT_LIST_HEAD(n) \
+struct list_head n = { \
+ .prev = &n, \
+ .next = &n \
+}
+
+#define INIT_LIST_HEAD(p) \
+do { \
+ struct list_head *__p298 = (p); \
+ __p298->next = __p298; \
+ __p298->prev = __p298->next; \
+} while (0)
+#define list_entry(node, type, member) \
+ (type *)((void *)node - offsetof(type, member))
+#define list_empty(p) \
+({ \
+ const struct list_head *__p298 = (p); \
+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
+})
+#define list_add(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->next = __l298->next; \
+ __p298->prev = __l298; \
+ __l298->next->prev = __p298; \
+ __l298->next = __p298; \
+} while (0)
+#define list_add_tail(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->prev = __l298->prev; \
+ __p298->next = __l298; \
+ __l298->prev->next = __p298; \
+ __l298->prev = __p298; \
+} while (0)
+#define list_for_each(i, l) \
+ for (i = (l)->next; i != (l); i = i->next)
+#define list_for_each_safe(i, j, l) \
+ for (i = (l)->next, j = i->next; i != (l); \
+ i = j, j = i->next)
+#define list_for_each_entry(i, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
+ i = list_entry(i->name.next, typeof(*i), name))
+#define list_for_each_entry_safe(i, j, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name), \
+ j = list_entry(i->name.next, typeof(*j), name); \
+ &i->name != (l); \
+ i = j, j = list_entry(j->name.next, typeof(*j), name))
+#define list_del(i) \
+do { \
+ (i)->next->prev = (i)->prev; \
+ (i)->prev->next = (i)->next; \
+} while (0)
+
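+/* Usage sketch (illustrative): a minimal list of caller-defined items:
+ *
+ *   struct item { int val; struct list_head node; };
+ *   COMPAT_LIST_HEAD(items);
+ *   struct item *i;
+ *
+ *   list_add_tail(&obj->node, &items);
+ *   list_for_each_entry(i, &items, node)
+ *           use(i->val);
+ */
+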
+#endif /* __DPAA_LIST_H */
diff --git a/drivers/bus/dpaa/include/dpaa_rbtree.h b/drivers/bus/dpaa/include/dpaa_rbtree.h
new file mode 100644
index 00000000..f8c9b593
--- /dev/null
+++ b/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -0,0 +1,143 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_RBTREE_H
+#define __DPAA_RBTREE_H
+
+#include <rte_common.h>
+/************/
+/* RB-trees */
+/************/
+
+/* Linux has a good RB-tree implementation, which we can't use (GPL). It also
+ * has a flat/hooked-in interface that virtually requires license-contamination
+ * in order to write a caller-compatible implementation. Instead, I've created
+ * an RB-tree encapsulation on top of Linux's primitives (it does some of the
+ * work the client logic would normally do), and this gives us something we can
+ * reimplement on LWE. Unfortunately there are no good, free RB-tree
+ * implementations out there that are license-compatible and "flat" (i.e. no
+ * dynamic allocation). I did find a malloc-based one that I could convert, but
+ * that will be a task for later on. For now, LWE's RB-tree is implemented
+ * using an ordered linked-list.
+ *
+ * Note, the only Linux-esque type is "struct rb_node", because it's used
+ * statically in the exported header, so it can't be opaque. Our version
+ * doesn't include a "rb_parent_color" field because we're doing a linked-list
+ * instead of a true rb-tree.
+ */
+
+struct rb_node {
+ struct rb_node *prev, *next;
+};
+
+struct dpa_rbtree {
+ struct rb_node *head, *tail;
+};
+
+#define DPAA_RBTREE { NULL, NULL }
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->head = tree->tail = NULL;
+}
+
+#define QMAN_NODE2OBJ(ptr, type, node_field) \
+ (type *)((char *)ptr - offsetof(type, node_field))
+
+#define IMPLEMENT_DPAA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *node = tree->head; \
+ if (!node) { \
+ tree->head = tree->tail = &obj->node_field; \
+ obj->node_field.prev = obj->node_field.next = NULL; \
+ return 0; \
+ } \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (obj->val_field == item->val_field) \
+ return -EBUSY; \
+ if (obj->val_field < item->val_field) { \
+ if (tree->head == node) \
+ tree->head = &obj->node_field; \
+ else \
+ node->prev->next = &obj->node_field; \
+ obj->node_field.prev = node->prev; \
+ obj->node_field.next = node; \
+ node->prev = &obj->node_field; \
+ return 0; \
+ } \
+ node = node->next; \
+ } \
+ obj->node_field.prev = tree->tail; \
+ obj->node_field.next = NULL; \
+ tree->tail->next = &obj->node_field; \
+ tree->tail = &obj->node_field; \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ if (tree->head == &obj->node_field) { \
+ if (tree->tail == &obj->node_field) \
+ /* Only item in the list */ \
+ tree->head = tree->tail = NULL; \
+ else { \
+ /* Is the head, next != NULL */ \
+ tree->head = tree->head->next; \
+ tree->head->prev = NULL; \
+ } \
+ } else { \
+ if (tree->tail == &obj->node_field) { \
+ /* Is the tail, prev != NULL */ \
+ tree->tail = tree->tail->prev; \
+ tree->tail->next = NULL; \
+ } else { \
+ /* Is neither the head nor the tail */ \
+ obj->node_field.prev->next = obj->node_field.next; \
+ obj->node_field.next->prev = obj->node_field.prev; \
+ } \
+ } \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ struct rb_node *node = tree->head; \
+ while (node) { \
+ type *item = QMAN_NODE2OBJ(node, type, node_field); \
+ if (val == item->val_field) \
+ return item; \
+ if (val < item->val_field) \
+ return NULL; \
+ node = node->next; \
+ } \
+ return NULL; \
+}
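+
+/* Usage sketch (illustrative): instantiate the ordered-list "tree" for a
+ * caller-defined type keyed on a u32 field:
+ *
+ *   struct foo {
+ *           u32 fqid;
+ *           struct rb_node node;
+ *   };
+ *   IMPLEMENT_DPAA_RBTREE(footree, struct foo, node, fqid);
+ *
+ * This generates footree_push(), footree_del() and footree_find(), which
+ * keep objects sorted by 'fqid' and reject duplicate keys with -EBUSY.
+ */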
+
+#endif /* __DPAA_RBTREE_H */
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
new file mode 100644
index 00000000..9890e09c
--- /dev/null
+++ b/drivers/bus/dpaa/include/fman.h
@@ -0,0 +1,458 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FMAN_H
+#define __FMAN_H
+
+#include <stdbool.h>
+#include <net/if.h>
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+
+#include <compat.h>
+
+#ifndef FMAN_DEVICE_PATH
+#define FMAN_DEVICE_PATH "/dev/mem"
+#endif
+
+#define MEMAC_NUM_OF_PADDRS 7 /* Num of additional exact-match MAC addr regs */
+
+/* Control and Configuration Register (COMMAND_CONFIG) for MEMAC */
+#define CMD_CFG_LOOPBACK_EN 0x00000400
+/**< 21 XGMII/GMII loopback enable */
+#define CMD_CFG_PROMIS_EN 0x00000010
+/**< 27 Promiscuous operation enable */
+#define CMD_CFG_PAUSE_IGNORE 0x00000100
+/**< 23 Ignore Pause frame quanta */
+
+/* Statistics Configuration Register (STATN_CONFIG) */
+#define STATS_CFG_CLR 0x00000004
+/**< 29 Reset all counters */
+#define STATS_CFG_CLR_ON_RD 0x00000002
+/**< 30 Clear on read */
+#define STATS_CFG_SATURATE 0x00000001
+/**< 31 Saturate at the maximum val */
+
+/**< Max receive frame length mask */
+#define MAXFRM_SIZE_MEMAC 0x00007fe0
+#define MAXFRM_RX_MASK 0x0000ffff
+
+/**< Interface Mode Register for MEMAC */
+#define IF_MODE_RLP 0x00000820
+
+/**< Pool Limits */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM 8
+#define FMAN_PORT_OBS_EXT_POOLS_NUM 2
+
+#define FMAN_PORT_CG_MAP_NUM 8
+#define FMAN_PORT_PRS_RESULT_WORDS_NUM 8
+#define FMAN_PORT_BMI_FIFO_UNITS 0x100
+#define FMAN_PORT_IC_OFFSET_UNITS 0x10
+
+#define FMAN_ENABLE_BPOOL_DEPLETION 0xF00000F0
+
+#define HASH_CTRL_MCAST_EN 0x00000100
+#define GROUP_ADDRESS 0x0000010000000000LL
+#define HASH_CTRL_ADDR_MASK 0x0000003F
+
+/* Pre definitions of FMAN interface and Bpool structures */
+struct __fman_if;
+struct fman_if_bpool;
+/* Lists of fman interfaces and bpools */
+TAILQ_HEAD(rte_fman_if_list, __fman_if);
+
+/* Represents the different flavour of network interface */
+enum fman_mac_type {
+ fman_offline = 0,
+ fman_mac_1g,
+ fman_mac_10g,
+};
+
+struct mac_addr {
+ uint32_t mac_addr_l; /**< Lower 32 bits of 48-bit MAC address */
+ uint32_t mac_addr_u; /**< Upper 16 bits of 48-bit MAC address */
+};
+
+struct memac_regs {
+ /* General Control and Status */
+ uint32_t res0000[2];
+ uint32_t command_config; /**< 0x008 Ctrl and cfg */
+ struct mac_addr mac_addr0; /**< 0x00C-0x010 MAC_ADDR_0...1 */
+ uint32_t maxfrm; /**< 0x014 Max frame length */
+ uint32_t res0018[5];
+ uint32_t hashtable_ctrl; /**< 0x02C Hash table control */
+ uint32_t res0030[4];
+ uint32_t ievent; /**< 0x040 Interrupt event */
+ uint32_t tx_ipg_length;
+ /**< 0x044 Transmitter inter-packet-gap */
+ uint32_t res0048;
+ uint32_t imask; /**< 0x04C Interrupt mask */
+ uint32_t res0050;
+ uint32_t pause_quanta[4]; /**< 0x054 Pause quanta */
+ uint32_t pause_thresh[4]; /**< 0x064 Pause quanta threshold */
+ uint32_t rx_pause_status; /**< 0x074 Receive pause status */
+ uint32_t res0078[2];
+ struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];
+ /**< 0x80-0x0B4 mac padr */
+ uint32_t lpwake_timer;
+ /**< 0x0B8 Low Power Wakeup Timer */
+ uint32_t sleep_timer;
+ /**< 0x0BC Transmit EEE Low Power Timer */
+ uint32_t res00c0[8];
+ uint32_t statn_config;
+ /**< 0x0E0 Statistics configuration */
+ uint32_t res00e4[7];
+ /* Rx Statistics Counter */
+ uint32_t reoct_l; /**<Rx Eth Octets Counter */
+ uint32_t reoct_u;
+ uint32_t roct_l; /**<Rx Octet Counters */
+ uint32_t roct_u;
+ uint32_t raln_l; /**<Rx Alignment Error Counter */
+ uint32_t raln_u;
+ uint32_t rxpf_l; /**<Rx valid Pause Frame */
+ uint32_t rxpf_u;
+ uint32_t rfrm_l; /**<Rx Frame counter */
+ uint32_t rfrm_u;
+ uint32_t rfcs_l; /**<Rx frame check seq error */
+ uint32_t rfcs_u;
+ uint32_t rvlan_l; /**<Rx Vlan Frame Counter */
+ uint32_t rvlan_u;
+ uint32_t rerr_l; /**<Rx Frame error */
+ uint32_t rerr_u;
+ uint32_t ruca_l; /**<Rx Unicast */
+ uint32_t ruca_u;
+ uint32_t rmca_l; /**<Rx Multicast */
+ uint32_t rmca_u;
+ uint32_t rbca_l; /**<Rx Broadcast */
+ uint32_t rbca_u;
+ uint32_t rdrp_l; /**<Rx Dropper Packet */
+ uint32_t rdrp_u;
+ uint32_t rpkt_l; /**<Rx packet */
+ uint32_t rpkt_u;
+ uint32_t rund_l; /**<Rx undersized packets */
+ uint32_t rund_u;
+ uint32_t r64_l; /**<Rx 64 byte */
+ uint32_t r64_u;
+ uint32_t r127_l;
+ uint32_t r127_u;
+ uint32_t r255_l;
+ uint32_t r255_u;
+ uint32_t r511_l;
+ uint32_t r511_u;
+ uint32_t r1023_l;
+ uint32_t r1023_u;
+ uint32_t r1518_l;
+ uint32_t r1518_u;
+ uint32_t r1519x_l;
+ uint32_t r1519x_u;
+ uint32_t rovr_l; /**<Rx oversized but good */
+ uint32_t rovr_u;
+ uint32_t rjbr_l; /**<Rx oversized with bad csum */
+ uint32_t rjbr_u;
+ uint32_t rfrg_l; /**<Rx fragment Packet */
+ uint32_t rfrg_u;
+ uint32_t rcnp_l; /**<Rx control packets (0x8808) */
+ uint32_t rcnp_u;
+ uint32_t rdrntp_l; /**<Rx dropped due to FIFO overflow */
+ uint32_t rdrntp_u;
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint32_t teoct_l; /**<Tx eth octets */
+ uint32_t teoct_u;
+ uint32_t toct_l; /**<Tx Octets */
+ uint32_t toct_u;
+ uint32_t res0210[2];
+ uint32_t txpf_l; /**<Tx valid pause frame */
+ uint32_t txpf_u;
+ uint32_t tfrm_l; /**<Tx frame counter */
+ uint32_t tfrm_u;
+ uint32_t tfcs_l; /**<Tx FCS error */
+ uint32_t tfcs_u;
+ uint32_t tvlan_l; /**<Tx Vlan Frame */
+ uint32_t tvlan_u;
+ uint32_t terr_l; /**<Tx frame error */
+ uint32_t terr_u;
+ uint32_t tuca_l; /**<Tx Unicast */
+ uint32_t tuca_u;
+ uint32_t tmca_l; /**<Tx Multicast */
+ uint32_t tmca_u;
+ uint32_t tbca_l; /**<Tx Broadcast */
+ uint32_t tbca_u;
+ uint32_t res0258[2];
+ uint32_t tpkt_l; /**<Tx Packet */
+ uint32_t tpkt_u;
+ uint32_t tund_l; /**<Tx Undersized */
+ uint32_t tund_u;
+ uint32_t t64_l;
+ uint32_t t64_u;
+ uint32_t t127_l;
+ uint32_t t127_u;
+ uint32_t t255_l;
+ uint32_t t255_u;
+ uint32_t t511_l;
+ uint32_t t511_u;
+ uint32_t t1023_l;
+ uint32_t t1023_u;
+ uint32_t t1518_l;
+ uint32_t t1518_u;
+ uint32_t t1519x_l;
+ uint32_t t1519x_u;
+ uint32_t res02a8[6];
+ uint32_t tcnp_l; /**<Tx Control Packet type - 0x8808 */
+ uint32_t tcnp_u;
+ uint32_t res02c8[14];
+ /* Line Interface Control */
+ uint32_t if_mode; /**< 0x300 Interface Mode Control */
+ uint32_t if_status; /**< 0x304 Interface Status */
+ uint32_t res0308[14];
+ /* HiGig/2 */
+ uint32_t hg_config; /**< 0x340 Control and cfg */
+ uint32_t res0344[3];
+ uint32_t hg_pause_quanta; /**< 0x350 Pause quanta */
+ uint32_t res0354[3];
+ uint32_t hg_pause_thresh; /**< 0x360 Pause quanta threshold */
+ uint32_t res0364[3];
+ uint32_t hgrx_pause_status; /**< 0x370 Receive pause status */
+ uint32_t hg_fifos_status; /**< 0x374 fifos status */
+ uint32_t rhm; /**< 0x378 rx messages counter */
+ uint32_t thm; /**< 0x37C tx messages counter */
+};
+
+struct rx_bmi_regs {
+ uint32_t fmbm_rcfg; /**< Rx Configuration */
+ uint32_t fmbm_rst; /**< Rx Status */
+ uint32_t fmbm_rda; /**< Rx DMA attributes*/
+ uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/
+ uint32_t fmbm_rfed; /**< Rx Frame End Data*/
+ uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/
+ uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/
+ uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/
+ uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/
+ uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/
+ uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/
+ uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/
+ uint32_t fmbm_rpp; /**< Rx Policer Profile */
+ uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */
+ uint32_t fmbm_reth; /**< Rx Excessive Threshold */
+ uint32_t reserved003c[1]; /**< (0x03C 0x03F) */
+ uint32_t fmbm_rprai[FMAN_PORT_PRS_RESULT_WORDS_NUM];
+ /**< Rx Parse Results Array Init*/
+ uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/
+ uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/
+ uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/
+ uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/
+ uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */
+ uint32_t reserved0074[0x2]; /**< (0x074-0x07C) */
+ uint32_t fmbm_rcmne;
+ /**< Rx Frame Continuous Mode Next Engine */
+ uint32_t reserved0080[0x20];/**< (0x080 0x0FF) */
+ uint32_t fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Buffer Manager pool Information-*/
+ uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
+ /**< Allocate Counter-*/
+ uint32_t reserved0130[8];
+ /**< 0x130/0x140 - 0x15F reserved -*/
+ uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
+ /**< Congestion Group Map*/
+ uint32_t fmbm_mpd; /**< BM Pool Depletion */
+ uint32_t reserved0184[0x1F]; /**< (0x184 0x1FF) */
+ uint32_t fmbm_rstc; /**< Rx Statistics Counters*/
+ uint32_t fmbm_rfrc; /**< Rx Frame Counter*/
+ uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/
+ uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/
+ uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/
+ uint32_t fmbm_rfdc; /**< Rx Frame Discard Counter*/
+ uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/
+ uint32_t fmbm_rodc; /**<Rx Out of Buffers Discard Counter*/
+ uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter*/
+ uint32_t reserved0224[0x17]; /**< (0x224 0x27F) */
+ uint32_t fmbm_rpc; /**< Rx Performance Counters*/
+ uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/
+ uint32_t fmbm_rccn; /**< Rx Cycle Counter*/
+ uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/
+ uint32_t fmbm_rrquc;
+ /**< Rx Receive Queue Utilization cntr*/
+ uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/
+ uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/
+ uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/
+ uint32_t reserved02a0[0x18]; /**< (0x2A0 0x2FF) */
+ uint32_t fmbm_rdbg; /**< Rx Debug-*/
+};
+
+struct fman_port_qmi_regs {
+ uint32_t fmqm_pnc; /**< PortID n Configuration Register */
+ uint32_t fmqm_pns; /**< PortID n Status Register */
+ uint32_t fmqm_pnts; /**< PortID n Task Status Register */
+ uint32_t reserved00c[4]; /**< 0xn00C - 0xn01B */
+ uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */
+ uint32_t fmqm_pnetfc; /**< PortID n Enq Total Frame Counter */
+ uint32_t reserved024[2]; /**< 0xn024 - 0x02B */
+ uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */
+ uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */
+ uint32_t fmqm_pndtfc; /**< PortID n Dequeue tot Frame cntr */
+ uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID Dflt Cntr */
+ uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */
+};
+
+/* This struct exports parameters about an Fman network interface, determined
+ * from the device-tree.
+ */
+struct fman_if {
+ /* Which Fman this interface belongs to */
+ uint8_t fman_idx;
+ /* The type/speed of the interface */
+ enum fman_mac_type mac_type;
+ /* Boolean, set when mac type is memac */
+ uint8_t is_memac;
+ /* Boolean, set when PHY is RGMII */
+ uint8_t is_rgmii;
+ /* The index of this MAC (within the Fman it belongs to) */
+ uint8_t mac_idx;
+ /* The MAC address */
+ struct ether_addr mac_addr;
+ /* The Qman channel to schedule Tx FQs to */
+ u16 tx_channel_id;
+ /* The hard-coded FQIDs for this interface. Note: this doesn't cover
+ * the PCD nor the "Rx default" FQIDs, which are configured via FMC
+ * and its XML-based configuration.
+ */
+ uint32_t fqid_rx_def;
+ uint32_t fqid_rx_err;
+ uint32_t fqid_tx_err;
+ uint32_t fqid_tx_confirm;
+
+ struct list_head bpool_list;
+ /* The node for linking this interface into "fman_if_list" */
+ struct list_head node;
+};
+
+/* This struct exposes parameters for buffer pools, extracted from the network
+ * interface settings in the device tree.
+ */
+struct fman_if_bpool {
+ uint32_t bpid;
+ uint64_t count;
+ uint64_t size;
+ uint64_t addr;
+ /* The node for linking this bpool into fman_if::bpool_list */
+ struct list_head node;
+};
+
+/* Internal Context transfer params - FMBM_RICP*/
+struct fman_if_ic_params {
+ /*IC offset in the packet buffer */
+ uint16_t iceof;
+ /*IC internal offset */
+ uint16_t iciof;
+ /*IC size to copy */
+ uint16_t icsz;
+};
+
+/* The exported "struct fman_if" type contains the subset of fields we want
+ * exposed. This struct is embedded in a larger "struct __fman_if" which
+ * contains the extra bits we *don't* want exposed.
+ */
+struct __fman_if {
+ struct fman_if __if;
+ char node_path[PATH_MAX];
+ uint64_t regs_size;
+ void *ccsr_map;
+ void *bmi_map;
+ void *qmi_map;
+ struct list_head node;
+};
+
+/* And this is the base list node that the interfaces are added to. (See
+ * fman_if_enable_all_rx() below for an example of its use.)
+ */
+extern const struct list_head *fman_if_list;
+
+extern int fman_ccsr_map_fd;
+
+/* To iterate the "bpool_list" for an interface. Eg;
+ * struct fman_if *p = get_ptr_to_some_interface();
+ * struct fman_if_bpool *bp;
+ * printf("Interface uses following BPIDs;\n");
+ * fman_if_for_each_bpool(bp, p) {
+ * printf(" %d\n", bp->bpid);
+ * [...]
+ * }
+ */
+#define fman_if_for_each_bpool(bp, __if) \
+ list_for_each_entry(bp, &(__if)->bpool_list, node)
+
+#define FMAN_ERR(rc, fmt, args...) \
+ do { \
+ _errno = (rc); \
+ DPAA_BUS_LOG(ERR, fmt "(%d)", ##args, errno); \
+ } while (0)
+
+#define FMAN_IP_REV_1 0xC30C4
+#define FMAN_IP_REV_1_MAJOR_MASK 0x0000FF00
+#define FMAN_IP_REV_1_MAJOR_SHIFT 8
+#define FMAN_V3 0x06
+#define FMAN_V3_CONTEXTA_EN_A2V 0x10000000
+#define FMAN_V3_CONTEXTA_EN_OVOM 0x02000000
+#define FMAN_V3_CONTEXTA_EN_EBD 0x80000000
+#define FMAN_CONTEXTA_DIS_CHECKSUM 0x7ull
+#define FMAN_CONTEXTA_SET_OPCODE11 0x2000000b00000000
+extern u16 fman_ip_rev;
+extern u32 fman_dealloc_bufs_mask_hi;
+extern u32 fman_dealloc_bufs_mask_lo;
+
+/**
+ * Initialize the FMAN driver
+ *
+ * @args void
+ * @return
+ * 0 for success; non-zero error code otherwise
+ */
+int fman_init(void);
+
+/**
+ * Teardown the FMAN driver
+ *
+ * @args void
+ * @return void
+ */
+void fman_finish(void);
+
+#endif /* __FMAN_H */
diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
new file mode 100644
index 00000000..383106b1
--- /dev/null
+++ b/drivers/bus/dpaa/include/fsl_bman.h
@@ -0,0 +1,375 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This wrapper represents a bit-array for the depletion state of the 64 Bman
+ * buffer pools.
+ */
+struct bman_depletion {
+ u32 state[2];
+};
+
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = 0;
+}
+
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+ c->state[0] = c->state[1] = ~0;
+}
+
+/* --- Bman data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry; /* RCR (Release Command Ring) entries */
+struct bm_mc_command; /* MC (Management Command) command */
+struct bm_mc_result; /* MC result */
+
+/* For code reduction, define a wrapper for 48-bit buffers. In cases where a
+ * buffer pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used.
+ */
+struct bm_buffer {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1;
+ u8 bpid;
+ u16 hi; /* High 16-bits of 48-bit address */
+ u32 lo; /* Low 32-bits of 48-bit address */
+#else
+ u32 lo;
+ u16 hi;
+ u8 bpid;
+ u8 __reserved;
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:16;
+ u64 addr:48;
+#else
+ u64 addr:48;
+ u64 __notaddress:16;
+#endif
+ };
+ u64 opaque;
+ };
+} __attribute__((aligned(8)));
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return buf->addr;
+}
+
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return (dma_addr_t)buf->addr;
+}
+
+#define bm_buffer_set64(buf, v) \
+ do { \
+ struct bm_buffer *__buf931 = (buf); \
+ __buf931->hi = upper_32_bits(v); \
+ __buf931->lo = lower_32_bits(v); \
+ } while (0)
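+
+/* Usage sketch (illustrative): stage a 48-bit DMA address in a buffer entry
+ * before releasing it back to a pool:
+ *
+ *   struct bm_buffer buf;
+ *
+ *   bm_buffer_set64(&buf, buf_dma_addr);
+ *   bman_release(pool, &buf, 1, 0);
+ */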
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 __dont_write_directly__verb;
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+} __packed;
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+ u8 bpid;
+ u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+ u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct bm_mcc_acquire acquire;
+ struct bm_mcc_query query;
+ };
+} __packed;
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+ u8 __reserved1[32];
+ /* "availability state" and "depletion state" */
+ struct {
+ u8 __reserved1[8];
+ /* Access using bman_depletion_***() */
+ struct bman_depletion state;
+ } as, ds;
+};
+
+struct bm_mc_result {
+ union {
+ struct {
+ u8 verb;
+ u8 __reserved1[63];
+ };
+ union {
+ struct {
+ u8 __reserved1;
+ u8 bpid;
+ u8 __reserved2[62];
+ };
+ struct bm_buffer bufs[8];
+ } acquire;
+ struct bm_pool_state query;
+ };
+} __packed;
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+
+/* Portal and Buffer Pools */
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents Bman buffer pools. */
+struct bman_pool;
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+ /* index of the buffer pool to encapsulate (0-63), ignored if
+ * BMAN_POOL_FLAG_DYNAMIC_BPID is set.
+ */
+ u32 bpid;
+ /* bit-mask of BMAN_POOL_FLAG_*** options */
+ u32 flags;
+ /* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+ * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+ * when run in the control plane (which controls Bman CCSR). This array
+ * matches the definition of bm_pool_set().
+ */
+ u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE 0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE 0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID 0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH 0x00000010 /* set depletion thresholds */
+
+/* Flags to bman_release() */
+#define BMAN_RELEASE_FLAG_NOW 0x00000008 /* issue immediate release */
+
+
+/**
+ * bman_get_portal_index - get portal configuration index
+ */
+int bman_get_portal_index(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by Bman but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of buffer pools allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * BPs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be @count or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+ int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, unsigned int count);
+static inline void bman_release_bpid(u32 bpid)
+{
+ bman_release_bpid_range(bpid, 1);
+}
+
+int bman_reserve_bpid_range(u32 bpid, unsigned int count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+ return bman_reserve_bpid_range(bpid, 1);
+}
+
+void bman_seed_bpid_range(u32 bpid, unsigned int count);
+
+int bman_shutdown_pool(u32 bpid);
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object so must not be
+ * modified and can no longer be read once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags);
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+/**
+ * bman_query_free_buffers - Query how many free buffers are in buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Return the number of free buffers
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object to which the thresholds will be set
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+
+/**
+ * bm_pool_set_hw_threshold - Change the buffer pool's thresholds
+ * @bpid: buffer pool ID
+ * @low_thresh: low threshold
+ * @high_thresh: high threshold
+ */
+int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
+ const u32 high_thresh);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_BMAN_H */
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
new file mode 100644
index 00000000..95aee67a
--- /dev/null
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -0,0 +1,181 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_FMAN_H
+#define __FSL_FMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The status field in the FD is updated on the Rx side by FMAN with the
+ * following information. Refer to the field descriptions in the FM BG.
+ */
+struct fm_status_t {
+ unsigned int reserved0:3;
+ unsigned int dcl4c:1; /* Don't Check L4 Checksum */
+ unsigned int reserved1:1;
+ unsigned int ufd:1; /* Unsupported Format */
+ unsigned int lge:1; /* Length Error */
+ unsigned int dme:1; /* DMA Error */
+
+ unsigned int reserved2:4;
+ unsigned int fpe:1; /* Frame physical Error */
+ unsigned int fse:1; /* Frame Size Error */
+ unsigned int dis:1; /* Discard by Classification */
+ unsigned int reserved3:1;
+
+ unsigned int eof:1; /* Key Extraction goes out of frame */
+ unsigned int nss:1; /* No Scheme selected */
+ unsigned int kso:1; /* Key Size Overflow */
+ unsigned int reserved4:1;
+ unsigned int fcl:2; /* Frame Color */
+ unsigned int ipp:1; /* Illegal Policer Profile Selected */
+ unsigned int flm:1; /* Frame Length Mismatch */
+ unsigned int pte:1; /* Parser Timeout */
+ unsigned int isp:1; /* Invalid Soft Parser Instruction */
+ unsigned int phe:1; /* Header Error during parsing */
+ unsigned int frdr:1; /* Frame Dropped by disabled port */
+ unsigned int reserved5:4;
+} __attribute__ ((__packed__));
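+
+/* Usage sketch (illustrative): an Rx path can consult these bits to drop
+ * frames the hardware flagged as bad; 'status' here is assumed to point at
+ * the parse/status area of a received frame:
+ *
+ *   const struct fm_status_t *status = ...;
+ *
+ *   if (status->dme || status->fpe || status->fse)
+ *           drop_frame();
+ */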
+
+/* Set MAC address for a particular interface */
+int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
+
+/* Remove a MAC address for a particular interface */
+void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
+
+/* Get the FMAN statistics */
+void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
+
+/* Reset the FMAN statistics */
+void fman_if_stats_reset(struct fman_if *p);
+
+/* Get all of the FMAN statistics */
+void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
+
+/* Set ignore pause option for a specific interface */
+void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
+
+/* Set max frame length */
+void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
+
+/* Enable/disable Rx promiscuous mode on specified interface */
+void fman_if_promiscuous_enable(struct fman_if *p);
+void fman_if_promiscuous_disable(struct fman_if *p);
+
+/* Enable/disable Rx on specific interfaces */
+void fman_if_enable_rx(struct fman_if *p);
+void fman_if_disable_rx(struct fman_if *p);
+
+/* Enable/disable loopback on specific interfaces */
+void fman_if_loopback_enable(struct fman_if *p);
+void fman_if_loopback_disable(struct fman_if *p);
+
+/* Set buffer pool on specific interface */
+void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
+ size_t bufsize);
+
+/* Get Flow Control threshold parameters on specific interface */
+int fman_if_get_fc_threshold(struct fman_if *fm_if);
+
+/* Enable and Set Flow Control threshold parameters on specific interface */
+int fman_if_set_fc_threshold(struct fman_if *fm_if,
+ u32 high_water, u32 low_water, u32 bpid);
+
+/* Get Flow Control pause quanta on specific interface */
+int fman_if_get_fc_quanta(struct fman_if *fm_if);
+
+/* Set Flow Control pause quanta on specific interface */
+int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
+
+/* Set default error fqid on specific interface */
+void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
+
+/* Get IC transfer params */
+int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
+
+/* Set IC transfer params */
+int fman_if_set_ic_params(struct fman_if *fm_if,
+ const struct fman_if_ic_params *icp);
+
+/* Get interface fd->offset value */
+int fman_if_get_fdoff(struct fman_if *fm_if);
+
+/* Set interface fd->offset value */
+void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+
+/* Get interface Max Frame length (MTU) */
+uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
+
+/* Set interface Max Frame length (MTU) */
+void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
+
+/* Set interface next invoked action for dequeue operation */
+void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
+
+/* Discard error packets on Rx */
+void fman_if_discard_rx_errors(struct fman_if *fm_if);
+
+void fman_if_set_mcast_filter_table(struct fman_if *p);
+
+void fman_if_reset_mcast_filter_table(struct fman_if *p);
+
+int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
+
+int fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth);
+
+
+/* Enable/disable Rx on all interfaces */
+static inline void fman_if_enable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_enable_rx(__if);
+}
+
+static inline void fman_if_disable_all_rx(void)
+{
+ struct fman_if *__if;
+
+ list_for_each_entry(__if, fman_if_list, node)
+ fman_if_disable_rx(__if);
+}
+#endif /* __FSL_FMAN_H */
diff --git a/drivers/bus/dpaa/include/fsl_fman_crc64.h b/drivers/bus/dpaa/include/fsl_fman_crc64.h
new file mode 100644
index 00000000..af5803f3
--- /dev/null
+++ b/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -0,0 +1,263 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_FMAN_CRC64_H
+#define __FSL_FMAN_CRC64_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following definitions provide a software implementation of the CRC64
+ * algorithm implemented within Fman.
+ *
+ * The following example shows how to compute a CRC64 hash value based on
+ * SRC_IP, DST_IP and ESP_SPI values
+ *
+ * #define compute_hash(saddr,daddr,spi) \
+ * do { \
+ * uint64_t result; \
+ * result = fman_crc64_init(); \
+ * result = fman_crc64_compute_32bit(saddr, result); \
+ * result = fman_crc64_compute_32bit(daddr, result); \
+ * result = fman_crc64_compute_32bit(spi, result); \
+ * return (uint32_t) result & RC_HASH_MASK; \
+ * } while (0)
+ *
+ * If hashing over a different number of fields (or of different types) is
+ * required, this can be implemented using the following primitives.
+ */
+
+/* The following table provides the constants used by the Fman CRC64
+ * implementation. The table is instantiated within the DPAA fman driver.
+ * However, if the application is not going to be linked against the DPAA fman
+ * driver but will use this Fman CRC64 implementation, then it will need to
+ * instantiate this table by using the DECLARE_FMAN_CRC64_TABLE() macro.
+ */
+struct fman_crc64_t {
+ uint64_t initial;
+ uint64_t table[1 << 8];
+};
+extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+#define DECLARE_FMAN_CRC64_TABLE() \
+struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+ 0xFFFFFFFFFFFFFFFFULL, \
+ { \
+ 0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
+ 0xf4843657a840a05bULL, 0x47aa7ae9abe7ff34ULL, \
+ 0x7bd0c384ff8f5e33ULL, 0xc8fe8f3afc28015cULL, \
+ 0x8f54f5d357cffe68ULL, 0x3c7ab96d5468a107ULL, \
+ 0xf7a18709ff1ebc66ULL, 0x448fcbb7fcb9e309ULL, \
+ 0x0325b15e575e1c3dULL, 0xb00bfde054f94352ULL, \
+ 0x8c71448d0091e255ULL, 0x3f5f08330336bd3aULL, \
+ 0x78f572daa8d1420eULL, 0xcbdb3e64ab761d61ULL, \
+ 0x7d9ba13851336649ULL, 0xceb5ed8652943926ULL, \
+ 0x891f976ff973c612ULL, 0x3a31dbd1fad4997dULL, \
+ 0x064b62bcaebc387aULL, 0xb5652e02ad1b6715ULL, \
+ 0xf2cf54eb06fc9821ULL, 0x41e11855055bc74eULL, \
+ 0x8a3a2631ae2dda2fULL, 0x39146a8fad8a8540ULL, \
+ 0x7ebe1066066d7a74ULL, 0xcd905cd805ca251bULL, \
+ 0xf1eae5b551a2841cULL, 0x42c4a90b5205db73ULL, \
+ 0x056ed3e2f9e22447ULL, 0xb6409f5cfa457b28ULL, \
+ 0xfb374270a266cc92ULL, 0x48190ecea1c193fdULL, \
+ 0x0fb374270a266cc9ULL, 0xbc9d3899098133a6ULL, \
+ 0x80e781f45de992a1ULL, 0x33c9cd4a5e4ecdceULL, \
+ 0x7463b7a3f5a932faULL, 0xc74dfb1df60e6d95ULL, \
+ 0x0c96c5795d7870f4ULL, 0xbfb889c75edf2f9bULL, \
+ 0xf812f32ef538d0afULL, 0x4b3cbf90f69f8fc0ULL, \
+ 0x774606fda2f72ec7ULL, 0xc4684a43a15071a8ULL, \
+ 0x83c230aa0ab78e9cULL, 0x30ec7c140910d1f3ULL, \
+ 0x86ace348f355aadbULL, 0x3582aff6f0f2f5b4ULL, \
+ 0x7228d51f5b150a80ULL, 0xc10699a158b255efULL, \
+ 0xfd7c20cc0cdaf4e8ULL, 0x4e526c720f7dab87ULL, \
+ 0x09f8169ba49a54b3ULL, 0xbad65a25a73d0bdcULL, \
+ 0x710d64410c4b16bdULL, 0xc22328ff0fec49d2ULL, \
+ 0x85895216a40bb6e6ULL, 0x36a71ea8a7ace989ULL, \
+ 0x0adda7c5f3c4488eULL, 0xb9f3eb7bf06317e1ULL, \
+ 0xfe5991925b84e8d5ULL, 0x4d77dd2c5823b7baULL, \
+ 0x64b62bcaebc387a1ULL, 0xd7986774e864d8ceULL, \
+ 0x90321d9d438327faULL, 0x231c512340247895ULL, \
+ 0x1f66e84e144cd992ULL, 0xac48a4f017eb86fdULL, \
+ 0xebe2de19bc0c79c9ULL, 0x58cc92a7bfab26a6ULL, \
+ 0x9317acc314dd3bc7ULL, 0x2039e07d177a64a8ULL, \
+ 0x67939a94bc9d9b9cULL, 0xd4bdd62abf3ac4f3ULL, \
+ 0xe8c76f47eb5265f4ULL, 0x5be923f9e8f53a9bULL, \
+ 0x1c4359104312c5afULL, 0xaf6d15ae40b59ac0ULL, \
+ 0x192d8af2baf0e1e8ULL, 0xaa03c64cb957be87ULL, \
+ 0xeda9bca512b041b3ULL, 0x5e87f01b11171edcULL, \
+ 0x62fd4976457fbfdbULL, 0xd1d305c846d8e0b4ULL, \
+ 0x96797f21ed3f1f80ULL, 0x2557339fee9840efULL, \
+ 0xee8c0dfb45ee5d8eULL, 0x5da24145464902e1ULL, \
+ 0x1a083bacedaefdd5ULL, 0xa9267712ee09a2baULL, \
+ 0x955cce7fba6103bdULL, 0x267282c1b9c65cd2ULL, \
+ 0x61d8f8281221a3e6ULL, 0xd2f6b4961186fc89ULL, \
+ 0x9f8169ba49a54b33ULL, 0x2caf25044a02145cULL, \
+ 0x6b055fede1e5eb68ULL, 0xd82b1353e242b407ULL, \
+ 0xe451aa3eb62a1500ULL, 0x577fe680b58d4a6fULL, \
+ 0x10d59c691e6ab55bULL, 0xa3fbd0d71dcdea34ULL, \
+ 0x6820eeb3b6bbf755ULL, 0xdb0ea20db51ca83aULL, \
+ 0x9ca4d8e41efb570eULL, 0x2f8a945a1d5c0861ULL, \
+ 0x13f02d374934a966ULL, 0xa0de61894a93f609ULL, \
+ 0xe7741b60e174093dULL, 0x545a57dee2d35652ULL, \
+ 0xe21ac88218962d7aULL, 0x5134843c1b317215ULL, \
+ 0x169efed5b0d68d21ULL, 0xa5b0b26bb371d24eULL, \
+ 0x99ca0b06e7197349ULL, 0x2ae447b8e4be2c26ULL, \
+ 0x6d4e3d514f59d312ULL, 0xde6071ef4cfe8c7dULL, \
+ 0x15bb4f8be788911cULL, 0xa6950335e42fce73ULL, \
+ 0xe13f79dc4fc83147ULL, 0x521135624c6f6e28ULL, \
+ 0x6e6b8c0f1807cf2fULL, 0xdd45c0b11ba09040ULL, \
+ 0x9aefba58b0476f74ULL, 0x29c1f6e6b3e0301bULL, \
+ 0xc96c5795d7870f42ULL, 0x7a421b2bd420502dULL, \
+ 0x3de861c27fc7af19ULL, 0x8ec62d7c7c60f076ULL, \
+ 0xb2bc941128085171ULL, 0x0192d8af2baf0e1eULL, \
+ 0x4638a2468048f12aULL, 0xf516eef883efae45ULL, \
+ 0x3ecdd09c2899b324ULL, 0x8de39c222b3eec4bULL, \
+ 0xca49e6cb80d9137fULL, 0x7967aa75837e4c10ULL, \
+ 0x451d1318d716ed17ULL, 0xf6335fa6d4b1b278ULL, \
+ 0xb199254f7f564d4cULL, 0x02b769f17cf11223ULL, \
+ 0xb4f7f6ad86b4690bULL, 0x07d9ba1385133664ULL, \
+ 0x4073c0fa2ef4c950ULL, 0xf35d8c442d53963fULL, \
+ 0xcf273529793b3738ULL, 0x7c0979977a9c6857ULL, \
+ 0x3ba3037ed17b9763ULL, 0x888d4fc0d2dcc80cULL, \
+ 0x435671a479aad56dULL, 0xf0783d1a7a0d8a02ULL, \
+ 0xb7d247f3d1ea7536ULL, 0x04fc0b4dd24d2a59ULL, \
+ 0x3886b22086258b5eULL, 0x8ba8fe9e8582d431ULL, \
+ 0xcc0284772e652b05ULL, 0x7f2cc8c92dc2746aULL, \
+ 0x325b15e575e1c3d0ULL, 0x8175595b76469cbfULL, \
+ 0xc6df23b2dda1638bULL, 0x75f16f0cde063ce4ULL, \
+ 0x498bd6618a6e9de3ULL, 0xfaa59adf89c9c28cULL, \
+ 0xbd0fe036222e3db8ULL, 0x0e21ac88218962d7ULL, \
+ 0xc5fa92ec8aff7fb6ULL, 0x76d4de52895820d9ULL, \
+ 0x317ea4bb22bfdfedULL, 0x8250e80521188082ULL, \
+ 0xbe2a516875702185ULL, 0x0d041dd676d77eeaULL, \
+ 0x4aae673fdd3081deULL, 0xf9802b81de97deb1ULL, \
+ 0x4fc0b4dd24d2a599ULL, 0xfceef8632775faf6ULL, \
+ 0xbb44828a8c9205c2ULL, 0x086ace348f355aadULL, \
+ 0x34107759db5dfbaaULL, 0x873e3be7d8faa4c5ULL, \
+ 0xc094410e731d5bf1ULL, 0x73ba0db070ba049eULL, \
+ 0xb86133d4dbcc19ffULL, 0x0b4f7f6ad86b4690ULL, \
+ 0x4ce50583738cb9a4ULL, 0xffcb493d702be6cbULL, \
+ 0xc3b1f050244347ccULL, 0x709fbcee27e418a3ULL, \
+ 0x3735c6078c03e797ULL, 0x841b8ab98fa4b8f8ULL, \
+ 0xadda7c5f3c4488e3ULL, 0x1ef430e13fe3d78cULL, \
+ 0x595e4a08940428b8ULL, 0xea7006b697a377d7ULL, \
+ 0xd60abfdbc3cbd6d0ULL, 0x6524f365c06c89bfULL, \
+ 0x228e898c6b8b768bULL, 0x91a0c532682c29e4ULL, \
+ 0x5a7bfb56c35a3485ULL, 0xe955b7e8c0fd6beaULL, \
+ 0xaeffcd016b1a94deULL, 0x1dd181bf68bdcbb1ULL, \
+ 0x21ab38d23cd56ab6ULL, 0x9285746c3f7235d9ULL, \
+ 0xd52f0e859495caedULL, 0x6601423b97329582ULL, \
+ 0xd041dd676d77eeaaULL, 0x636f91d96ed0b1c5ULL, \
+ 0x24c5eb30c5374ef1ULL, 0x97eba78ec690119eULL, \
+ 0xab911ee392f8b099ULL, 0x18bf525d915feff6ULL, \
+ 0x5f1528b43ab810c2ULL, 0xec3b640a391f4fadULL, \
+ 0x27e05a6e926952ccULL, 0x94ce16d091ce0da3ULL, \
+ 0xd3646c393a29f297ULL, 0x604a2087398eadf8ULL, \
+ 0x5c3099ea6de60cffULL, 0xef1ed5546e415390ULL, \
+ 0xa8b4afbdc5a6aca4ULL, 0x1b9ae303c601f3cbULL, \
+ 0x56ed3e2f9e224471ULL, 0xe5c372919d851b1eULL, \
+ 0xa26908783662e42aULL, 0x114744c635c5bb45ULL, \
+ 0x2d3dfdab61ad1a42ULL, 0x9e13b115620a452dULL, \
+ 0xd9b9cbfcc9edba19ULL, 0x6a978742ca4ae576ULL, \
+ 0xa14cb926613cf817ULL, 0x1262f598629ba778ULL, \
+ 0x55c88f71c97c584cULL, 0xe6e6c3cfcadb0723ULL, \
+ 0xda9c7aa29eb3a624ULL, 0x69b2361c9d14f94bULL, \
+ 0x2e184cf536f3067fULL, 0x9d36004b35545910ULL, \
+ 0x2b769f17cf112238ULL, 0x9858d3a9ccb67d57ULL, \
+ 0xdff2a94067518263ULL, 0x6cdce5fe64f6dd0cULL, \
+ 0x50a65c93309e7c0bULL, 0xe388102d33392364ULL, \
+ 0xa4226ac498dedc50ULL, 0x170c267a9b79833fULL, \
+ 0xdcd7181e300f9e5eULL, 0x6ff954a033a8c131ULL, \
+ 0x28532e49984f3e05ULL, 0x9b7d62f79be8616aULL, \
+ 0xa707db9acf80c06dULL, 0x14299724cc279f02ULL, \
+ 0x5383edcd67c06036ULL, 0xe0ada17364673f59ULL} \
+}
+
+/*
+ * Return the initial CRC seed. Use the value returned from this API as the
+ * "crc" parameter to the first call to add data.
+ */
+static inline uint64_t fman_crc64_init(void)
+{
+ return FMAN_CRC64_ECMA_182.initial;
+}
+
+/* Updates the CRC with arbitrary data */
+static inline uint64_t fman_crc64_update(uint64_t crc,
+ void *data, unsigned int len)
+{
+ uint8_t *p = data;
+ while (len--)
+ crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ (crc >> 8);
+ return crc;
+}
+
+/* Shorthands for updating the CRC with 8/16/32 bits of data.
+ * IMPORTANT NOTE: the typed "data" arguments should not be mistaken for
+ * host-endian numerical values; the assumption is that these values contain
+ * big-endian (ie. network byte order) data.
+ */
+static inline uint64_t fman_crc64_compute_32bit(uint32_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_16bit(uint16_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+static inline uint64_t fman_crc64_compute_8bit(uint8_t data, uint64_t crc)
+{
+ return fman_crc64_update(crc, &data, sizeof(data));
+}
+
+/*
+ * Finalise the CRC (by inverting all bits, ie. taking the 1's complement)
+ */
+static inline uint64_t fman_crc64_finish(uint64_t seed)
+{
+ return ~seed;
+}
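+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * API): computing the CRC64 of an arbitrary byte buffer using the primitives
+ * above, where 'buf' and 'len' are hypothetical caller-provided values;
+ *
+ *	uint64_t crc = fman_crc64_init();
+ *	crc = fman_crc64_update(crc, buf, len);
+ *	crc = fman_crc64_finish(crc);
+ */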
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_FMAN_CRC64_H */
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
new file mode 100644
index 00000000..eedfd7ea
--- /dev/null
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -0,0 +1,2021 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <dpaa_rbtree.h>
+
+/* FQ lookups (turn this on for 64bit user-space) */
+#if (__WORDSIZE == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP
+/* if FQ lookups are supported, this controls the number of initialised,
+ * s/w-consumed FQs that can be supported at any one time.
+ */
+#define CONFIG_FSL_QMAN_FQ_LOOKUP_MAX (32 * 1024)
+#endif
+
+/* Last updated for v00.800 of the BG */
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1,
+ qm_dc_portal_caam = 2,
+ qm_dc_portal_pme = 3
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CCSCI 0x00200000 /* CEETM Congestion State Change */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. the sources that, if present, should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI | QM_PIRQ_CCSCI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
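+/*
+ * Example (illustrative only): a volatile dequeue of exactly three frames
+ * could be expressed by combining;
+ *	QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT | QM_VDQCR_NUMFRAMES_SET(3)
+ * with the FQID bits filled in separately, as noted above.
+ */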
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry; /* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry; /* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry; /* MR (Message Ring) entries */
+struct qm_mc_command; /* MC (Management Command) command */
+struct qm_mc_result; /* MC result */
+
+#define QM_FD_FORMAT_SG 0x4
+#define QM_FD_FORMAT_LONG 0x2
+#define QM_FD_FORMAT_COMPOUND 0x1
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG 0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG (QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND QM_FD_FORMAT_COMPOUND
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 dd:2; /* dynamic debug */
+ u8 liodn_offset:6;
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 eliodn_offset:4;
+ u8 __reserved:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u8 liodn_offset:6;
+ u8 dd:2; /* dynamic debug */
+ u8 bpid:8; /* Buffer Pool ID */
+ u8 __reserved:4;
+ u8 eliodn_offset:4;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#endif
+ };
+ struct {
+ u64 __notaddress:24;
+ /* More efficient address accessor */
+ u64 addr:40;
+ };
+ u64 opaque_addr;
+ };
+ /* The 'format' field indicates the interpretation of the remaining 29
+ * bits of the 32-bit word. For packing reasons, it is duplicated in the
+ * other union elements. Note, union'd structs are difficult to use with
+ * static initialisation under gcc, in which case use the "opaque" form
+ * with one of the macros.
+ */
+ union {
+ /* For easier/faster copying of this part of the fd (eg. from a
+ * DQRR entry to an EQCR entry) copy 'opaque'
+ */
+ u32 opaque;
+ /* If 'format' is _contig or _sg, 20b length and 9b offset */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format format:3;
+ u16 offset:9;
+ u32 length20:20;
+#else
+ u32 length20:20;
+ u16 offset:9;
+ enum qm_fd_format format:3;
+#endif
+ };
+ /* If 'format' is _contig_big or _sg_big, 29b length */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format1:3;
+ u32 length29:29;
+#else
+ u32 length29:29;
+ enum qm_fd_format _format1:3;
+#endif
+ };
+ /* If 'format' is _compound, 29b "congestion weight" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ enum qm_fd_format _format2:3;
+ u32 cong_weight:29;
+#else
+ u32 cong_weight:29;
+ enum qm_fd_format _format2:3;
+#endif
+ };
+ };
+ union {
+ u32 cmd;
+ u32 status;
+ };
+} __attribute__((aligned(8)));
+#define QM_FD_DD_NULL 0x00
+#define QM_FD_PID_MASK 0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return (dma_addr_t)fd->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+ do { \
+ struct qm_fd *__fd931 = (fd); \
+ __fd931->addr = v; \
+ } while (0)
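+
+/*
+ * Illustrative sketch (editorial addition): populating a frame descriptor's
+ * 40-bit address from a hypothetical DMA address 'phys';
+ *
+ *	struct qm_fd fd;
+ *
+ *	memset(&fd, 0, sizeof(fd));
+ *	qm_fd_addr_set64(&fd, phys);
+ */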
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+#else
+ u32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[3];
+#endif
+ };
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u64 __notaddress:24;
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 __notaddress:24;
+#endif
+ };
+ u64 opaque;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 extension:1; /* Extension bit */
+ u32 final:1; /* Final bit */
+ u32 length:30;
+#else
+ u32 length:30;
+ u32 final:1; /* Final bit */
+ u32 extension:1; /* Extension bit */
+#endif
+ };
+ u32 val;
+ };
+ u8 __reserved2;
+ u8 bpid;
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved3:3;
+ u16 offset:13;
+#else
+ u16 offset:13;
+ u16 __reserved3:3;
+#endif
+ };
+ u16 val_off;
+ };
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return sg->addr;
+}
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return (dma_addr_t)sg->addr;
+}
+
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+ do { \
+ struct qm_sg_entry *__sg931 = (sg); \
+ __sg931->addr = v; \
+ } while (0)
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 __dont_write_directly__verb;
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd;
+ u8 __reserved4[32];
+};
+
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* the verb contains one of; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired */
+
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+struct qm_mr_entry {
+ u8 verb;
+ union {
+ struct {
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rejection Code */
+ u32 orp:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ } __packed ern;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+ u8 __reserved1:4;
+ enum qm_dc_portal portal:2;
+#else
+ enum qm_dc_portal portal:2;
+ u8 __reserved1:4;
+ u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
+#endif
+ u16 __reserved2;
+ u8 rc; /* Rejection Code */
+ u32 __reserved3:24;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ } __packed dcern;
+ struct {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[16];
+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ };
+ u8 __reserved2[32];
+} __packed;
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN 0x00
+#define QM_MR_DCERN_COLOUR_YELLOW 0x01
+#define QM_MR_DCERN_COLOUR_RED 0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE 0x03
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 exclusive;
+ u8 __reserved1:2;
+ /* Numbers of cachelines */
+ u8 annotation_cl:2;
+ u8 data_cl:2;
+ u8 context_cl:2;
+#else
+ u8 context_cl:2;
+ u8 data_cl:2;
+ u8 annotation_cl:2;
+ u8 __reserved1:2;
+ u8 exclusive;
+#endif
+} __packed;
+struct qm_fqd_taildrop {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved1:3;
+ u16 mant:8;
+ u16 exp:5;
+#else
+ u16 exp:5;
+ u16 mant:8;
+ u16 __reserved1:3;
+#endif
+} __packed;
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 oac:2; /* "Overhead Accounting Control" */
+ u8 __reserved1:6;
+#else
+ u8 __reserved1:6;
+ u8 oac:2; /* "Overhead Accounting Control" */
+#endif
+ /* Two's-complement value (-128 to +127) */
+ signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+ union {
+ u8 orpc;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved1:2;
+ u8 orprws:3;
+ u8 oa:1;
+ u8 olws:2;
+#else
+ u8 olws:2;
+ u8 oa:1;
+ u8 orprws:3;
+ u8 __reserved1:2;
+#endif
+ } __packed;
+ };
+ u8 cgid;
+ u16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ union {
+ u16 dest_wq;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 channel:13; /* qm_channel */
+ u16 wq:3;
+#else
+ u16 wq:3;
+ u16 channel:13; /* qm_channel */
+#endif
+ } __packed dest;
+ };
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved2:1;
+ u16 ics_cred:15;
+#else
+ u16 ics_cred:15;
+ u16 __reserved2:1;
+#endif
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ uint16_t opaque_td;
+ struct qm_fqd_taildrop td;
+ struct qm_fqd_oac oac_init;
+ };
+ u32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ u64 opaque;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 hi;
+ u32 lo;
+#else
+ u32 lo;
+ u32 hi;
+#endif
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ u16 context_hi;
+ u32 context_lo;
+#else
+ u32 context_lo;
+ u16 context_hi;
+ struct qm_fqd_stashing stashing;
+#endif
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.context_hi << 32) |
+ (u64)fqd->context_a.context_lo;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return ((u64)fqd->context_a.hi << 32) |
+ (u64)fqd->context_a.lo;
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = upper_32_bits(addr);
+ fqd->context_a.lo = lower_32_bits(addr);
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ if (val > 0xe0000000)
+ return -ERANGE;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ td->exp = e;
+ td->mant = val;
+ return 0;
+}
+
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+ return (u32)td->mant << td->exp;
+}
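+
+/*
+ * Worked example (illustrative): qm_fqd_taildrop_set(&td, 4096, 0) stores
+ * mant = 128 and exp = 5, so qm_fqd_taildrop_get() returns 128 << 5 = 4096.
+ * Thresholds that are not exactly representable are truncated, or rounded up
+ * when 'roundup' is non-zero.
+ */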
+
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_ORP 0x0100 /* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ union {
+ u32 word;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 MA:8;
+ u32 Mn:5;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Sn:6;
+ u32 Pn:6;
+#else
+ u32 Pn:6;
+ u32 Sn:6;
+ u32 SA:7; /* must be between 64-127 */
+ u32 Mn:5;
+ u32 MA:8;
+#endif
+ } __packed;
+ };
+} __packed;
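+/*
+ * Worked example (illustrative): MA = 64 and Mn = 4 gives
+ * MaxTH = 64 * (2 ^ 4) = 1024.
+ */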
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ union {
+ u16 hword;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 __reserved:3;
+ u16 TA:8;
+ u16 Tn:5;
+#else
+ u16 Tn:5;
+ u16 TA:8;
+ u16 __reserved:3;
+#endif
+ } __packed;
+ };
+} __packed;
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+#else
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+#endif
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ union {
+ struct qm_cgr_cs_thres cs_thres;
+ /* use qm_cgr_cs_thres_set64() */
+ u16 __cs_thres;
+ };
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return (u64)th->TA << th->Tn;
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->Tn = e;
+ th->TA = val;
+ return 0;
+}
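+
+/*
+ * Worked example (illustrative): qm_cgr_cs_thres_set64(&th, 8192, 0) stores
+ * TA = 128 and Tn = 6, so qm_cgr_cs_thres_get64() returns 128 << 6 = 8192.
+ */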
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands" */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+ u8 __reserved1;
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+ u8 __reserved1[2];
+ u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved2[23];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+ u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved1:3;
+#else
+ u16 __reserved1:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+ u8 __dont_write_directly__verb;
+ union {
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_cgrtestwrite cgrtestwrite;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querycongestion querycongestion;
+ struct qm_mcc_querywq querywq;
+ };
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+struct qm_mcr_initfq {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u8 __reserved2;
+ u32 fqd_link:24;
+ u16 __reserved3:2;
+ u16 odp_seq:14;
+ u16 __reserved4:2;
+ u16 orp_nesn:14;
+ u16 __reserved5:1;
+ u16 orp_ea_hseq:15;
+ u16 __reserved6:1;
+ u16 orp_ea_tseq:15;
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+ u8 __reserved11[5];
+ u8 __reserved12:7;
+ u8 is:1;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#else
+ u8 __reserved2;
+ u32 fqd_link:24;
+
+ u16 odp_seq:14;
+ u16 __reserved3:2;
+
+ u16 orp_nesn:14;
+ u16 __reserved4:2;
+
+ u16 orp_ea_hseq:15;
+ u16 __reserved5:1;
+
+ u16 orp_ea_tseq:15;
+ u16 __reserved6:1;
+
+ u8 __reserved7;
+ u32 orp_ea_hptr:24;
+
+ u8 __reserved8;
+ u32 orp_ea_tptr:24;
+
+ u8 __reserved9;
+ u32 pfdr_hptr:24;
+
+ u8 __reserved10;
+ u32 pfdr_tptr:24;
+
+ u8 __reserved11[5];
+ u8 is:1;
+ u8 __reserved12:7;
+ u16 ics_surp;
+ u32 byte_cnt;
+ u8 __reserved13;
+ u32 frm_cnt:24;
+ u32 __reserved14;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved15;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+#endif
+} __packed;
+
+struct qm_mcr_alterfq {
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+ u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u16 lgt; /* Last Group Tick */
+ u16 wr_prob_g;
+ u16 wr_prob_y;
+ u16 wr_prob_r;
+ u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[3];
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved3:24;
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+ u32 __reserved3:24;
+#endif
+ };
+ u64 i_bcnt;
+ };
+ union {
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved4:24;
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+#else
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+ u32 __reserved4:24;
+#endif
+ };
+ u64 a_bcnt;
+ };
+ union {
+ u32 cscn_targ_swp[4];
+ u8 __reserved5[16];
+ };
+} __packed;
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+struct qm_mcr_querycongestion {
+ u8 __reserved[30];
+ /* Access this struct using QM_MCR_QUERYCONGESTION() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+#else
+ u16 __reserved:3;
+ u16 id:13; /* qm_channel */
+#endif
+ } __packed channel;
+ };
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+ u8 verb;
+ u8 result;
+ union {
+ struct qm_mcr_initfq initfq;
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_queryfq_np queryfq_np;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_initcgr initcgr;
+ struct qm_mcr_cgrtestwrite cgrtestwrite;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ };
+} __packed;
+
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ * u8 cgr = [...];
+ * struct qm_mc_result *res = [...];
+ * printf("congestion group %d congestion state: %d\n", cgr,
+ * QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num) ((num) >> 5)
+#define __CGR_SHIFT(num) ((num) & 0x1f)
+#define __CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+ u8 cgr)
+{
+ return p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+ /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because context_b is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. This allows out-of-order
+ * consumes by explicit calls to qman_dca() and/or the use of implicit
+ * DCA via EQCR entries.
+ */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+ const struct qm_mr_entry *msg);
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify however
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ qman_cb_dqrr dqrr; /* for dequeued frames */
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+ /*
+ * These are internal to the driver, don't touch. In particular, they
+ * may change, be removed, or extended (so you shouldn't rely on
+ * sizeof(qman_fq) being a constant).
+ */
+ spinlock_t fqlock;
+ u32 fqid;
+ /* DPDK Interface */
+ void *dpaa_intf;
+
+ volatile unsigned long flags;
+ enum qman_fq_state state;
+ int cgr_groupid;
+ struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ u32 key;
+#endif
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED 0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS 0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED 0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.)
+ */
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR 0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA 0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK 0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p) /* If DCA, p is DQRR entry */ \
+ (((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN 0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW 0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED 0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ * of a frame.
+ */
+#define QMAN_ENQUEUE_FLAG_NLIS 0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ * would otherwise block it (eg. if a frame has been dropped).
+ */
+#define QMAN_ENQUEUE_FLAG_HOLE 0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ * number.
+ */
+#define QMAN_ENQUEUE_FLAG_NESN 0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
+
+/**
+ * qman_get_portal_index - get portal configuration index
+ */
+int qman_get_portal_index(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the cpu mask.
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_set_vdq - Issue a volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @num: Number of Frames requested for volatile dequeue
+ *
+ * This function will issue a volatile dequeue command to the QMAN.
+ */
+int qman_set_vdq(struct qman_fq *fq, u16 num);
+
+/**
+ * qman_dequeue - Get the DQRR entry after volatile dequeue command
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ *
+ * This function will return the DQRR entry after a volatile dequeue command
+ * is issued. It will keep returning entries until no packet is available on
+ * the DQRR, at which point it returns NULL.
+ */
+struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
+
+/**
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
+ * @fq: Frame Queue on which the volatile dequeue command is issued
+ * @dq: DQRR entry to consume. This is the one which is provided by the
+ * 'qman_dequeue' command.
+ *
+ * This will consume the DQRR entry and make it available for the next
+ * volatile dequeue.
+ */
+void qman_dqrr_consume(struct qman_fq *fq,
+ struct qm_dqrr_entry *dq);
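+
+/*
+ * Illustrative usage sketch of the volatile dequeue API above (editorial
+ * addition, assuming an already-initialised frame queue 'fq'):
+ *
+ *	struct qm_dqrr_entry *dq;
+ *
+ *	qman_set_vdq(fq, 4);
+ *	while ((dq = qman_dequeue(fq)) != NULL) {
+ *		(... process dq->fd ...)
+ *		qman_dqrr_consume(fq, dq);
+ *	}
+ */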
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done.
+ */
+void qman_poll(void);
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools);
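+
+/*
+ * Illustrative sketch (editorial addition): enable static dequeue from a
+ * hypothetical pool channel 'pool_ch' (a qm_channel value in the pool-channel
+ * range);
+ *
+ *	qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL_CONV(pool_ch));
+ */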
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(void);
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by Qman but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to Qman (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+ /* FQ management */
+ /* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on portal @p to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to the
+ * Tentatively-Scheduled or Truly-Scheduled state depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
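+/*
+ * Example (sketch): typical bring-up of a dynamically-allocated FQ. The
+ * 'cb.dqrr' member name is assumed from the conventional qman_fq layout and
+ * is illustrative only:
+ *
+ *	static struct qman_fq my_fq;
+ *
+ *	my_fq.cb.dqrr = my_dqrr_handler;
+ *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq))
+ *		(handle error);
+ *	if (qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL))
+ *		(handle error);
+ */
+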
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration
+ * list entries were released as ERNs at the time of retirement, they must all
+ * be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
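+/*
+ * Example (sketch): graceful FQ teardown, tolerating the asynchronous
+ * retirement case described above:
+ *
+ *	u32 flags;
+ *	int ret = qman_retire_fq(&my_fq, &flags);
+ *
+ *	if (ret == 1)
+ *		(wait for the FQRN message/callback before continuing);
+ *	else if (ret < 0)
+ *		(handle error);
+ *	if (qman_oos_fq(&my_fq))
+ *		(handle error);
+ *	qman_destroy_fq(&my_fq, 0);
+ */
+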
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to set to XON/XOFF state; must not be in the
+ * 'oos', 'retired' or 'parked' state
+ * @xon: boolean to set the fq in XON or XOFF state
+ *
+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
+ * state, otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_has_pkts - Queries non-programmable FQD fields and returns
+ * '1' if there are packets on the frame queue, or '0' if there are none
+ * @fq: the frame queue object to be queried
+ */
+int qman_query_fq_has_pkts(struct qman_fq *fq);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query the lengths of the WQs in the channel
+ * dedicated to this software portal. Otherwise, query the lengths of the WQs
+ * in the channel specified in @wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
+ * query if @query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
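+/*
+ * Example (sketch): pull up to three frames from a parked/retired FQ and
+ * block until all resulting DQRR entries have been processed. The
+ * QM_VDQCR_NUMFRAMES_SET() helper for composing @vdqcr is assumed:
+ *
+ *	qman_volatile_dequeue(&my_fq,
+ *			      QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ *			      QM_VDQCR_NUMFRAMES_SET(3));
+ */
+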
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when Qman consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, Qman will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until Qman consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
+
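+/*
+ * Example (sketch): best-effort enqueue with a bounded busy-retry for the
+ * ring-full case (MAX_RETRIES is a hypothetical application constant):
+ *
+ *	int i, ret = -1;
+ *
+ *	for (i = 0; ret && i < MAX_RETRIES; i++)
+ *		ret = qman_enqueue(&my_fq, &fd, 0);
+ *	if (ret)
+ *		(drop the frame or back off);
+ */
+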
+int qman_enqueue_multi(struct qman_fq *fq,
+ const struct qm_fd *fd,
+ int frames_to_send);
+
+typedef int (*qman_cb_precommit) (void *arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number; this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+ int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
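+/*
+ * Example (sketch): allocate eight consecutive FQIDs, accepting a shorter
+ * run if eight are not available (@partial non-zero):
+ *
+ *	u32 base;
+ *	int num = qman_alloc_fqid_range(&base, 8, 0, 1);
+ *
+ *	if (num < 0)
+ *		(allocation failed);
+ *	(use FQIDs base .. base + num - 1, then release them with
+ *	 qman_release_fqid_range(base, num) when done);
+ */
+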
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+ qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to reserve
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+ return qman_reserve_fqid_range(fqid, 1);
+}
+
+/* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+ int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+ qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+ return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+ /* CGR management */
+ /* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If @opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
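+/*
+ * Example (sketch): register a CGR with a congestion entry/exit callback.
+ * The 'cgrid' and 'cb' member names are assumed from the conventional
+ * qman_cgr layout and are illustrative only:
+ *
+ *	static struct qman_cgr my_cgr;
+ *
+ *	my_cgr.cgrid = my_cgrid;
+ *	my_cgr.cb = my_congestion_cb;
+ *	if (qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &init_opts))
+ *		(handle error);
+ */
+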
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be excuted on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_query_cgr - Queries CGR fields
+ * @cgr: the 'cgr' object to query
+ * @result: storage for the queried congestion group record
+ */
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non-zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+ int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+ qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+ return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+ /* Helpers */
+ /* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fq: the FQ object whose FQID will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
+ * only call qman_enqueue() (no FQ initialisation, scheduling, etc), ie.
+ *   qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data cannot be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int err;
+
+ err = qman_query_fq_np(fq, &np);
+ if (err)
+ return err;
+ if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+ return 1;
+ return 0;
+}
+
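+/*
+ * Example (sketch): producer-side synchronisation against a NO_MODIFY Tx FQ
+ * that the peer endpoint initialises and schedules:
+ *
+ *	int ret;
+ *
+ *	do {
+ *		ret = qman_poll_fq_for_init(&tx_fq);
+ *	} while (ret == 1);
+ *	if (ret < 0)
+ *		(handle error);
+ */
+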
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define cpu_to_hw_sg(x) (x)
+#define hw_sg_to_cpu(x) (x)
+#else
+#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)
+
+static inline void __cpu_to_hw_sg(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = cpu_to_be64(sgentry->opaque);
+ sgentry->val = cpu_to_be32(sgentry->val);
+ sgentry->val_off = cpu_to_be16(sgentry->val_off);
+}
+
+static inline void __hw_sg_to_cpu(struct qm_sg_entry *sgentry)
+{
+ sgentry->opaque = be64_to_cpu(sgentry->opaque);
+ sgentry->val = be32_to_cpu(sgentry->val);
+ sgentry->val_off = be16_to_cpu(sgentry->val_off);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_QMAN_H */
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
new file mode 100644
index 00000000..a3243aff
--- /dev/null
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -0,0 +1,107 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_USD_H
+#define __FSL_USD_H
+
+#include <compat.h>
+#include <fsl_qman.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Thread-entry/exit hooks; */
+int qman_thread_init(void);
+int bman_thread_init(void);
+int qman_thread_finish(void);
+int bman_thread_finish(void);
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
+/* Obtain and free raw (uninitialized) portals */
+
+struct dpaa_raw_portal {
+ /* inputs */
+
+ /* set to non zero to turn on stashing */
+ uint8_t enable_stash;
+ /* Stashing attributes for the portal */
+ uint32_t cpu;
+ uint32_t cache;
+ uint32_t window;
+
+ /* Specifies the stash request queue this portal should use */
+ uint8_t sdest;
+
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ * for don't care. The portal index will be populated by the
+ * driver when the ioctl() successfully completes.
+ */
+ uint32_t index;
+
+ /* outputs */
+ uint64_t cinh;
+ uint64_t cena;
+};
+
+int qman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int qman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
+int bman_free_raw_portal(struct dpaa_raw_portal *portal);
+
+/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt
+ * line before notifying us, and this post-processing re-enables it once
+ * processing is complete. As such, it is essential to call this before going
+ * into another blocking read/select/poll.
+ */
+void qman_thread_irq(void);
+void bman_thread_irq(void);
+
+/* Global setup */
+int qman_global_init(void);
+int bman_global_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FSL_USD_H */
diff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h
new file mode 100644
index 00000000..b77a6787
--- /dev/null
+++ b/drivers/bus/dpaa/include/netcfg.h
@@ -0,0 +1,96 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NETCFG_H
+#define __NETCFG_H
+
+#include <fman.h>
+#include <argp.h>
+
+/* Configuration information related to a specific ethernet port */
+struct fm_eth_port_cfg {
+ /**< A list of PCD FQ ranges, obtained from FMC configuration */
+ struct list_head *list;
+ /**< The "Rx default" FQID, obtained from FMC configuration */
+ uint32_t rx_def;
+ /**< Other interface details are in the fman driver interface */
+ struct fman_if *fman_if;
+};
+
+struct netcfg_info {
+ uint8_t num_ethports;
+ /**< Number of ports */
+ struct fm_eth_port_cfg port_cfg[0];
+ /**< Variable structure array of size num_ethports */
+};
+
+struct interface_info {
+ char *name;
+ struct ether_addr mac_addr;
+ struct ether_addr peer_mac;
+ int mac_present;
+ int fman_enabled_mac_interface;
+};
+
+struct netcfg_interface {
+ uint8_t numof_netcfg_interface;
+ uint8_t numof_fman_enabled_macless;
+ struct interface_info interface_info[0];
+};
+
+/* Acquires the network configuration (as derived from the FMC netpcd XML
+ * "policy" file and the FMC config XML file) and returns it in newly
+ * allocated memory.
+ */
+struct netcfg_info *netcfg_acquire(void);
+
+/* cfg_ptr: configuration information pointer.
+ * Frees the resources allocated by the configuration layer.
+ */
+void netcfg_release(struct netcfg_info *cfg_ptr);
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* cfg_ptr: configuration information pointer.
+ * This function dumps configuration data to stdout.
+ */
+void dump_netcfg(struct netcfg_info *cfg_ptr);
+#endif
+
+#endif /* __NETCFG_H */
diff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h
new file mode 100644
index 00000000..2984b1e1
--- /dev/null
+++ b/drivers/bus/dpaa/include/of.h
@@ -0,0 +1,190 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OF_H
+#define __OF_H
+
+#include <compat.h>
+
+#ifndef OF_INIT_DEFAULT_PATH
+#define OF_INIT_DEFAULT_PATH "/proc/device-tree"
+#endif
+
+#define OF_DEFAULT_NA 1
+#define OF_DEFAULT_NS 1
+
+#define OF_FILE_BUF_MAX 256
+
+/**
+ * Layout of Device Tree:
+ * dt_dir
+ * |- dt_dir
+ * | |- dt_dir
+ * | | |- dt_dir
+ * | | | |- dt_file
+ * | | | ``- dt_file
+ * | | ``- dt_file
+ * | ``- dt_file
+ * ``- dt_file
+ *
+ * +------------------+
+ * |dt_dir |
+ * |+----------------+|
+ * ||dt_node ||
+ * ||+--------------+||
+ * |||device_node |||
+ * ||+--------------+||
+ * || list_dt_nodes ||
+ * |+----------------+|
+ * | list of subdir |
+ * | list of files |
+ * +------------------+
+ */
+
+/**
+ * Description of a device node in the device tree.
+ */
+struct device_node {
+ char name[NAME_MAX];
+ char full_name[PATH_MAX];
+};
+
+/**
+ * List of device nodes available in a device tree layout
+ */
+struct dt_node {
+ struct device_node node; /**< Property of node */
+ int is_file; /**< FALSE==dir, TRUE==file */
+ struct list_head list; /**< Nodes within a parent subdir */
+};
+
+/**
+ * Types we use to represent directories and files
+ */
+struct dt_file;
+struct dt_dir {
+ struct dt_node node;
+ struct list_head subdirs;
+ struct list_head files;
+ struct list_head linear;
+ struct dt_dir *parent;
+ struct dt_file *compatible;
+ struct dt_file *status;
+ struct dt_file *lphandle;
+ struct dt_file *a_cells;
+ struct dt_file *s_cells;
+ struct dt_file *reg;
+};
+
+struct dt_file {
+ struct dt_node node;
+ struct dt_dir *parent;
+ ssize_t len;
+ uint64_t buf[OF_FILE_BUF_MAX >> 3];
+};
+
+const struct device_node *of_find_compatible_node(
+ const struct device_node *from,
+ const char *type __always_unused,
+ const char *compatible)
+ __attribute__((nonnull(3)));
+
+#define for_each_compatible_node(dev_node, type, compatible) \
+ for (dev_node = of_find_compatible_node(NULL, type, compatible); \
+ dev_node != NULL; \
+ dev_node = of_find_compatible_node(dev_node, type, compatible))
+
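+/*
+ * Example (sketch): walk all device-tree nodes matching a compatible string
+ * (the string shown is illustrative) and skip unavailable ones:
+ *
+ *	const struct device_node *np;
+ *
+ *	for_each_compatible_node(np, NULL, "fsl,qman-portal") {
+ *		if (!of_device_is_available(np))
+ *			continue;
+ *		(use np);
+ *	}
+ */
+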
+const void *of_get_property(const struct device_node *from, const char *name,
+ size_t *lenp) __attribute__((nonnull(2)));
+bool of_device_is_available(const struct device_node *dev_node);
+
+const struct device_node *of_find_node_by_phandle(phandle ph);
+
+const struct device_node *of_get_parent(const struct device_node *dev_node);
+
+const struct device_node *of_get_next_child(const struct device_node *dev_node,
+ const struct device_node *prev);
+
+#define for_each_child_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+
+uint32_t of_n_addr_cells(const struct device_node *dev_node);
+uint32_t of_n_size_cells(const struct device_node *dev_node);
+
+const uint32_t *of_get_address(const struct device_node *dev_node, size_t idx,
+ uint64_t *size, uint32_t *flags);
+
+uint64_t of_translate_address(const struct device_node *dev_node,
+ const u32 *addr) __attribute__((nonnull));
+
+bool of_device_is_compatible(const struct device_node *dev_node,
+ const char *compatible);
+
+/* of_init() must be called prior to initialisation or use of any driver
+ * subsystem that is device-tree-dependent. Eg. Qman/Bman, config layers, etc.
+ * The path should usually be "/proc/device-tree".
+ */
+int of_init_path(const char *dt_path);
+
+/* of_finish() allows a controlled tear-down of the device-tree layer, eg. if a
+ * full reload is desired without a process exit.
+ */
+void of_finish(void);
+
+/* Use of this wrapper is recommended. */
+static inline int of_init(void)
+{
+ return of_init_path(OF_INIT_DEFAULT_PATH);
+}
+
+/* Read a numeric property according to its size and return it as a 64-bit
+ * value.
+ */
+static inline uint64_t of_read_number(const __be32 *cell, int size)
+{
+ uint64_t r = 0;
+
+ while (size--)
+ r = (r << 32) | be32toh(*(cell++));
+ return r;
+}
+
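+/*
+ * Example (sketch): read a node's "reg" property as a single number sized by
+ * the node's address-cell count (error handling elided):
+ *
+ *	size_t len;
+ *	const __be32 *reg = of_get_property(np, "reg", &len);
+ *	uint64_t addr = of_read_number(reg, of_n_addr_cells(np));
+ */
+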
+#endif /* __OF_H */
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
new file mode 100644
index 00000000..989ddcd5
--- /dev/null
+++ b/drivers/bus/dpaa/include/process.h
@@ -0,0 +1,107 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PROCESS_H
+#define __PROCESS_H
+
+#include <compat.h>
+
+/* The process device underlies process-wide user/kernel interactions, such as
+ * mapping dma_mem memory and providing accompanying ioctl()s. (This isn't used
+ * for portals, which use one UIO device each.)
+ */
+#define PROCESS_PATH "/dev/fsl-usdpaa"
+
+/* Allocation of resource IDs uses a generic interface. This enum is used to
+ * distinguish between the type of underlying object being manipulated.
+ */
+enum dpaa_id_type {
+ dpaa_id_fqid,
+ dpaa_id_bpid,
+ dpaa_id_qpool,
+ dpaa_id_cgrid,
+ dpaa_id_max /* <-- not a valid type, represents the number of types */
+};
+
+int process_alloc(enum dpaa_id_type id_type, uint32_t *base, uint32_t num,
+ uint32_t align, int partial);
+void process_release(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+int process_reserve(enum dpaa_id_type id_type, uint32_t base, uint32_t num);
+
+/* Mapping and using QMan/BMan portals */
+enum dpaa_portal_type {
+ dpaa_portal_qman,
+ dpaa_portal_bman,
+};
+
+struct dpaa_ioctl_portal_map {
+ /* Input parameter, is a qman or bman portal required. */
+ enum dpaa_portal_type type;
+ /* Specifies a specific portal index to map or 0xffffffff
+ * for don't care.
+ */
+ uint32_t index;
+
+ /* Return value if the map succeeds, this gives the mapped
+ * cache-inhibited (cinh) and cache-enabled (cena) addresses.
+ */
+ struct dpaa_portal_map {
+ void *cinh;
+ void *cena;
+ } addr;
+ /* Qman-specific return values */
+ u16 channel;
+ uint32_t pools;
+};
+
+int process_portal_map(struct dpaa_ioctl_portal_map *params);
+int process_portal_unmap(struct dpaa_portal_map *map);
+
+struct dpaa_ioctl_irq_map {
+ enum dpaa_portal_type type; /* Type of portal to map */
+ int fd; /* File descriptor that contains the portal */
+ void *portal_cinh; /* Cache inhibited area to identify the portal */
+};
+
+int process_portal_irq_map(int fd, struct dpaa_ioctl_irq_map *irq);
+int process_portal_irq_unmap(int fd);
+
+#endif /* __PROCESS_H */
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
new file mode 100644
index 00000000..fb9d5323
--- /dev/null
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -0,0 +1,66 @@
+DPDK_17.11 {
+ global:
+
+ bman_acquire;
+ bman_free_pool;
+ bman_get_params;
+ bman_global_init;
+ bman_new_pool;
+ bman_query_free_buffers;
+ bman_release;
+ dpaa_logtype_mempool;
+ dpaa_logtype_pmd;
+ dpaa_netcfg;
+ fman_ccsr_map_fd;
+ fman_dealloc_bufs_mask_hi;
+ fman_dealloc_bufs_mask_lo;
+ fman_if_add_mac_addr;
+ fman_if_clear_mac_addr;
+ fman_if_disable_rx;
+ fman_if_enable_rx;
+ fman_if_discard_rx_errors;
+ fman_if_get_fc_threshold;
+ fman_if_get_fc_quanta;
+ fman_if_get_fdoff;
+ fman_if_loopback_disable;
+ fman_if_loopback_enable;
+ fman_if_promiscuous_disable;
+ fman_if_promiscuous_enable;
+ fman_if_reset_mcast_filter_table;
+ fman_if_set_bp;
+ fman_if_set_fc_threshold;
+ fman_if_set_fc_quanta;
+ fman_if_set_fdoff;
+ fman_if_set_ic_params;
+ fman_if_set_maxfrm;
+ fman_if_set_mcast_filter_table;
+ fman_if_stats_get;
+ fman_if_stats_get_all;
+ fman_if_stats_reset;
+ fman_ip_rev;
+ netcfg_acquire;
+ netcfg_release;
+ of_find_compatible_node;
+ of_get_property;
+ qm_channel_caam;
+ qman_create_fq;
+ qman_dequeue;
+ qman_dqrr_consume;
+ qman_enqueue;
+ qman_enqueue_multi;
+ qman_fq_fqid;
+ qman_fq_state;
+ qman_global_init;
+ qman_init_fq;
+ qman_poll_dqrr;
+ qman_query_fq_np;
+ qman_set_vdq;
+ qman_reserve_fqid_range;
+ qman_volatile_dequeue;
+ rte_dpaa_driver_register;
+ rte_dpaa_driver_unregister;
+ rte_dpaa_mem_ptov;
+ rte_dpaa_portal_init;
+
+ local: *;
+};
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
new file mode 100644
index 00000000..eafc944d
--- /dev/null
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -0,0 +1,173 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __RTE_DPAA_BUS_H__
+#define __RTE_DPAA_BUS_H__
+
+#include <rte_bus.h>
+#include <rte_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FSL_DPAA_BUS_NAME "FSL_DPAA_BUS"
+
+#define DEV_TO_DPAA_DEVICE(ptr) \
+ container_of(ptr, struct rte_dpaa_device, device)
+
+struct rte_dpaa_device;
+struct rte_dpaa_driver;
+
+/* DPAA Device and Driver lists for DPAA bus */
+TAILQ_HEAD(rte_dpaa_device_list, rte_dpaa_device);
+TAILQ_HEAD(rte_dpaa_driver_list, rte_dpaa_driver);
+
+/* Configuration variables exported from DPAA bus */
+extern struct netcfg_info *dpaa_netcfg;
+
+enum rte_dpaa_type {
+ FSL_DPAA_ETH = 1,
+ FSL_DPAA_CRYPTO,
+};
+
+struct rte_dpaa_bus {
+ struct rte_bus bus;
+ struct rte_dpaa_device_list device_list;
+ struct rte_dpaa_driver_list driver_list;
+ int device_count;
+};
+
+struct dpaa_device_id {
+ uint8_t fman_id; /**< Fman interface ID, for ETH type device */
+ uint8_t mac_id; /**< Fman MAC interface ID, for ETH type device */
+ uint16_t dev_id; /**< Device Identifier from DPDK */
+};
+
+struct rte_dpaa_device {
+ TAILQ_ENTRY(rte_dpaa_device) next;
+ struct rte_device device;
+ union {
+ struct rte_eth_dev *eth_dev;
+ struct rte_cryptodev *crypto_dev;
+ };
+ struct rte_dpaa_driver *driver;
+ struct dpaa_device_id id;
+ enum rte_dpaa_type device_type; /**< Ethernet or crypto type device */
+ char name[RTE_ETH_NAME_MAX_LEN];
+};
+
+typedef int (*rte_dpaa_probe_t)(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev);
+typedef int (*rte_dpaa_remove_t)(struct rte_dpaa_device *dpaa_dev);
+
+struct rte_dpaa_driver {
+ TAILQ_ENTRY(rte_dpaa_driver) next;
+ struct rte_driver driver;
+ struct rte_dpaa_bus *dpaa_bus;
+ enum rte_dpaa_type drv_type;
+ rte_dpaa_probe_t probe;
+ rte_dpaa_remove_t remove;
+};
+
+struct dpaa_portal {
+ uint32_t bman_idx; /**< BMAN Portal ID*/
+ uint32_t qman_idx; /**< QMAN Portal ID*/
+ uint64_t tid;/**< Parent Thread id for this portal */
+};
+
+/* TODO - this is costly, need to write a fast conversion routine */
+static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ int i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
+ if (paddr >= memseg[i].phys_addr && paddr <
+ memseg[i].phys_addr + memseg[i].len)
+ return (uint8_t *)(memseg[i].addr) +
+ (paddr - memseg[i].phys_addr);
+ }
+
+ return NULL;
+}
+
+/**
+ * Register a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be registered.
+ */
+void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
+
+/**
+ * Unregister a DPAA driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
+
+/**
+ * Initialize a DPAA portal
+ *
+ * @param arg
+ * Per thread ID
+ *
+ * @return
+ * 0 in case of success, error otherwise
+ */
+int rte_dpaa_portal_init(void *arg);
+
+/**
+ * Cleanup a DPAA Portal
+ */
+void dpaa_portal_finish(void *arg);
+
+/** Helper for DPAA device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
+RTE_INIT(dpaainitfn_ ##nm); \
+static void dpaainitfn_ ##nm(void) \
+{\
+ (dpaa_drv).driver.name = RTE_STR(nm);\
+ rte_dpaa_driver_register(&dpaa_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
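+/*
+ * Example (sketch): registering a hypothetical PMD with the DPAA bus (all
+ * 'mydrv' names are placeholders):
+ *
+ *	static struct rte_dpaa_driver rte_mydrv_pmd = {
+ *		.drv_type = FSL_DPAA_ETH,
+ *		.probe = mydrv_probe,
+ *		.remove = mydrv_remove,
+ *	};
+ *	RTE_PMD_REGISTER_DPAA(net_mydrv, rte_mydrv_pmd);
+ */
+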
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA_BUS_H__ */
diff --git a/drivers/bus/dpaa/rte_dpaa_logs.h b/drivers/bus/dpaa/rte_dpaa_logs.h
new file mode 100644
index 00000000..037c96b0
--- /dev/null
+++ b/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA_LOGS_H_
+#define _DPAA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int dpaa_logtype_bus;
+extern int dpaa_logtype_mempool;
+extern int dpaa_logtype_pmd;
+
+#define DPAA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
+#define DPAA_BUS_HWWARN(cond, fmt, args...) \
+ do {\
+ if (cond) \
+ DPAA_BUS_LOG(DEBUG, "WARN: " fmt, ##args); \
+ } while (0)
+#else
+#define DPAA_BUS_HWWARN(cond, fmt, args...) do { } while (0)
+#endif
+
+#define DPAA_BUS_DEBUG(fmt, args...) \
+ DPAA_BUS_LOG(DEBUG, fmt, ## args)
+#define DPAA_BUS_INFO(fmt, args...) \
+ DPAA_BUS_LOG(INFO, fmt, ## args)
+#define DPAA_BUS_ERR(fmt, args...) \
+ DPAA_BUS_LOG(ERR, fmt, ## args)
+#define DPAA_BUS_WARN(fmt, args...) \
+ DPAA_BUS_LOG(WARNING, fmt, ## args)
+
+/* Mempool related logs */
+
+#define DPAA_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_mempool, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define MEMPOOL_INIT_FUNC_TRACE() DPAA_MEMPOOL_LOG(DEBUG, " >>")
+
+#define DPAA_MEMPOOL_DPDEBUG(fmt, args...) \
+ RTE_LOG_DP(DEBUG, PMD, fmt, ## args)
+#define DPAA_MEMPOOL_DEBUG(fmt, args...) \
+ DPAA_MEMPOOL_LOG(DEBUG, fmt, ## args)
+#define DPAA_MEMPOOL_ERR(fmt, args...) \
+ DPAA_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA_MEMPOOL_INFO(fmt, args...) \
+ DPAA_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA_MEMPOOL_WARN(fmt, args...) \
+ DPAA_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* PMD related logs */
+
+#define DPAA_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_PMD_LOG(DEBUG, " >>")
+
+#define DPAA_PMD_DEBUG(fmt, args...) \
+ DPAA_PMD_LOG(DEBUG, fmt, ## args)
+#define DPAA_PMD_ERR(fmt, args...) \
+ DPAA_PMD_LOG(ERR, fmt, ## args)
+#define DPAA_PMD_INFO(fmt, args...) \
+ DPAA_PMD_LOG(INFO, fmt, ## args)
+#define DPAA_PMD_WARN(fmt, args...) \
+ DPAA_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#endif /* _DPAA_LOGS_H_ */
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index d1b790b5..c08b2af9 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -51,6 +51,9 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
# versioning export map
EXPORT_MAP := rte_bus_fslmc_version.map
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index f71598d5..480857e5 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -42,32 +42,213 @@
#include <rte_memcpy.h>
#include <rte_ethdev.h>
-#include "rte_fslmc.h"
-#include "fslmc_vfio.h"
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
#define FSLMC_BUS_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, "%s(): " fmt "\n", __func__, ##args)
+ RTE_LOG(level, EAL, fmt "\n", ##args)
+
+#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
struct rte_fslmc_bus rte_fslmc_bus;
+static void
+cleanup_fslmc_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_device *t_dev;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, t_dev) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+static int
+compare_dpaa2_devname(struct rte_dpaa2_device *dev1,
+ struct rte_dpaa2_device *dev2)
+{
+ int comp;
+
+ if (dev1->dev_type > dev2->dev_type) {
+ comp = 1;
+ } else if (dev1->dev_type < dev2->dev_type) {
+ comp = -1;
+ } else {
+ /* Check the ID as types match */
+ if (dev1->object_id > dev2->object_id)
+ comp = 1;
+ else if (dev1->object_id < dev2->object_id)
+ comp = -1;
+ else
+ comp = 0; /* Duplicate device name */
+ }
+
+ return comp;
+}
+
+static void
+insert_in_device_list(struct rte_dpaa2_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa2_device *dev = NULL;
+ struct rte_dpaa2_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, tdev) {
+ comp = compare_dpaa2_devname(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next);
+}
+
+static int
+scan_one_fslmc_device(char *dev_name)
+{
+ char *dup_dev_name, *t_ptr;
+ struct rte_dpaa2_device *dev;
+
+ if (!dev_name)
+ return -1;
+
+ /* Ignore the Container name itself */
+ if (!strncmp("dprc", dev_name, 4))
+ return 0;
+
+ /* Create a temporary copy of the name so strtok() can safely modify it */
+ dup_dev_name = strdup(dev_name);
+ if (!dup_dev_name) {
+ FSLMC_BUS_LOG(ERR, "Out of memory.");
+ return -ENOMEM;
+ }
+
+ /* For all other devices, we allocate rte_dpaa2_device.
+ * For those devices where there is no driver, probe would release
+ * the memory associated with the rte_dpaa2_device after necessary
+ * initialization.
+ */
+ dev = calloc(1, sizeof(struct rte_dpaa2_device));
+ if (!dev) {
+ FSLMC_BUS_LOG(ERR, "Out of memory.");
+ free(dup_dev_name);
+ return -ENOMEM;
+ }
+
+ /* Parse the device name and ID */
+ t_ptr = strtok(dup_dev_name, ".");
+ if (!t_ptr) {
+ FSLMC_BUS_LOG(ERR, "Incorrect device string observed.");
+ goto cleanup;
+ }
+ if (!strncmp("dpni", t_ptr, 4))
+ dev->dev_type = DPAA2_ETH;
+ else if (!strncmp("dpseci", t_ptr, 6))
+ dev->dev_type = DPAA2_CRYPTO;
+ else if (!strncmp("dpcon", t_ptr, 5))
+ dev->dev_type = DPAA2_CON;
+ else if (!strncmp("dpbp", t_ptr, 4))
+ dev->dev_type = DPAA2_BPOOL;
+ else if (!strncmp("dpio", t_ptr, 4))
+ dev->dev_type = DPAA2_IO;
+ else if (!strncmp("dpci", t_ptr, 5))
+ dev->dev_type = DPAA2_CI;
+ else if (!strncmp("dpmcp", t_ptr, 5))
+ dev->dev_type = DPAA2_MPORTAL;
+ else
+ dev->dev_type = DPAA2_UNKNOWN;
+
+ t_ptr = strtok(NULL, ".");
+ if (!t_ptr) {
+ FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).",
+ dev_name);
+ goto cleanup;
+ }
+
+ sscanf(t_ptr, "%hu", &dev->object_id);
+ dev->device.name = strdup(dev_name);
+ if (!dev->device.name) {
+ FSLMC_BUS_LOG(ERR, "Out of memory.");
+ goto cleanup;
+ }
+
+ /* Add device in the fslmc device list */
+ insert_in_device_list(dev);
+
+ /* Don't need the duplicated device filesystem entry anymore */
+ if (dup_dev_name)
+ free(dup_dev_name);
+
+ return 0;
+cleanup:
+ if (dup_dev_name)
+ free(dup_dev_name);
+ if (dev)
+ free(dev);
+ return -1;
+}
+
static int
rte_fslmc_scan(void)
{
int ret;
+ int device_count = 0;
+ char fslmc_dirpath[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ static int process_once;
+ int groupid;
- ret = fslmc_vfio_setup_group();
- if (ret) {
- FSLMC_BUS_LOG(ERR, "fslmc: Unable to setup VFIO");
- return ret;
+ if (process_once) {
+ FSLMC_BUS_LOG(DEBUG,
+ "Fslmc bus already scanned. Not rescanning");
+ return 0;
}
+ process_once = 1;
- ret = fslmc_vfio_process_group();
- if (ret) {
- FSLMC_BUS_LOG(ERR, "fslmc: Unable to setup devices");
- return -1;
+ ret = fslmc_get_container_group(&groupid);
+ if (ret != 0)
+ goto scan_fail;
+
+ /* Scan devices on the group */
+ sprintf(fslmc_dirpath, "%s/%d/devices", VFIO_IOMMU_GROUP_PATH,
+ groupid);
+ dir = opendir(fslmc_dirpath);
+ if (!dir) {
+ FSLMC_BUS_LOG(ERR, "Unable to open VFIO group dir.");
+ goto scan_fail;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_name[0] == '.' || entry->d_type != DT_LNK)
+ continue;
+
+ ret = scan_one_fslmc_device(entry->d_name);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+ device_count += 1;
}
- RTE_LOG(INFO, EAL, "fslmc: Bus scan completed\n");
+ FSLMC_BUS_LOG(INFO, "fslmc: Bus scan completed");
+
+ closedir(dir);
+ return 0;
+
+scan_fail_cleanup:
+ closedir(dir);
+
+ /* Remove all devices in the list */
+ cleanup_fslmc_device_list();
+scan_fail:
+ FSLMC_BUS_LOG(DEBUG, "FSLMC Bus Not Available. Skipping.");
+ /* Irrespective of failure, scan only returns success */
return 0;
}
@@ -88,6 +269,21 @@ rte_fslmc_probe(void)
struct rte_dpaa2_device *dev;
struct rte_dpaa2_driver *drv;
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return 0;
+
+ ret = fslmc_vfio_setup_group();
+ if (ret) {
+ FSLMC_BUS_LOG(ERR, "Unable to setup VFIO %d", ret);
+ return 0;
+ }
+
+ ret = fslmc_vfio_process_group();
+ if (ret) {
+ FSLMC_BUS_LOG(ERR, "Unable to setup devices %d", ret);
+ return 0;
+ }
+
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -103,7 +299,8 @@ rte_fslmc_probe(void)
break;
}
}
- return ret;
+
+ return 0;
}
static struct rte_device *
@@ -149,11 +346,21 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
driver->fslmc_bus = NULL;
}
+/*
+ * Get iommu class of DPAA2 devices on the bus.
+ */
+static enum rte_iova_mode
+rte_dpaa2_get_iommu_class(void)
+{
+ return RTE_IOVA_PA;
+}
+
struct rte_fslmc_bus rte_fslmc_bus = {
.bus = {
.scan = rte_fslmc_scan,
.probe = rte_fslmc_probe,
.find_device = rte_fslmc_find_device,
+ .get_iommu_class = rte_dpaa2_get_iommu_class,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
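insert_in_device_list() above keeps the bus device list sorted by (dev_type, object_id) so that probing walks devices in a stable order. A self-contained sketch of the same sorted TAILQ insertion, using simplified stand-in types rather than the patch's structures:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct dev {
	TAILQ_ENTRY(dev) next;
	int type;	/* stands in for dev_type */
	int id;		/* stands in for object_id */
};

TAILQ_HEAD(dev_list, dev);

static int dev_cmp(const struct dev *a, const struct dev *b)
{
	if (a->type != b->type)
		return a->type > b->type ? 1 : -1;
	if (a->id != b->id)
		return a->id > b->id ? 1 : -1;
	return 0;	/* duplicate */
}

/* Walk until an entry sorting after the new one is found and insert
 * before it; otherwise append. The list stays ordered throughout.
 */
static void insert_sorted(struct dev_list *list, struct dev *newdev)
{
	struct dev *d;

	TAILQ_FOREACH(d, list, next) {
		if (dev_cmp(newdev, d) < 0) {
			TAILQ_INSERT_BEFORE(d, newdev, next);
			return;
		}
	}
	TAILQ_INSERT_TAIL(list, newdev, next);
}

int main(void)
{
	struct dev_list list = TAILQ_HEAD_INITIALIZER(list);
	int types[] = {2, 1, 2, 1}, ids[] = {7, 3, 1, 9};
	struct dev *d;
	int i;

	for (i = 0; i < 4; i++) {
		d = calloc(1, sizeof(*d));
		if (!d)
			return 1;
		d->type = types[i];
		d->id = ids[i];
		insert_sorted(&list, d);
	}
	TAILQ_FOREACH(d, &list, next)
		printf("type %d id %d\n", d->type, d->id);
	return 0;
}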
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 45e59277..7831201a 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -46,6 +46,7 @@
#include <dirent.h>
#include <sys/eventfd.h>
+#include <eal_filesystem.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -58,28 +59,30 @@
#include "rte_fslmc.h"
#include "fslmc_vfio.h"
+#include <mc/fsl_dpmng.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-#define VFIO_MAX_CONTAINERS 1
-
#define FSLMC_VFIO_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, "%s(): " fmt "\n", __func__, ##args)
+ RTE_LOG(level, EAL, fmt "\n", ##args)
/** Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
+#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
+
/* Number of VFIO containers & groups within */
-static struct fslmc_vfio_group vfio_groups[VFIO_MAX_GRP];
-static struct fslmc_vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
+static struct fslmc_vfio_group vfio_group;
+static struct fslmc_vfio_container vfio_container;
static int container_device_fd;
+static char *g_container;
static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
-static uint32_t mcp_id;
static int is_dma_done;
-static struct rte_fslmc_object_list fslmc_obj_list =
- TAILQ_HEAD_INITIALIZER(fslmc_obj_list);
+
+static struct rte_dpaa2_object_list dpaa2_obj_list =
+ TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
/* Register an fslmc bus based DPAA2 object */
void
@@ -87,27 +90,69 @@ rte_fslmc_object_register(struct rte_dpaa2_object *object)
{
RTE_VERIFY(object);
- TAILQ_INSERT_TAIL(&fslmc_obj_list, object, next);
+ TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
}
-static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
+int
+fslmc_get_container_group(int *groupid)
{
- struct fslmc_vfio_container *container;
- int i, fd, ret;
+ int ret;
+ char *container;
- /* Try connecting to vfio container if already created */
- for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
- container = &vfio_containers[i];
- if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
- &container->fd)) {
- FSLMC_VFIO_LOG(INFO,
- "Container pre-exists with FD[0x%x] for this group",
- container->fd);
- vfio_group->container = container;
- return 0;
+ if (!g_container) {
+ container = getenv("DPRC");
+ if (container == NULL) {
+ RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n");
+ return -EINVAL;
+ }
+
+ if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
+ FSLMC_VFIO_LOG(ERR, "Invalid container name: %s",
+ container);
+ return -1;
+ }
+
+ g_container = strdup(container);
+ if (!g_container) {
+ FSLMC_VFIO_LOG(ERR, "Out of memory.");
+ return -ENOMEM;
}
}
+ /* get group number */
+ ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid);
+ if (ret <= 0) {
+ FSLMC_VFIO_LOG(ERR, "Unable to find %s IOMMU group",
+ g_container);
+ return -1;
+ }
+
+ FSLMC_VFIO_LOG(DEBUG, "Container: %s has VFIO iommu group id = %d",
+ g_container, *groupid);
+
+ return 0;
+}
+
+static int
+vfio_connect_container(void)
+{
+ int fd, ret;
+
+ if (vfio_container.used) {
+ FSLMC_VFIO_LOG(DEBUG, "No container available.");
+ return -1;
+ }
+
+ /* Try connecting to vfio container if already created */
+ if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
+ &vfio_container.fd)) {
+ FSLMC_VFIO_LOG(INFO,
+ "Container pre-exists with FD[0x%x] for this group",
+ vfio_container.fd);
+ vfio_group.container = &vfio_container;
+ return 0;
+ }
+
/* Opens main vfio file descriptor which represents the "container" */
fd = vfio_get_container_fd();
if (fd < 0) {
@@ -118,7 +163,7 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
/* Check whether support for SMMU type IOMMU present or not */
if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
/* Connect group to container */
- ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
FSLMC_VFIO_LOG(ERR, "Failed to setup group container");
close(fd);
@@ -137,23 +182,11 @@ static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
return -EINVAL;
}
- container = NULL;
- for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
- if (vfio_containers[i].used)
- continue;
- container = &vfio_containers[i];
- }
- if (!container) {
- FSLMC_VFIO_LOG(ERR, "No free container found");
- close(fd);
- return -ENOMEM;
- }
+ vfio_container.used = 1;
+ vfio_container.fd = fd;
+ vfio_container.group = &vfio_group;
+ vfio_group.container = &vfio_container;
- container->used = 1;
- container->fd = fd;
- container->group_list[container->index] = vfio_group;
- vfio_group->container = container;
- container->index++;
return 0;
}
@@ -222,7 +255,7 @@ int rte_fslmc_vfio_dmamap(void)
#endif
/* SET DMA MAP for IOMMU */
- group = &vfio_groups[0];
+ group = &vfio_group;
if (!group->container) {
FSLMC_VFIO_LOG(ERR, "Container is not connected ");
@@ -301,361 +334,367 @@ MC_FAILURE:
return v_addr;
}
-static inline int
-dpaa2_compare_dpaa2_dev(const struct rte_dpaa2_device *dev,
- const struct rte_dpaa2_device *dev2)
-{
- /*not the same family device */
- if (dev->dev_type != DPAA2_MC_DPNI_DEVID ||
- dev->dev_type != DPAA2_MC_DPSECI_DEVID)
- return -1;
-
- if (dev->object_id == dev2->object_id)
- return 0;
- else
- return 1;
-}
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
-static void
-fslmc_bus_add_device(struct rte_dpaa2_device *dev)
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
{
- struct rte_fslmc_device_list *dev_l;
-
- dev_l = &rte_fslmc_bus.device_list;
-
- /* device is valid, add in list (sorted) */
- if (TAILQ_EMPTY(dev_l)) {
- TAILQ_INSERT_TAIL(dev_l, dev, next);
- } else {
- struct rte_dpaa2_device *dev2;
- int ret;
+ int len, ret;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
- TAILQ_FOREACH(dev2, dev_l, next) {
- ret = dpaa2_compare_dpaa2_dev(dev, dev2);
- if (ret <= 0)
- continue;
+ len = sizeof(irq_set_buf);
- TAILQ_INSERT_BEFORE(dev2, dev, next);
- return;
- }
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->count = 1;
+ irq_set->flags =
+ VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = intr_handle->fd;
- TAILQ_INSERT_TAIL(dev_l, dev, next);
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Error:dpaa2 SET IRQs fd=%d, err = %d(%s)\n",
+ intr_handle->fd, errno, strerror(errno));
+ return ret;
}
-}
-#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+ return ret;
+}
-int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
- uint32_t index)
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
{
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
- int *fd_ptr, fd, ret;
+ int len, ret;
+
+ len = sizeof(struct vfio_irq_set);
- /* Prepare vfio_irq_set structure and SET the IRQ in VFIO */
- /* Give the eventfd to VFIO */
- fd = eventfd(0, 0);
irq_set = (struct vfio_irq_set *)irq_set_buf;
- irq_set->argsz = sizeof(irq_set_buf);
- irq_set->count = 1;
- irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
- VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->argsz = len;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = index;
irq_set->start = 0;
- fd_ptr = (int *)&irq_set->data;
- *fd_ptr = fd;
+ irq_set->count = 0;
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
- if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "Unable to set IRQ in VFIO, ret: %d\n",
- ret);
- return -1;
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "Error disabling dpaa2 interrupts for fd %d\n",
+ intr_handle->fd);
+
+ return ret;
+}
+
+/* set up interrupt support (without enabling interrupts) */
+int
+rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs)
+{
+ int i, ret;
+
+ /* start from MSI-X interrupt type */
+ for (i = 0; i < num_irqs; i++) {
+ struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
+ int fd = -1;
+
+ irq_info.index = i;
+
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR,
+ "cannot get IRQ(%d) info, error %i (%s)",
+ i, errno, strerror(errno));
+ return -1;
+ }
+
+ /* if this vector cannot be used with eventfd,
+ * skip it and try the next
+ * available index
+ */
+ if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
+ continue;
+
+ /* set up an eventfd for interrupts */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ FSLMC_VFIO_LOG(ERR,
+ "cannot set up eventfd, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ intr_handle->fd = fd;
+ intr_handle->type = RTE_INTR_HANDLE_VFIO_MSI;
+ intr_handle->vfio_dev_fd = vfio_dev_fd;
+
+ return 0;
}
- /* Set the FD and update the flags */
- intr_handle->fd = fd;
- return 0;
+ /* if we're here, we haven't found a suitable interrupt vector */
+ return -1;
}
-/* Following function shall fetch total available list of MC devices
- * from VFIO container & populate private list of devices and other
- * data structures
+/*
+ * fslmc_process_iodevices() handles only I/O (ETH, CRYPTO and, possibly,
+ * EVENT) devices.
*/
-int fslmc_vfio_process_group(void)
+static int
+fslmc_process_iodevices(struct rte_dpaa2_device *dev)
{
- struct fslmc_vfio_device *vdev;
+ int dev_fd;
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
- char *temp_obj, *object_type, *mcp_obj, *dev_name;
- int32_t object_id, i, dev_fd, ret;
- DIR *d;
- struct dirent *dir;
- char path[PATH_MAX];
- int64_t v_addr;
- int ndev_count;
- struct fslmc_vfio_group *group = &vfio_groups[0];
- static int process_once;
+ struct rte_dpaa2_object *object = NULL;
- /* if already done once */
- if (process_once) {
- FSLMC_VFIO_LOG(DEBUG,
- "Already scanned once - re-scan not supported");
- return 0;
+ dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
+ dev->device.name);
+ if (dev_fd <= 0) {
+ FSLMC_VFIO_LOG(ERR, "Unable to obtain device FD for device:%s",
+ dev->device.name);
+ return -1;
}
- process_once = 0;
-
- sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid);
- d = opendir(path);
- if (!d) {
- FSLMC_VFIO_LOG(ERR, "Unable to open directory %s", path);
+ if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
+ FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
return -1;
}
- /*Counting the number of devices in a group and getting the mcp ID*/
- ndev_count = 0;
- mcp_obj = NULL;
- while ((dir = readdir(d)) != NULL) {
- if (dir->d_type == DT_LNK) {
- ndev_count++;
- if (!strncmp("dpmcp", dir->d_name, 5)) {
- if (mcp_obj)
- free(mcp_obj);
- mcp_obj = malloc(sizeof(dir->d_name));
- if (!mcp_obj) {
- FSLMC_VFIO_LOG(ERR,
- "mcp obj:alloc failed");
- closedir(d);
- return -ENOMEM;
- }
- strcpy(mcp_obj, dir->d_name);
- temp_obj = strtok(dir->d_name, ".");
- temp_obj = strtok(NULL, ".");
- sscanf(temp_obj, "%d", &mcp_id);
- }
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ rte_dpaa2_vfio_setup_intr(&dev->intr_handle, dev_fd,
+ device_info.num_irqs);
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
+ if (dev->dev_type == object->dev_type)
+ object->create(dev_fd, &device_info,
+ dev->object_id);
+ else
+ continue;
}
+ break;
+ default:
+ break;
}
- closedir(d);
- d = NULL;
- if (!mcp_obj) {
- FSLMC_VFIO_LOG(ERR, "DPAA2 MCP Object not Found");
- return -ENODEV;
- }
- RTE_LOG(INFO, EAL, "fslmc: DPRC contains = %d devices\n", ndev_count);
-
- /* Allocate the memory depends upon number of objects in a group*/
- group->vfio_device = (struct fslmc_vfio_device *)malloc(ndev_count *
- sizeof(struct fslmc_vfio_device));
- if (!(group->vfio_device)) {
- FSLMC_VFIO_LOG(ERR, "vfio device: Unable to allocate memory\n");
- free(mcp_obj);
- return -ENOMEM;
- }
- /* Allocate memory for MC Portal list */
+ FSLMC_VFIO_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
+ return 0;
+}
+
+static int
+fslmc_process_mcp(struct rte_dpaa2_device *dev)
+{
+ int64_t v_addr;
+ char *dev_name;
+ struct fsl_mc_io dpmng = {0};
+ struct mc_version mc_ver_info = {0};
+
rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
if (!rte_mcp_ptr_list) {
- FSLMC_VFIO_LOG(ERR, "portal list: Unable to allocate memory!");
- free(mcp_obj);
- goto FAILURE;
+ FSLMC_VFIO_LOG(ERR, "Out of memory");
+ return -ENOMEM;
}
- v_addr = vfio_map_mcp_obj(group, mcp_obj);
- free(mcp_obj);
- if (v_addr == (int64_t)MAP_FAILED) {
- FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)", errno);
- goto FAILURE;
+ dev_name = strdup(dev->device.name);
+ if (!dev_name) {
+ FSLMC_VFIO_LOG(ERR, "Out of memory.");
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ return -ENOMEM;
}
- rte_mcp_ptr_list[0] = (void *)v_addr;
-
- d = opendir(path);
- if (!d) {
- FSLMC_VFIO_LOG(ERR, "Unable to open %s Directory", path);
- goto FAILURE;
+ v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
+ if (v_addr == (int64_t)MAP_FAILED) {
+ FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)",
+ errno);
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ return -1;
}
- i = 0;
- /* Parsing each object and initiating them*/
- while ((dir = readdir(d)) != NULL) {
- if (dir->d_type != DT_LNK)
- continue;
- if (!strncmp("dprc", dir->d_name, 4) ||
- !strncmp("dpmcp", dir->d_name, 5))
- continue;
- dev_name = malloc(sizeof(dir->d_name));
- if (!dev_name) {
- FSLMC_VFIO_LOG(ERR, "name: Unable to allocate memory");
- goto FAILURE;
- }
- strcpy(dev_name, dir->d_name);
- object_type = strtok(dir->d_name, ".");
- temp_obj = strtok(NULL, ".");
- sscanf(temp_obj, "%d", &object_id);
-
- /* getting the device fd*/
- dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
- if (dev_fd < 0) {
- FSLMC_VFIO_LOG(ERR,
- "GET_DEVICE_FD error fd: %s, Group: %d",
- dev_name, group->fd);
- free(dev_name);
- goto FAILURE;
- }
+ /* check the MC version compatibility */
+ dpmng.regs = (void *)v_addr;
+ if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info))
+ RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+
+ if ((mc_ver_info.major != MC_VER_MAJOR) ||
+ (mc_ver_info.minor < MC_VER_MINOR)) {
+ RTE_LOG(ERR, PMD, "DPAA2 MC version not compatible!"
+ " Expected %d.%d.x, Detected %d.%d.%d\n",
+ MC_VER_MAJOR, MC_VER_MINOR,
+ mc_ver_info.major, mc_ver_info.minor,
+ mc_ver_info.revision);
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ return -1;
+ }
+ rte_mcp_ptr_list[0] = (void *)v_addr;
- free(dev_name);
- vdev = &group->vfio_device[group->object_index++];
- vdev->fd = dev_fd;
- vdev->index = i;
- i++;
- /* Get Device inofrmation */
- if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) {
- FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
- goto FAILURE;
- }
- if (!strcmp(object_type, "dpni") ||
- !strcmp(object_type, "dpseci")) {
- struct rte_dpaa2_device *dev;
+ return 0;
+}
- dev = malloc(sizeof(struct rte_dpaa2_device));
- if (dev == NULL)
+int
+fslmc_vfio_process_group(void)
+{
+ int ret;
+ int found_mportal = 0;
+ struct rte_dpaa2_device *dev, *dev_temp;
+
+ /* Search the MCP as that should be initialized first. */
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_MPORTAL) {
+ ret = fslmc_process_mcp(dev);
+ if (ret) {
+ FSLMC_VFIO_LOG(DEBUG, "Unable to map Portal.");
return -1;
-
- memset(dev, 0, sizeof(*dev));
- /* store hw_id of dpni/dpseci device */
- dev->object_id = object_id;
- dev->dev_type = (strcmp(object_type, "dpseci")) ?
- DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
-
- sprintf(dev->name, "%s.%d", object_type, object_id);
- dev->device.name = dev->name;
-
- fslmc_bus_add_device(dev);
- FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added %s", dev->name);
- } else {
- /* Parse all other objects */
- struct rte_dpaa2_object *object;
-
- TAILQ_FOREACH(object, &fslmc_obj_list, next) {
- if (!strcmp(object_type, object->name))
- object->create(vdev, &device_info,
- object_id);
- else
- continue;
}
+ if (!found_mportal)
+ found_mportal = 1;
+
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ /* Ideally there is only a single dpmcp, but in case
+ * multiple exist, keep looping over the remaining devices.
+ */
}
}
- closedir(d);
- ret = dpaa2_affine_qbman_swp();
- if (ret)
- FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
+ /* Cannot continue if there is not even a single mportal */
+ if (!found_mportal) {
+ FSLMC_VFIO_LOG(DEBUG,
+ "No MC Portal device found. Not continuing.");
+ return -1;
+ }
- return 0;
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (!dev)
+ break;
-FAILURE:
- if (d)
- closedir(d);
- if (rte_mcp_ptr_list) {
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
+ switch (dev->dev_type) {
+ case DPAA2_ETH:
+ case DPAA2_CRYPTO:
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ FSLMC_VFIO_LOG(DEBUG,
+ "Dev (%s) init failed.",
+ dev->device.name);
+ return ret;
+ }
+ break;
+ case DPAA2_CON:
+ case DPAA2_IO:
+ case DPAA2_CI:
+ case DPAA2_BPOOL:
+ /* Call the object creation routine and remove the
+ * device entry from device list
+ */
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ FSLMC_VFIO_LOG(DEBUG,
+ "Dev (%s) init failed.",
+ dev->device.name);
+ return -1;
+ }
+
+ /* This device is not required to be in the DPDK
+ * exposed device list.
+ */
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ break;
+ case DPAA2_UNKNOWN:
+ default:
+ /* Unknown - ignore */
+ FSLMC_VFIO_LOG(DEBUG, "Found unknown device (%s).",
+ dev->device.name);
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
}
- free(group->vfio_device);
- group->vfio_device = NULL;
- return -1;
+ return 0;
}
-int fslmc_vfio_setup_group(void)
+int
+fslmc_vfio_setup_group(void)
{
- struct fslmc_vfio_group *group = NULL;
int groupid;
- int ret, i;
- char *container;
+ int ret;
struct vfio_group_status status = { .argsz = sizeof(status) };
/* if already done once */
if (container_device_fd)
return 0;
- container = getenv("DPRC");
-
- if (container == NULL) {
- FSLMC_VFIO_LOG(ERR, "VFIO container not set in env DPRC");
- return -EOPNOTSUPP;
- }
-
- /* get group number */
- ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, container, &groupid);
- if (ret == 0) {
- RTE_LOG(WARNING, EAL, "%s not managed by VFIO, skipping\n",
- container);
- return -EOPNOTSUPP;
- }
-
- /* if negative, something failed */
- if (ret < 0)
+ ret = fslmc_get_container_group(&groupid);
+ if (ret)
return ret;
- FSLMC_VFIO_LOG(DEBUG, "VFIO iommu group id = %d", groupid);
-
- /* Check if group already exists */
- for (i = 0; i < VFIO_MAX_GRP; i++) {
- group = &vfio_groups[i];
- if (group->groupid == groupid) {
- FSLMC_VFIO_LOG(ERR, "groupid already exists %d",
- groupid);
- return 0;
- }
+ /* In case this group was already opened, continue without any
+ * processing.
+ */
+ if (vfio_group.groupid == groupid) {
+ FSLMC_VFIO_LOG(ERR, "groupid already exists %d", groupid);
+ return 0;
}
- /* get the actual group fd */
+ /* Get the actual group fd */
ret = vfio_get_group_fd(groupid);
if (ret < 0)
return ret;
- group->fd = ret;
+ vfio_group.fd = ret;
- /*
- * at this point, we know that this group is viable (meaning,
- * all devices are either bound to VFIO or not bound to anything)
- */
-
- ret = ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status);
+ /* Check group viability */
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
if (ret) {
- FSLMC_VFIO_LOG(ERR, " VFIO error getting group status");
- close(group->fd);
+ FSLMC_VFIO_LOG(ERR, "VFIO error getting group status");
+ close(vfio_group.fd);
return ret;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
- close(group->fd);
+ close(vfio_group.fd);
return -EPERM;
}
/* Since Group is VIABLE, Store the groupid */
- group->groupid = groupid;
+ vfio_group.groupid = groupid;
/* check if group does not have a container yet */
if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
/* Now connect this IOMMU group to given container */
- ret = vfio_connect_container(group);
+ ret = vfio_connect_container();
if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO error connecting container"
- " with groupid %d", groupid);
- close(group->fd);
+ FSLMC_VFIO_LOG(ERR,
+ "Error connecting container with groupid %d",
+ groupid);
+ close(vfio_group.fd);
return ret;
}
}
/* Get Device information */
- ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, container);
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "VFIO error getting device %s fd from"
- " group %d", container, group->groupid);
+ FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d",
+ g_container, vfio_group.groupid);
+ close(vfio_group.fd);
return ret;
}
container_device_fd = ret;
FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]",
- container_device_fd);
+ container_device_fd);
return 0;
}
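rte_dpaa2_intr_enable() above follows the usual VFIO interrupt flow: pack an eventfd into a struct vfio_irq_set and hand it to the kernel via VFIO_DEVICE_SET_IRQS, after which the eventfd becomes readable whenever the interrupt fires. A compilable sketch of that flow, assuming the caller already holds an open VFIO device fd (example_irq_enable is illustrative, not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Room for a vfio_irq_set carrying a single eventfd. */
#define EXAMPLE_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))

/* Bind IRQ 'index' of an already-opened VFIO device fd to a fresh
 * eventfd and return that eventfd for the caller to poll.
 */
static int example_irq_enable(int vfio_dev_fd, int index)
{
	char buf[EXAMPLE_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

	if (efd < 0)
		return -errno;

	irq_set->argsz = sizeof(buf);
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = index;
	irq_set->start = 0;
	memcpy(&irq_set->data, &efd, sizeof(efd));

	if (ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set) < 0) {
		int err = -errno;

		fprintf(stderr, "SET_IRQS failed: %s\n", strerror(errno));
		close(efd);
		return err;
	}
	return efd;
}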
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index 0aff9b1f..b442dc08 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -34,9 +34,10 @@
#ifndef _FSLMC_VFIO_H_
#define _FSLMC_VFIO_H_
+#include <rte_vfio.h>
+
#include "eal_vfio.h"
-#define DPAA2_VENDOR_ID 0x1957
#define DPAA2_MC_DPNI_DEVID 7
#define DPAA2_MC_DPSECI_DEVID 3
#define DPAA2_MC_DPCON_DEVID 5
@@ -44,8 +45,6 @@
#define DPAA2_MC_DPBP_DEVID 10
#define DPAA2_MC_DPCI_DEVID 11
-#define VFIO_MAX_GRP 1
-
typedef struct fslmc_vfio_device {
int fd; /* fslmc root container device ?? */
int index; /*index of child object */
@@ -64,51 +63,20 @@ typedef struct fslmc_vfio_container {
int fd; /* /dev/vfio/vfio */
int used;
int index; /* index in group list */
- struct fslmc_vfio_group *group_list[VFIO_MAX_GRP];
+ struct fslmc_vfio_group *group;
} fslmc_vfio_container;
-struct rte_dpaa2_object;
-
-TAILQ_HEAD(rte_fslmc_object_list, rte_dpaa2_object);
-
-typedef int (*rte_fslmc_obj_create_t)(struct fslmc_vfio_device *vdev,
- struct vfio_device_info *obj_info,
- int object_id);
-
-/**
- * A structure describing a DPAA2 driver.
- */
-struct rte_dpaa2_object {
- TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
- const char *name; /**< Name of Object. */
- uint16_t object_id; /**< DPAA2 Object ID */
- rte_fslmc_obj_create_t create;
-};
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index);
+int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index);
-int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
- uint32_t index);
+int rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
+ int vfio_dev_fd,
+ int num_irqs);
int fslmc_vfio_setup_group(void);
int fslmc_vfio_process_group(void);
+char *fslmc_get_container(void);
+int fslmc_get_container_group(int *groupid);
int rte_fslmc_vfio_dmamap(void);
-/**
- * Register a DPAA2 MC Object driver.
- *
- * @param mc_object
- * A pointer to a rte_dpaa_object structure describing the mc object
- * to be registered.
- */
-void rte_fslmc_object_register(struct rte_dpaa2_object *object);
-
-/** Helper for DPAA2 object registration */
-#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
-RTE_INIT(dpaa2objinitfn_ ##nm); \
-static void dpaa2objinitfn_ ##nm(void) \
-{\
- (dpaa2_obj).name = RTE_STR(nm);\
- rte_fslmc_object_register(&dpaa2_obj); \
-} \
-RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
-
#endif /* _FSLMC_VFIO_H_ */
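fslmc_get_container_group() resolves the container name from the DPRC environment variable, bounds-checks it against FSLMC_CONTAINER_MAX_LEN and caches a duplicate for later VFIO calls. A standalone sketch of just that environment handling; the names below are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXAMPLE_CONTAINER_MAX_LEN 8	/* "dprc.XX" plus terminator */

/* Fetch, validate and duplicate the container name, mirroring the
 * DPRC handling in fslmc_get_container_group().
 */
static char *example_get_container(void)
{
	const char *env = getenv("DPRC");

	if (env == NULL) {
		fprintf(stderr, "DPRC not set\n");
		return NULL;
	}
	if (strlen(env) >= EXAMPLE_CONTAINER_MAX_LEN) {
		fprintf(stderr, "container name too long: %s\n", env);
		return NULL;
	}
	return strdup(env);
}

int main(void)
{
	char *container = example_get_container();

	if (container != NULL) {
		printf("using container %s\n", container);
		free(container);
	}
	return 0;
}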
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index fd9a52d9..a846245a 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,19 +42,37 @@
#include <fsl_dpbp.h>
#include <fsl_dpbp_cmd.h>
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpbp_id,
uint16_t *token)
{
+ struct dpbp_cmd_open *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
- cmd_flags,
- 0);
- DPBP_CMD_OPEN(cmd, dpbp_id);
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -62,11 +80,22 @@ int dpbp_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return err;
}
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -81,6 +110,24 @@ int dpbp_close(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_create() - Create the DPBP object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPBP object, allocate required resources and
+ * perform required initialization.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_create(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
uint32_t cmd_flags,
@@ -94,8 +141,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
- cmd_flags,
- dprc_token);
+ cmd_flags, dprc_token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -103,28 +149,47 @@ int dpbp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
+/**
+ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPBP object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
int dpbp_destroy(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+ uint32_t cmd_flags,
+ uint32_t obj_id)
{
+ struct dpbp_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
- cmd_flags,
- dprc_token);
- /* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ cmd_flags, dprc_token);
+
+ cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -139,6 +204,14 @@ int dpbp_enable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -147,20 +220,30 @@ int dpbp_disable(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
- cmd_flags,
- token);
+ cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpbp_is_enabled() - Check if the DPBP is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
+ struct dpbp_rsp_is_enabled *rsp_params;
struct mc_command cmd = { 0 };
int err;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
token);
@@ -171,11 +254,20 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPBP_RSP_IS_ENABLED(cmd, *en);
+ rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
return 0;
}
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -184,8 +276,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
- cmd_flags,
- token);
+ cmd_flags, token);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -195,13 +286,13 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
uint16_t token,
struct dpbp_attr *attr)
{
+ struct dpbp_rsp_get_attributes *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
- cmd_flags,
- token);
+ cmd_flags, token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -209,38 +300,64 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPBP_RSP_GET_ATTRIBUTES(cmd, attr);
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
return 0;
}
-
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Buffer Pool API
+ * @minor_ver: Minor version of Buffer Pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpbp_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpbp_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
+ /* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
- cmd_flags,
- 0);
+ cmd_flags, 0);
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- DPBP_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
-int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
+/**
+ * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @num_free_bufs: Number of free buffers
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
uint32_t *num_free_bufs)
{
+ struct dpbp_rsp_get_num_free_bufs *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -255,7 +372,8 @@ int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPBP_RSP_GET_NUM_FREE_BUFS(cmd, *num_free_bufs);
+ rsp_params = (struct dpbp_rsp_get_num_free_bufs *)cmd.params;
+ *num_free_bufs = le32_to_cpu(rsp_params->num_free_bufs);
return 0;
}
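With the command marshalling now done through explicit dpbp_cmd_*/dpbp_rsp_* structures, a typical caller sequence is open, enable, query, close. A hedged usage sketch, assuming an initialized struct fsl_mc_io (as set up by fslmc_process_mcp() earlier in this patch) and a dpbp_id from the bus scan; the exact dpbp_get_attributes() prototype is assumed from fsl_dpbp.h and may differ:

#include <stdio.h>
#include <fsl_dpbp.h>

static int example_dpbp_bringup(struct fsl_mc_io *mc_io, int dpbp_id)
{
	struct dpbp_attr attr;
	uint16_t token;
	int err;

	err = dpbp_open(mc_io, CMD_PRI_LOW, dpbp_id, &token);
	if (err)
		return err;

	err = dpbp_enable(mc_io, CMD_PRI_LOW, token);
	if (err)
		goto out;

	err = dpbp_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
	if (!err)
		printf("dpbp.%d enabled, hw bpid %u\n", attr.id, attr.bpid);
out:
	dpbp_close(mc_io, CMD_PRI_LOW, token);
	return err;
}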
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index 0ea78379..5471024c 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -41,11 +41,29 @@
#include <fsl_dpci.h>
#include <fsl_dpci_cmd.h>
+/**
+ * dpci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpci_id: DPCI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpci_id,
uint16_t *token)
{
+ struct dpci_cmd_open *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -53,7 +71,8 @@ int dpci_open(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPCI_CMDID_OPEN,
cmd_flags,
0);
- DPCI_CMD_OPEN(cmd, dpci_id);
+ cmd_params = (struct dpci_cmd_open *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(dpci_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -61,11 +80,22 @@ int dpci_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
+/**
+ * dpci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -81,12 +111,35 @@ int dpci_close(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_create() - Create the DPCI object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPCI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_create(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
uint32_t cmd_flags,
const struct dpci_cfg *cfg,
uint32_t *obj_id)
{
+ struct dpci_cmd_create *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -94,7 +147,8 @@ int dpci_create(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPCI_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPCI_CMD_CREATE(cmd, cfg);
+ cmd_params = (struct dpci_cmd_create *)cmd.params;
+ cmd_params->num_of_priorities = cfg->num_of_priorities;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -102,28 +156,53 @@ int dpci_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
+/**
+ * dpci_destroy() - Destroy the DPCI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
int dpci_destroy(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+ uint32_t cmd_flags,
+ uint32_t object_id)
{
+ struct dpci_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCI_CMDID_DESTROY,
cmd_flags,
dprc_token);
- /* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ cmd_params = (struct dpci_cmd_destroy *)cmd.params;
+ cmd_params->dpci_id = cpu_to_le32(object_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -139,6 +218,14 @@ int dpci_enable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -154,13 +241,24 @@ int dpci_disable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_is_enabled() - Check if the DPCI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
+ struct dpci_rsp_is_enabled *rsp_params;
struct mc_command cmd = { 0 };
int err;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCI_CMDID_IS_ENABLED, cmd_flags,
token);
@@ -171,11 +269,20 @@ int dpci_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCI_RSP_IS_ENABLED(cmd, *en);
+ rsp_params = (struct dpci_rsp_is_enabled *)cmd.params;
+ *en = dpci_get_field(rsp_params->en, ENABLE);
return 0;
}
+/**
+ * dpci_reset() - Reset the DPCI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -196,6 +303,7 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
uint16_t token,
struct dpci_attr *attr)
{
+ struct dpci_rsp_get_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -210,7 +318,9 @@ int dpci_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCI_RSP_GET_ATTRIBUTES(cmd, attr);
+ rsp_params = (struct dpci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
return 0;
}
@@ -221,24 +331,46 @@ int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
uint8_t priority,
const struct dpci_rx_queue_cfg *cfg)
{
+ struct dpci_cmd_set_rx_queue *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCI_CMDID_SET_RX_QUEUE,
cmd_flags,
token);
- DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
+ cmd_params = (struct dpci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPCI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t priority,
struct dpci_rx_queue_attr *attr)
{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_rx_queue *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -246,7 +378,8 @@ int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_RX_QUEUE,
cmd_flags,
token);
- DPCI_CMD_GET_RX_QUEUE(cmd, priority);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -254,17 +387,36 @@ int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCI_RSP_GET_RX_QUEUE(cmd, attr);
+ rsp_params = (struct dpci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type = dpci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
return 0;
}
+/**
+ * dpci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @priority: Select the queue relative to number of
+ * priorities of the peer DPCI object
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t priority,
struct dpci_tx_queue_attr *attr)
{
+ struct dpci_cmd_get_queue *cmd_params;
+ struct dpci_rsp_get_tx_queue *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -272,7 +424,8 @@ int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPCI_CMDID_GET_TX_QUEUE,
cmd_flags,
token);
- DPCI_CMD_GET_TX_QUEUE(cmd, priority);
+ cmd_params = (struct dpci_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -280,16 +433,27 @@ int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCI_RSP_GET_TX_QUEUE(cmd, attr);
+ rsp_params = (struct dpci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
return 0;
}
+/**
+ * dpci_get_api_version() - Get communication interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path communication interface API
+ * @minor_ver: Minor version of data path communication interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpci_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpci_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -301,7 +465,9 @@ int dpci_get_api_version(struct fsl_mc_io *mc_io,
if (err)
return err;
- DPCI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ rsp_params = (struct dpci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
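dpci_set_rx_queue() above copies each field of struct dpci_rx_queue_cfg into the command individually, which makes the expected caller-side structure explicit. A sketch of filling that structure for a polled queue with no DPIO notification; the option flag and destination constant are assumed to match fsl_dpci.h:

#include <fsl_dpci.h>

static int example_dpci_rx_setup(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct dpci_rx_queue_cfg cfg = {0};

	/* Only attach a user context; leave the queue un-notified. */
	cfg.options = DPCI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = 0x1234;
	cfg.dest_cfg.dest_type = DPCI_DEST_NONE;

	return dpci_set_rx_queue(mc_io, CMD_PRI_LOW, token,
				 0 /* priority */, &cfg);
}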
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index b078dff8..477ee46e 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -34,19 +34,38 @@
#include <fsl_dpcon.h>
#include <fsl_dpcon_cmd.h>
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpcon_id,
uint16_t *token)
{
struct mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
cmd_flags,
0);
- DPCON_CMD_OPEN(cmd, dpcon_id);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -54,11 +73,22 @@ int dpcon_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -74,12 +104,34 @@ int dpcon_close(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpcon_create() - Create the DPCON object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id; use in subsequent API calls
+ *
+ * Create the DPCON object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_create(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpcon_cfg *cfg,
- uint32_t *obj_id)
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id)
{
+ struct dpcon_cmd_create *dpcon_cmd;
struct mc_command cmd = { 0 };
int err;
@@ -87,7 +139,8 @@ int dpcon_create(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPCON_CMD_CREATE(cmd, cfg);
+ dpcon_cmd = (struct dpcon_cmd_create *)cmd.params;
+ dpcon_cmd->num_priorities = cfg->num_priorities;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -95,28 +148,47 @@ int dpcon_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
+/**
+ * dpcon_destroy() - Destroy the DPCON object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @obj_id: ID of DPCON object
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
int dpcon_destroy(struct fsl_mc_io *mc_io,
uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+ uint32_t cmd_flags,
+ uint32_t obj_id)
{
+ struct dpcon_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY,
cmd_flags,
dprc_token);
- /* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ cmd_params = (struct dpcon_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(obj_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpcon_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -132,6 +204,14 @@ int dpcon_enable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpcon_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -147,13 +227,24 @@ int dpcon_disable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
+ struct dpcon_rsp_is_enabled *dpcon_rsp;
struct mc_command cmd = { 0 };
int err;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
cmd_flags,
@@ -165,11 +256,20 @@ int dpcon_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCON_RSP_IS_ENABLED(cmd, *en);
+ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
+ *en = dpcon_rsp->enabled & DPCON_ENABLE;
return 0;
}
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -184,11 +284,21 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpcon_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpcon_attr *attr)
{
+ struct dpcon_rsp_get_attr *dpcon_rsp;
struct mc_command cmd = { 0 };
int err;
@@ -203,28 +313,45 @@ int dpcon_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPCON_RSP_GET_ATTR(cmd, attr);
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
return 0;
}
+/**
+ * dpcon_get_api_version() - Get Data Path Concentrator API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPCON API
+ * @minor_ver: Minor version of DPCON API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpcon_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpcon_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
+ /* prepare command */
cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
- cmd_flags,
- 0);
+ cmd_flags, 0);
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- DPCON_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ /* retrieve response parameters */
+ rsp_params = (struct dpcon_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
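Taken together, the dpcon_* calls above follow the usual MC object lifecycle: create inside a container, open to obtain an authentication token, enable and query through that token, then tear everything down. The following is a hypothetical usage sketch (outside this patch), assuming 'mc_io' is an already-initialized MC portal and 'dprc_token' an open root-container token; error handling is abbreviated.

/* Hypothetical sketch: exercises the dpcon_* lifecycle end to end. */
static int dpcon_lifecycle_demo(struct fsl_mc_io *mc_io, uint16_t dprc_token)
{
	struct dpcon_cfg cfg = { .num_priorities = 8 };
	struct dpcon_attr attr;
	uint32_t obj_id;
	uint16_t token;
	int err, en;

	err = dpcon_create(mc_io, dprc_token, 0, &cfg, &obj_id);
	if (err)
		return err;

	/* A control session is opened against the object id returned by
	 * create; every later runtime call uses the resulting token. */
	err = dpcon_open(mc_io, 0, (int)obj_id, &token);
	if (err)
		return err;

	err = dpcon_enable(mc_io, 0, token);
	if (!err)
		err = dpcon_is_enabled(mc_io, 0, token, &en);
	if (!err)
		err = dpcon_get_attributes(mc_io, 0, token, &attr);

	dpcon_disable(mc_io, 0, token);
	dpcon_close(mc_io, 0, token);
	dpcon_destroy(mc_io, dprc_token, 0, obj_id);
	return err;
}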
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index 608b57a7..76c5d7b7 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,11 +42,29 @@
#include <fsl_dpio.h>
#include <fsl_dpio_cmd.h>
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and any MC portals
+ * assigned to the parent container; this token must be used in
+ * all subsequent commands for this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpio_id,
uint16_t *token)
{
+ struct dpio_cmd_open *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -54,7 +72,8 @@ int dpio_open(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
cmd_flags,
0);
- DPIO_CMD_OPEN(cmd, dpio_id);
+ cmd_params = (struct dpio_cmd_open *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(dpio_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -62,11 +81,19 @@ int dpio_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -82,12 +109,35 @@ int dpio_close(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
-int dpio_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpio_cfg *cfg,
- uint32_t *obj_id)
+/**
+ * dpio_create() - Create the DPIO object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPIO object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id)
{
+ struct dpio_cmd_create *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -95,7 +145,11 @@ int dpio_create(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPIO_CMD_CREATE(cmd, cfg);
+ cmd_params = (struct dpio_cmd_create *)cmd.params;
+ cmd_params->num_priorities = cfg->num_priorities;
+ dpio_set_field(cmd_params->channel_mode,
+ CHANNEL_MODE,
+ cfg->channel_mode);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -103,28 +157,55 @@ int dpio_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
-int dpio_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+/**
+ * dpio_destroy() - Destroy the DPIO object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within the parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
{
+ struct dpio_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
cmd_flags,
dprc_token);
+
/* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ cmd_params = (struct dpio_cmd_destroy *)cmd.params;
+ cmd_params->dpio_id = cpu_to_le32(object_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpio_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -140,6 +221,14 @@ int dpio_enable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
int dpio_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -155,13 +244,24 @@ int dpio_disable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_is_enabled() - Check if the DPIO is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
+ struct dpio_rsp_is_enabled *rsp_params;
struct mc_command cmd = { 0 };
int err;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
token);
@@ -172,11 +272,20 @@ int dpio_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPIO_RSP_IS_ENABLED(cmd, *en);
+ rsp_params = (struct dpio_rsp_is_enabled *)cmd.params;
+ *en = dpio_get_field(rsp_params->en, ENABLE);
return 0;
}
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -197,6 +306,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
uint16_t token,
struct dpio_attr *attr)
{
+ struct dpio_rsp_get_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -211,33 +321,65 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPIO_RSP_GET_ATTRIBUTES(cmd, attr);
+ rsp_params = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->qbman_portal_id = le16_to_cpu(rsp_params->qbman_portal_id);
+ attr->num_priorities = rsp_params->num_priorities;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ce_offset);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(rsp_params->qbman_portal_ci_offset);
+ attr->qbman_version = le32_to_cpu(rsp_params->qbman_version);
+ attr->clk = le32_to_cpu(rsp_params->clk);
+ attr->channel_mode = dpio_get_field(rsp_params->channel_mode,
+ ATTR_CHANNEL_MODE);
return 0;
}
+/**
+ * dpio_set_stashing_destination() - Set the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t sdest)
{
+ struct dpio_stashing_dest *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
cmd_flags,
token);
- DPIO_CMD_SET_STASHING_DEST(cmd, sdest);
+ cmd_params = (struct dpio_stashing_dest *)cmd.params;
+ cmd_params->sdest = sdest;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_get_stashing_destination() - Get the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Returns the stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t *sdest)
{
+ struct dpio_stashing_dest *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -252,17 +394,30 @@ int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPIO_RSP_GET_STASHING_DEST(cmd, *sdest);
+ rsp_params = (struct dpio_stashing_dest *)cmd.params;
+ *sdest = rsp_params->sdest;
return 0;
}
+/**
+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ * @channel_index: Returned channel index to be used in qbman API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int dpcon_id,
uint8_t *channel_index)
{
+ struct dpio_rsp_add_static_dequeue_channel *rsp_params;
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -270,7 +425,8 @@ int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL,
cmd_flags,
token);
- DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -278,16 +434,27 @@ int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index);
+ rsp_params = (struct dpio_rsp_add_static_dequeue_channel *)cmd.params;
+ *channel_index = rsp_params->channel_index;
return 0;
}
+/**
+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @dpcon_id: DPCON object ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int dpcon_id)
{
+ struct dpio_cmd_static_dequeue_channel *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
@@ -295,17 +462,28 @@ int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL,
cmd_flags,
token);
- DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id);
+ cmd_params = (struct dpio_cmd_static_dequeue_channel *)cmd.params;
+ cmd_params->dpcon_id = cpu_to_le32(dpcon_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path I/O API
+ * @minor_ver: Minor version of data path I/O API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpio_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpio_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -317,7 +495,9 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io,
if (err)
return err;
- DPIO_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ rsp_params = (struct dpio_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
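The static dequeue channel calls above are what tie a DPCON to a DPIO: the MC hands back a channel index that the qbman dequeue API consumes. A hypothetical sketch (outside this patch), assuming 'token' is an open DPIO token and 'dpcon_id' the id of a created DPCON object:

/* Hypothetical sketch: bind and unbind a DPCON as a static dequeue
 * channel on an open DPIO. */
static int bind_static_channel(struct fsl_mc_io *mc_io, uint16_t token,
			       int dpcon_id)
{
	uint8_t channel_index;
	int err;

	err = dpio_add_static_dequeue_channel(mc_io, 0, token, dpcon_id,
					      &channel_index);
	if (err)
		return err;

	/* channel_index is what the qbman dequeue API expects; a real
	 * driver would store it alongside the software portal state. */

	return dpio_remove_static_dequeue_channel(mc_io, 0, token, dpcon_id);
}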
diff --git a/drivers/bus/fslmc/mc/dpmng.c b/drivers/bus/fslmc/mc/dpmng.c
index dd1c3ac0..f9946e8c 100644
--- a/drivers/bus/fslmc/mc/dpmng.c
+++ b/drivers/bus/fslmc/mc/dpmng.c
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,11 +42,21 @@
#include <fsl_dpmng.h>
#include <fsl_dpmng_cmd.h>
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int mc_get_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
struct mc_version *mc_ver_info)
{
struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
int err;
/* prepare command */
@@ -59,15 +70,31 @@ int mc_get_version(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPMNG_RSP_GET_VERSION(cmd, mc_ver_info);
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
return 0;
}
+/**
+ * mc_get_soc_version() - Retrieves the Management Complex SoC version
+ * information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_platform_info: Returned version information structure. The structure
+ * contains the values of the SVR and PVR registers.
+ * Please consult the platform-specific reference manual
+ * for detailed information.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int mc_get_soc_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
struct mc_soc_version *mc_platform_info)
{
+ struct dpmng_rsp_get_soc_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -82,7 +109,9 @@ int mc_get_soc_version(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPMNG_RSP_GET_SOC_VERSION(cmd, mc_platform_info);
+ rsp_params = (struct dpmng_rsp_get_soc_version *)cmd.params;
+ mc_platform_info->svr = le32_to_cpu(rsp_params->svr);
+ mc_platform_info->pvr = le32_to_cpu(rsp_params->pvr);
return 0;
}
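Both dpmng calls are simple query round-trips over the portal. A hypothetical sketch (outside this patch) that prints the firmware and SoC identification, assuming an initialized 'mc_io' and <stdio.h>:

/* Hypothetical sketch: query and print MC firmware and SoC versions. */
#include <stdio.h>

static void print_mc_versions(struct fsl_mc_io *mc_io)
{
	struct mc_version fw = { 0 };
	struct mc_soc_version soc = { 0 };

	if (!mc_get_version(mc_io, 0, &fw))
		printf("MC firmware %u.%u rev %u\n",
		       fw.major, fw.minor, fw.revision);
	if (!mc_get_soc_version(mc_io, 0, &soc))
		printf("SVR 0x%08x PVR 0x%08x\n", soc.svr, soc.pvr);
}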
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 32bb9aa3..77ea6f27 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,48 +40,21 @@
#ifndef __FSL_DPBP_H
#define __FSL_DPBP_H
-/* Data Path Buffer Pool API
+/*
+ * Data Path Buffer Pool API
* Contains initialization APIs and runtime control APIs for DPBP
*/
struct fsl_mc_io;
-/**
- * dpbp_open() - Open a control session for the specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpbp_id: DPBP unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpbp_create function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpbp_id,
- uint16_t *token);
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token);
-/**
- * dpbp_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpbp_cfg - Structure representing DPBP configuration
@@ -91,98 +64,33 @@ struct dpbp_cfg {
uint32_t options;
};
-/**
- * dpbp_create() - Create the DPBP object.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPBP object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpbp_cfg *cfg,
- uint32_t *obj_id);
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
- * @dprc_token: Parent container token; '0' for default container
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpbp_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
-/**
- * dpbp_enable() - Enable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpbp_disable() - Disable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpbp_is_enabled() - Check if the DPBP is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
-/**
- * dpbp_reset() - Reset the DPBP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpbp_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpbp_attr - Structure representing DPBP attributes
@@ -195,44 +103,23 @@ struct dpbp_attr {
uint16_t bpid;
};
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr);
+
/**
- * dpbp_get_attributes - Retrieve DPBP attributes.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
+ * DPBP notification options
*/
-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpbp_attr *attr);
/**
- * dpbp_get_api_version() - Get buffer pool API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path buffer pool API
- * @minor_ver: Minor version of data path buffer pool API
- *
- * Return: '0' on Success; Error code otherwise.
+ * BPSCN write will attempt to allocate into a cache (coherent write)
*/
int dpbp_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
uint16_t *minor_ver);
-/**
- * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPBP object
- * @num_free_bufs: Number of free buffers
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
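With the kernel-doc moved out of the header, fsl_dpbp.h is now a bare prototype list; the runtime calls are used the same way as before. A hypothetical sketch (outside this patch) polling the free-buffer count, assuming an open DPBP 'token' and that the final parameter is a uint32_t out-pointer, as the dpbp_rsp_get_num_free_bufs response layout suggests:

/* Hypothetical sketch: check whether a buffer pool still has buffers. */
static int pool_has_buffers(struct fsl_mc_io *mc_io, uint16_t token)
{
	uint32_t free_bufs = 0;
	int err;

	err = dpbp_get_num_free_bufs(mc_io, 0, token, &free_bufs);
	if (err)
		return err;

	return free_bufs > 0;
}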
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index f0ee65a6..ce38c79b 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,47 +42,90 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 3
-#define DPBP_VER_MINOR 2
+#define DPBP_VER_MINOR 3
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
/* Command IDs */
-#define DPBP_CMDID_CLOSE 0x8001
-#define DPBP_CMDID_OPEN 0x8041
-#define DPBP_CMDID_CREATE 0x9041
-#define DPBP_CMDID_DESTROY 0x9841
-#define DPBP_CMDID_GET_API_VERSION 0xa041
-
-#define DPBP_CMDID_ENABLE 0x0021
-#define DPBP_CMDID_DISABLE 0x0031
-#define DPBP_CMDID_GET_ATTR 0x0041
-#define DPBP_CMDID_RESET 0x0051
-#define DPBP_CMDID_IS_ENABLED 0x0061
-
-#define DPBP_CMDID_GET_FREE_BUFFERS_NUM 0x1b21
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPBP_CMD_OPEN(cmd, dpbp_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPBP_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, (attr)->bpid); \
- MC_RSP_OP(cmd, 0, 32, 32, int, (attr)->id);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPBP_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPBP_RSP_GET_NUM_FREE_BUFS(cmd, num_free_bufs) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, num_free_bufs)
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ_ENABLE DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS DPBP_CMD(0x1b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS DPBP_CMD(0x1b1)
+
+#define DPBP_CMDID_GET_FREE_BUFFERS_NUM DPBP_CMD(0x1b2)
+
+#pragma pack(push, 1)
+struct dpbp_cmd_open {
+ uint32_t dpbp_id;
+};
+
+struct dpbp_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpbp_rsp_get_attributes {
+ uint16_t pad;
+ uint16_t bpid;
+ uint32_t id;
+};
+
+struct dpbp_cmd_set_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+ uint32_t depletion_entry;
+ uint32_t depletion_exit;
+ uint32_t surplus_entry;
+ uint32_t surplus_exit;
+ uint16_t options;
+ uint16_t pad[3];
+ uint64_t message_ctx;
+ uint64_t message_iova;
+};
+
+struct dpbp_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct dpbp_rsp_get_num_free_bufs {
+ uint32_t num_free_bufs;
+};
+#pragma pack(pop)
#endif /* _FSL_DPBP_CMD_H */
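The new DPBP_CMD(id) encoding is equivalent to the old hard-coded literals: the command id occupies the upper bits and the low nibble carries the command base version. A quick consistency check (outside this patch) of that arithmetic, using a local stand-in for the macro:

/* Demo: the versioned encoding reproduces the old literal command IDs. */
#include <assert.h>

#define CMD_BASE_VERSION 1
#define CMD_ID_OFFSET    4
#define CMD(id)          (((id) << CMD_ID_OFFSET) | CMD_BASE_VERSION)

int main(void)
{
	assert(CMD(0x800) == 0x8001);  /* old DPBP_CMDID_CLOSE */
	assert(CMD(0x804) == 0x8041);  /* old DPBP_CMDID_OPEN */
	assert(CMD(0x1b2) == 0x1b21);  /* old DPBP_CMDID_GET_FREE_BUFFERS_NUM */
	return 0;
}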
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index 1e155dd7..f4aa6e57 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -62,42 +62,14 @@ struct fsl_mc_io;
*/
#define DPCI_ALL_QUEUES (uint8_t)(-1)
-/**
- * dpci_open() - Open a control session for the specified object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpci_id: DPCI unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpci_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object.
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpci_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpci_id,
- uint16_t *token);
+ uint32_t cmd_flags,
+ int dpci_id,
+ uint16_t *token);
-/**
- * dpci_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* Enable the Order Restoration support
@@ -124,107 +96,37 @@ struct dpci_cfg {
uint8_t num_of_priorities;
};
-/**
- * dpci_create() - Create the DPCI object.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPCI object, allocate required resources and perform required
- * initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpci_cfg *cfg,
- uint32_t *obj_id);
+int dpci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpci_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpci_destroy() - Destroy the DPCI object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpci_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
+int dpci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
-/**
- * dpci_enable() - Enable the DPCI, allow sending and receiving frames.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpci_disable() - Disable the DPCI, stop sending and receiving frames.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpci_is_enabled() - Check if the DPCI is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_is_enabled(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
+int dpci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
-/**
- * dpci_reset() - Reset the DPCI, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpci_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpci_attr - Structure representing DPCI attributes
- * @id: DPCI object ID
+ * @id: DPCI object ID
* @num_of_priorities: Number of receive priorities
*/
struct dpci_attr {
@@ -232,19 +134,10 @@ struct dpci_attr {
uint8_t num_of_priorities;
};
-/**
- * dpci_get_attributes() - Retrieve DPCI attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpci_attr *attr);
+int dpci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpci_attr *attr);
/**
* enum dpci_dest - DPCI destination types
@@ -310,24 +203,11 @@ struct dpci_rx_queue_cfg {
struct dpci_dest_cfg dest_cfg;
};
-/**
- * dpci_set_rx_queue() - Set Rx queue configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- * @priority: Select the queue relative to number of
- * priorities configured at DPCI creation; use
- * DPCI_ALL_QUEUES to configure all Rx queues
- * identically.
- * @cfg: Rx queue configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t priority,
- const struct dpci_rx_queue_cfg *cfg);
+int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpci_rx_queue_cfg *cfg);
/**
* struct dpci_rx_queue_attr - Structure representing Rx queue attributes
@@ -337,26 +217,15 @@ int dpci_set_rx_queue(struct fsl_mc_io *mc_io,
* @fqid: Virtual FQID value to be used for dequeue operations
*/
struct dpci_rx_queue_attr {
- uint64_t user_ctx;
- struct dpci_dest_cfg dest_cfg;
- uint32_t fqid;
+ uint64_t user_ctx;
+ struct dpci_dest_cfg dest_cfg;
+ uint32_t fqid;
};
-/**
- * dpci_get_rx_queue() - Retrieve Rx queue attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- * @priority: Select the queue relative to number of
- * priorities configured at DPCI creation
- * @attr: Returned Rx queue attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t priority,
+int dpci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
struct dpci_rx_queue_attr *attr);
/**
@@ -370,35 +239,15 @@ struct dpci_tx_queue_attr {
uint32_t fqid;
};
-/**
- * dpci_get_tx_queue() - Retrieve Tx queue attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCI object
- * @priority: Select the queue relative to number of
- * priorities of the peer DPCI object
- * @attr: Returned Tx queue attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t priority,
- struct dpci_tx_queue_attr *attr);
+int dpci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpci_tx_queue_attr *attr);
-/**
- * dpci_get_api_version() - Get communication interface API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path communication interface API
- * @minor_ver: Minor version of data path communication interface API
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpci_get_api_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver);
+int dpci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
#endif /* __FSL_DPCI_H */
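The dpci_set_rx_queue() prototype above takes a dpci_rx_queue_cfg whose fields mirror the dpci_cmd_set_rx_queue wire layout. A hypothetical sketch (outside this patch) routing Rx frames on every priority to a DPIO destination; the DPCI_QUEUE_OPT_* flags and the DPCI_DEST_DPIO enumerator are assumed from this header's option and dpci_dest definitions:

/* Hypothetical sketch: point all DPCI Rx queues at a DPIO destination. */
static int dpci_route_rx_to_dpio(struct fsl_mc_io *mc_io, uint16_t token,
				 int dpio_id, uint64_t user_ctx)
{
	struct dpci_rx_queue_cfg cfg = { 0 };

	cfg.options = DPCI_QUEUE_OPT_USER_CTX | DPCI_QUEUE_OPT_DEST;
	cfg.user_ctx = user_ctx;
	cfg.dest_cfg.dest_type = DPCI_DEST_DPIO;
	cfg.dest_cfg.dest_id = dpio_id;
	cfg.dest_cfg.priority = 0;

	return dpci_set_rx_queue(mc_io, 0, token, DPCI_ALL_QUEUES, &cfg);
}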
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
index 6d4e2730..73a551a5 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -40,108 +40,126 @@
#define _FSL_DPCI_CMD_H
/* DPCI Version */
-#define DPCI_VER_MAJOR 3
-#define DPCI_VER_MINOR 3
+#define DPCI_VER_MAJOR 3
+#define DPCI_VER_MINOR 3
-/* Command IDs */
-#define DPCI_CMDID_CLOSE 0x8001
-#define DPCI_CMDID_OPEN 0x8071
-#define DPCI_CMDID_CREATE 0x9072
-#define DPCI_CMDID_DESTROY 0x9871
-#define DPCI_CMDID_GET_API_VERSION 0xa071
-
-#define DPCI_CMDID_ENABLE 0x0021
-#define DPCI_CMDID_DISABLE 0x0031
-#define DPCI_CMDID_GET_ATTR 0x0041
-#define DPCI_CMDID_RESET 0x0051
-#define DPCI_CMDID_IS_ENABLED 0x0061
-
-#define DPCI_CMDID_SET_IRQ_ENABLE 0x0121
-#define DPCI_CMDID_GET_IRQ_ENABLE 0x0131
-#define DPCI_CMDID_SET_IRQ_MASK 0x0141
-#define DPCI_CMDID_GET_IRQ_MASK 0x0151
-#define DPCI_CMDID_GET_IRQ_STATUS 0x0161
-#define DPCI_CMDID_CLEAR_IRQ_STATUS 0x0171
-
-#define DPCI_CMDID_SET_RX_QUEUE 0x0e01
-#define DPCI_CMDID_GET_LINK_STATE 0x0e11
-#define DPCI_CMDID_GET_PEER_ATTR 0x0e21
-#define DPCI_CMDID_GET_RX_QUEUE 0x0e31
-#define DPCI_CMDID_GET_TX_QUEUE 0x0e41
-#define DPCI_CMDID_SET_OPR 0x0e51
-#define DPCI_CMDID_GET_OPR 0x0e61
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_CMD_OPEN(cmd, dpci_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpci_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_of_priorities);\
- MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_ATTRIBUTES(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, (attr)->id);\
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->num_of_priorities);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_PEER_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->peer_id);\
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_of_priorities);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_LINK_STATE(cmd, up) \
- MC_RSP_OP(cmd, 0, 0, 1, int, up)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority);\
- MC_CMD_OP(cmd, 0, 48, 4, enum dpci_dest, cfg->dest_cfg.dest_type);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
- MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_CMD_GET_RX_QUEUE(cmd, priority) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_RX_QUEUE(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
- MC_RSP_OP(cmd, 0, 48, 4, enum dpci_dest, attr->dest_cfg.dest_type);\
- MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
- MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_CMD_GET_TX_QUEUE(cmd, priority) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_TX_QUEUE(cmd, attr) \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCI_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
+#define DPCI_CMD_BASE_VERSION 1
+#define DPCI_CMD_BASE_VERSION_V2 2
+#define DPCI_CMD_ID_OFFSET 4
+
+#define DPCI_CMD_V1(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION)
+#define DPCI_CMD_V2(id) ((id << DPCI_CMD_ID_OFFSET) | DPCI_CMD_BASE_VERSION_V2)
+/* Command IDs */
+#define DPCI_CMDID_CLOSE DPCI_CMD_V1(0x800)
+#define DPCI_CMDID_OPEN DPCI_CMD_V1(0x807)
+#define DPCI_CMDID_CREATE DPCI_CMD_V2(0x907)
+#define DPCI_CMDID_DESTROY DPCI_CMD_V1(0x987)
+#define DPCI_CMDID_GET_API_VERSION DPCI_CMD_V1(0xa07)
+
+#define DPCI_CMDID_ENABLE DPCI_CMD_V1(0x002)
+#define DPCI_CMDID_DISABLE DPCI_CMD_V1(0x003)
+#define DPCI_CMDID_GET_ATTR DPCI_CMD_V1(0x004)
+#define DPCI_CMDID_RESET DPCI_CMD_V1(0x005)
+#define DPCI_CMDID_IS_ENABLED DPCI_CMD_V1(0x006)
+
+#define DPCI_CMDID_SET_RX_QUEUE DPCI_CMD_V1(0x0e0)
+#define DPCI_CMDID_GET_LINK_STATE DPCI_CMD_V1(0x0e1)
+#define DPCI_CMDID_GET_PEER_ATTR DPCI_CMD_V1(0x0e2)
+#define DPCI_CMDID_GET_RX_QUEUE DPCI_CMD_V1(0x0e3)
+#define DPCI_CMDID_GET_TX_QUEUE DPCI_CMD_V1(0x0e4)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPCI_MASK(field) \
+ GENMASK(DPCI_##field##_SHIFT + DPCI_##field##_SIZE - 1, \
+ DPCI_##field##_SHIFT)
+#define dpci_set_field(var, field, val) \
+ ((var) |= (((val) << DPCI_##field##_SHIFT) & DPCI_MASK(field)))
+#define dpci_get_field(var, field) \
+ (((var) & DPCI_MASK(field)) >> DPCI_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpci_cmd_open {
+ uint32_t dpci_id;
+};
+
+struct dpci_cmd_create {
+ uint8_t num_of_priorities;
+ uint8_t pad[15];
+ uint32_t options;
+};
+
+struct dpci_cmd_destroy {
+ uint32_t dpci_id;
+};
+
+#define DPCI_ENABLE_SHIFT 0
+#define DPCI_ENABLE_SIZE 1
+
+struct dpci_rsp_is_enabled {
+	/* only the LSB */
+ uint8_t en;
+};
+
+struct dpci_rsp_get_attr {
+ uint32_t id;
+ uint16_t pad;
+ uint8_t num_of_priorities;
+};
+
+struct dpci_rsp_get_peer_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_of_priorities;
+};
+
+#define DPCI_UP_SHIFT 0
+#define DPCI_UP_SIZE 1
+
+struct dpci_rsp_get_link_state {
+	/* only the LSB */
+ uint8_t up;
+};
+
+#define DPCI_DEST_TYPE_SHIFT 0
+#define DPCI_DEST_TYPE_SIZE 4
+
+struct dpci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad1;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+};
+
+struct dpci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
#endif /* _FSL_DPCI_CMD_H */
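The dpci_set_field()/dpci_get_field() helpers introduced above exist so sub-byte fields such as the 4-bit dest_type can be packed without open-coded shifts at every call site. A minimal standalone demo (outside this patch), with a local GENMASK stand-in assumed to behave like the kernel's:

/* Demo: pack and unpack a 4-bit field with the DPCI accessor macros. */
#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)  (((~0U) << (l)) & (~0U >> (31 - (h))))

#define DPCI_DEST_TYPE_SHIFT 0
#define DPCI_DEST_TYPE_SIZE  4

#define DPCI_MASK(field) \
	GENMASK(DPCI_##field##_SHIFT + DPCI_##field##_SIZE - 1, \
		DPCI_##field##_SHIFT)
#define dpci_set_field(var, field, val) \
	((var) |= (((val) << DPCI_##field##_SHIFT) & DPCI_MASK(field)))
#define dpci_get_field(var, field) \
	(((var) & DPCI_MASK(field)) >> DPCI_##field##_SHIFT)

int main(void)
{
	uint8_t dest_type = 0;

	dpci_set_field(dest_type, DEST_TYPE, 2); /* e.g. a dpci_dest value */
	assert(dpci_get_field(dest_type, DEST_TYPE) == 2);
	return 0;
}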
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 0ed9db56..1da807f0 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -52,42 +52,14 @@ struct fsl_mc_io;
*/
#define DPCON_INVALID_DPIO_ID (int)(-1)
-/**
- * dpcon_open() - Open a control session for the specified object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpcon_id: DPCON unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpcon_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object.
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpcon_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpcon_id,
- uint16_t *token);
+ uint32_t cmd_flags,
+ int dpcon_id,
+ uint16_t *token);
-/**
- * dpcon_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpcon_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpcon_cfg - Structure representing DPCON configuration
@@ -97,109 +69,39 @@ struct dpcon_cfg {
uint8_t num_priorities;
};
-/**
- * dpcon_create() - Create the DPCON object.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPCON object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpcon_cfg *cfg,
- uint32_t *obj_id);
+int dpcon_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpcon_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpcon_destroy() - Destroy the DPCON object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpcon_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
+int dpcon_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t obj_id);
-/**
- * dpcon_enable() - Enable the DPCON
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpcon_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpcon_disable() - Disable the DPCON
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpcon_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpcon_is_enabled() - Check if the DPCON is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_is_enabled(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
-/**
- * dpcon_reset() - Reset the DPCON, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpcon_attr - Structure representing DPCON attributes
- * @id: DPCON object ID
- * @qbman_ch_id: Channel ID to be used by dequeue operation
- * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
*/
struct dpcon_attr {
int id;
@@ -207,29 +109,11 @@ struct dpcon_attr {
uint8_t num_priorities;
};
-/**
- * dpcon_get_attributes() - Retrieve DPCON attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPCON object
- * @attr: Object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpcon_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpcon_attr *attr);
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpcon_attr *attr);
-/**
- * dpcon_get_api_version() - Get Data Path Concentrator API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path concentrator API
- * @minor_ver: Minor version of data path concentrator API
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpcon_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
index f7f76902..4d0522c2 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,136 +41,68 @@
#define _FSL_DPCON_CMD_H
/* DPCON Version */
-#define DPCON_VER_MAJOR 3
-#define DPCON_VER_MINOR 2
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 3
-/* Command IDs */
-#define DPCON_CMDID_CLOSE ((0x800 << 4) | (0x1))
-#define DPCON_CMDID_OPEN ((0x808 << 4) | (0x1))
-#define DPCON_CMDID_CREATE ((0x908 << 4) | (0x1))
-#define DPCON_CMDID_DESTROY ((0x988 << 4) | (0x1))
-#define DPCON_CMDID_GET_API_VERSION ((0xa08 << 4) | (0x1))
-
-#define DPCON_CMDID_ENABLE ((0x002 << 4) | (0x1))
-#define DPCON_CMDID_DISABLE ((0x003 << 4) | (0x1))
-#define DPCON_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
-#define DPCON_CMDID_RESET ((0x005 << 4) | (0x1))
-#define DPCON_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
-
-#define DPCON_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
-#define DPCON_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
-#define DPCON_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
-#define DPCON_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
-#define DPCON_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
-#define DPCON_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
-#define DPCON_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
-#define DPCON_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
-
-#define DPCON_CMDID_SET_NOTIFICATION ((0x100 << 4) | (0x1))
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_OPEN(cmd, dpcon_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_CREATE(cmd, cfg) \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
- MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_GET_IRQ(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
- MC_RSP_OP(cmd, 2, 32, 32, int, type);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\
- MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPCON_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id)	(((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+#define DPCON_CMDID_CREATE DPCON_CMD(0x908)
+#define DPCON_CMDID_DESTROY DPCON_CMD(0x988)
+#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+#pragma pack(push, 1)
+struct dpcon_cmd_open {
+ uint32_t dpcon_id;
+};
+
+struct dpcon_cmd_create {
+ uint8_t num_priorities;
+};
+
+struct dpcon_cmd_destroy {
+ uint32_t object_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpcon_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t pad;
+};
+
+struct dpcon_cmd_set_notification {
+ uint32_t dpio_id;
+ uint8_t priority;
+ uint8_t pad[3];
+ uint64_t user_ctx;
+};
+
+struct dpcon_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#pragma pack(pop)
#endif /* _FSL_DPCON_CMD_H */
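
A note on the refactor above: the new DPCON_CMD(id) helper packs the command
ID above a 4-bit base-version field, which is exactly the layout the old
hand-expanded literals used. A minimal, self-contained sketch (C11
_Static_assert assumed; not part of the patch) verifying that the new
encoding reproduces the old values:

#include <stdint.h>

#define DPCON_CMD_BASE_VERSION	1
#define DPCON_CMD_ID_OFFSET	4
#define DPCON_CMD(id)	(((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)

/* The old header spelled the same layout out by hand. */
_Static_assert(DPCON_CMD(0x800) == ((0x800 << 4) | 0x1), "CLOSE changed");
_Static_assert(DPCON_CMD(0x808) == ((0x808 << 4) | 0x1), "OPEN changed");
_Static_assert(DPCON_CMD(0xa08) == ((0xa08 << 4) | 0x1), "GET_API_VERSION changed");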
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index 4448cca5..3d96adfc 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -46,44 +46,19 @@
struct fsl_mc_io;
-/**
- * dpio_open() - Open a control session for the specified object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpio_id: DPIO unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpio_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and any MC portals
- * assigned to the parent container; this token must be used in
- * all subsequent commands for this specific object.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpio_id,
- uint16_t *token);
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token);
-/**
- * dpio_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* enum dpio_channel_mode - DPIO notification channel mode
- * @DPIO_NO_CHANNEL: No support for notification channel
- * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
* dedicated channel in the DPIO; user should point the queue's
* destination in the relevant interface to this DPIO
*/
@@ -94,216 +69,94 @@ enum dpio_channel_mode {
/**
* struct dpio_cfg - Structure representing DPIO configuration
- * @channel_mode: Notification channel mode
- * @num_priorities: Number of priorities for the notification channel (1-8);
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
*/
struct dpio_cfg {
- enum dpio_channel_mode channel_mode;
- uint8_t num_priorities;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
};
-/**
- * dpio_create() - Create the DPIO object.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPIO object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpio_cfg *cfg,
- uint32_t *obj_id);
-/**
- * dpio_destroy() - Destroy the DPIO object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpio_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpio_enable() - Enable the DPIO, allow I/O portal operations.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpio_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
-/**
- * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpio_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpio_is_enabled() - Check if the DPIO is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_is_enabled(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpio_reset() - Reset the DPIO, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
-/**
- * dpio_set_stashing_destination() - Set the stashing destination.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @sdest: stashing destination value
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t sdest);
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpio_get_stashing_destination() - Get the stashing destination..
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @sdest: Returns the stashing destination value
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t *sdest);
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest);
-/**
- * dpio_add_static_dequeue_channel() - Add a static dequeue channel.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @dpcon_id: DPCON object ID
- * @channel_index: Returned channel index to be used in qbman API
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int dpcon_id,
- uint8_t *channel_index);
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest);
-/**
- * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @dpcon_id: DPCON object ID
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int dpcon_id);
+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id,
+ uint8_t *channel_index);
+
+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int dpcon_id);
/**
* struct dpio_attr - Structure representing DPIO attributes
- * @id: DPIO object ID
- * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
- * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
- * @qbman_portal_id: Software portal ID
- * @channel_mode: Notification channel mode
- * @num_priorities: Number of priorities for the notification channel (1-8);
- * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
- * @qbman_version: QBMAN version
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: Offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: Offset of the software portal
+ * cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification
+ * channel (1-8); relevant only if
+ * 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
*/
struct dpio_attr {
- int id;
- uint64_t qbman_portal_ce_offset;
- uint64_t qbman_portal_ci_offset;
- uint16_t qbman_portal_id;
- enum dpio_channel_mode channel_mode;
- uint8_t num_priorities;
- uint32_t qbman_version;
- uint32_t clk;
+ int id;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint16_t qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+ uint32_t qbman_version;
+ uint32_t clk;
};
-/**
- * dpio_get_attributes() - Retrieve DPIO attributes
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPIO object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise
- */
-int dpio_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpio_attr *attr);
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr);
-/**
- * dpio_get_api_version() - Get Data Path I/O API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path i/o API
- * @minor_ver: Minor version of data path i/o API
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpio_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
index d7575077..3e9e1f68 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,82 +41,108 @@
#define _FSL_DPIO_CMD_H
/* DPIO Version */
-#define DPIO_VER_MAJOR 4
-#define DPIO_VER_MINOR 2
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+#define DPIO_CMD_BASE_VERSION 1
+#define DPIO_CMD_ID_OFFSET 4
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
/* Command IDs */
-#define DPIO_CMDID_CLOSE 0x8001
-#define DPIO_CMDID_OPEN 0x8031
-#define DPIO_CMDID_CREATE 0x9031
-#define DPIO_CMDID_DESTROY 0x9831
-#define DPIO_CMDID_GET_API_VERSION 0xa031
-
-#define DPIO_CMDID_ENABLE 0x0021
-#define DPIO_CMDID_DISABLE 0x0031
-#define DPIO_CMDID_GET_ATTR 0x0041
-#define DPIO_CMDID_RESET 0x0051
-#define DPIO_CMDID_IS_ENABLED 0x0061
-
-#define DPIO_CMDID_SET_STASHING_DEST 0x1201
-#define DPIO_CMDID_GET_STASHING_DEST 0x1211
-#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x1221
-#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x1231
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_CMD_OPEN(cmd, dpio_id) \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, dpio_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \
- cfg->channel_mode);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_RSP_GET_ATTRIBUTES(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, (attr)->id);\
- MC_RSP_OP(cmd, 0, 32, 16, uint16_t, (attr)->qbman_portal_id);\
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->num_priorities);\
- MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode,\
- (attr)->channel_mode);\
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, (attr)->qbman_portal_ce_offset);\
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (attr)->qbman_portal_ci_offset);\
- MC_RSP_OP(cmd, 3, 0, 32, uint32_t, (attr)->qbman_version);\
- MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (attr)->clk);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPIO_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_CREATE DPIO_CMD(0x903)
+#define DPIO_CMDID_DESTROY DPIO_CMD(0x983)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_IS_ENABLED DPIO_CMD(0x006)
+
+#define DPIO_CMDID_SET_IRQ_ENABLE DPIO_CMD(0x012)
+#define DPIO_CMDID_GET_IRQ_ENABLE DPIO_CMD(0x013)
+#define DPIO_CMDID_SET_IRQ_MASK DPIO_CMD(0x014)
+#define DPIO_CMDID_GET_IRQ_MASK DPIO_CMD(0x015)
+#define DPIO_CMDID_GET_IRQ_STATUS DPIO_CMD(0x016)
+#define DPIO_CMDID_CLEAR_IRQ_STATUS DPIO_CMD(0x017)
+
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
+#define DPIO_CMDID_GET_STASHING_DEST DPIO_CMD(0x121)
+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x122)
+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL DPIO_CMD(0x123)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPIO_MASK(field) \
+ GENMASK(DPIO_##field##_SHIFT + DPIO_##field##_SIZE - 1, \
+ DPIO_##field##_SHIFT)
+#define dpio_set_field(var, field, val) \
+ ((var) |= (((val) << DPIO_##field##_SHIFT) & DPIO_MASK(field)))
+#define dpio_get_field(var, field) \
+ (((var) & DPIO_MASK(field)) >> DPIO_##field##_SHIFT)
+
+#pragma pack(push, 1)
+struct dpio_cmd_open {
+ uint32_t dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_SHIFT 0
+#define DPIO_CHANNEL_MODE_SIZE 2
+
+struct dpio_cmd_create {
+ uint16_t pad1;
+ /* from LSB: channel_mode:2 */
+ uint8_t channel_mode;
+ uint8_t pad2;
+ uint8_t num_priorities;
+};
+
+struct dpio_cmd_destroy {
+ uint32_t dpio_id;
+};
+
+#define DPIO_ENABLE_SHIFT 0
+#define DPIO_ENABLE_SIZE 1
+
+struct dpio_rsp_is_enabled {
+ /* only the LSB */
+ uint8_t en;
+};
+
+#define DPIO_ATTR_CHANNEL_MODE_SHIFT 0
+#define DPIO_ATTR_CHANNEL_MODE_SIZE 4
+
+struct dpio_rsp_get_attr {
+ uint32_t id;
+ uint16_t qbman_portal_id;
+ uint8_t num_priorities;
+ /* from LSB: channel_mode:4 */
+ uint8_t channel_mode;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint32_t qbman_version;
+ uint32_t pad;
+ uint32_t clk;
+};
+
+struct dpio_stashing_dest {
+ uint8_t sdest;
+};
+
+struct dpio_cmd_static_dequeue_channel {
+ uint32_t dpcon_id;
+};
+
+struct dpio_rsp_add_static_dequeue_channel {
+ uint8_t channel_index;
+};
+
+struct dpio_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+#pragma pack(pop)
#endif /* _FSL_DPIO_CMD_H */
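
The DPIO_MASK()/dpio_set_field()/dpio_get_field() helpers above replace the
MC_CMD_OP() bit-shifting macros for fields narrower than a byte. A hedged,
runnable sketch of the mechanics (assuming a 64-bit unsigned long, as the
patch's own GENMASK() does; the value 2 is just an arbitrary 2-bit value):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG	64
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define DPIO_CHANNEL_MODE_SHIFT	0
#define DPIO_CHANNEL_MODE_SIZE	2

#define DPIO_MASK(field) \
	GENMASK(DPIO_##field##_SHIFT + DPIO_##field##_SIZE - 1, \
		DPIO_##field##_SHIFT)
#define dpio_set_field(var, field, val) \
	((var) |= (((val) << DPIO_##field##_SHIFT) & DPIO_MASK(field)))
#define dpio_get_field(var, field) \
	(((var) & DPIO_MASK(field)) >> DPIO_##field##_SHIFT)

int main(void)
{
	uint8_t byte = 0;	/* e.g. the channel_mode byte of dpio_cmd_create */

	dpio_set_field(byte, CHANNEL_MODE, 2);
	printf("mode=%u\n", (unsigned)dpio_get_field(byte, CHANNEL_MODE));
	return 0;
}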
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index c2ddde09..97030e4c 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,7 +40,8 @@
#ifndef __FSL_DPMNG_H
#define __FSL_DPMNG_H
-/* Management Complex General API
+/*
+ * Management Complex General API
* Contains general API for the Management Complex firmware
*/
@@ -49,10 +51,10 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 1
+#define MC_VER_MINOR 3
/**
- * struct mc_versoin
+ * struct mc_version
* @major: Major version number: incremented on API compatibility changes
* @minor: Minor version number: incremented on API additions (that are
* backward compatible); reset when major version is incremented
@@ -65,42 +67,21 @@ struct mc_version {
uint32_t revision;
};
+int mc_get_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ struct mc_version *mc_ver_info);
+
/**
* struct mc_platform
- * @svr: system version (content of platform SVR register)
- * @pvr: processor version (content of platform PVR register)
+ * @svr: System version (content of platform SVR register)
+ * @pvr: Processor version (content of platform PVR register)
*/
struct mc_soc_version {
uint32_t svr;
uint32_t pvr;
};
-/**
- * mc_get_version() - Retrieves the Management Complex firmware
- * version information
- * @mc_io: Pointer to opaque I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @mc_ver_info: Returned version information structure
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int mc_get_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- struct mc_version *mc_ver_info);
-
-/**
- * mc_get_soc_version() - Retrieves the Management Complex firmware
- * version information
- * @mc_io: Pointer to opaque I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @mc_platform_info: Returned version information structure. The structure
- * contains the values of SVR and PVR registers. Please consult platform
- * specific reference manual for detailed information.
- *
- * Return: '0' on Success; Error code otherwise.
- */
int mc_get_soc_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
struct mc_soc_version *mc_platform_info);
-
#endif /* __FSL_DPMNG_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
index 3a36b6dc..4c0a6297 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,26 +37,32 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
+
#ifndef __FSL_DPMNG_CMD_H
#define __FSL_DPMNG_CMD_H
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
/* Command IDs */
-#define DPMNG_CMDID_GET_VERSION 0x8311
-#define DPMNG_CMDID_GET_SOC_VERSION 0x8321
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMNG_RSP_GET_VERSION(cmd, mc_ver_info) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_ver_info->revision); \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_ver_info->major); \
- MC_RSP_OP(cmd, 1, 0, 32, uint32_t, mc_ver_info->minor); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPMNG_RSP_GET_SOC_VERSION(cmd, mc_soc_version) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mc_soc_version->svr); \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, mc_soc_version->pvr); \
-} while (0)
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+#define DPMNG_CMDID_GET_SOC_VERSION DPMNG_CMD(0x832)
+
+#pragma pack(push, 1)
+struct dpmng_rsp_get_version {
+ uint32_t revision;
+ uint32_t version_major;
+ uint32_t version_minor;
+};
+
+struct dpmng_rsp_get_soc_version {
+ uint32_t svr;
+ uint32_t pvr;
+};
+
+#pragma pack(pop)
#endif /* __FSL_DPMNG_CMD_H */
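
With the response macros gone, callers overlay the packed little-endian
structs on the command parameter area. A sketch of how the version response
would now be decoded (loosely mirroring what mc_get_version() does; the
helper name read_version_example is hypothetical):

static void read_version_example(struct mc_command *cmd,
				 struct mc_version *mc_ver_info)
{
	struct dpmng_rsp_get_version *rsp_params;

	/* cmd->params is the raw response payload; the struct maps it. */
	rsp_params = (struct dpmng_rsp_get_version *)cmd->params;
	mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
	mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
	mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
}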
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index 0ca4345c..2cec29ea 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,146 +40,118 @@
#ifndef __FSL_MC_CMD_H
#define __FSL_MC_CMD_H
-#define MC_CMD_NUM_OF_PARAMS 7
-
-#define MAKE_UMASK64(_width) \
- ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
- (uint64_t)-1))
+#include <rte_byteorder.h>
+#include <stdint.h>
-static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
-{
- return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
-}
+#define MC_CMD_NUM_OF_PARAMS 7
-static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
-{
- return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
-}
+#define phys_addr_t uint64_t
+
+#define u64 uint64_t
+#define u32 uint32_t
+#define u16 uint16_t
+#define u8 uint8_t
+
+#define cpu_to_le64 rte_cpu_to_le_64
+#define cpu_to_le32 rte_cpu_to_le_32
+#define cpu_to_le16 rte_cpu_to_le_16
+
+#define le64_to_cpu rte_le_to_cpu_64
+#define le32_to_cpu rte_le_to_cpu_32
+#define le16_to_cpu rte_le_to_cpu_16
+
+#define BITS_PER_LONG 64
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+struct mc_cmd_header {
+ union {
+ struct {
+ uint8_t src_id;
+ uint8_t flags_hw;
+ uint8_t status;
+ uint8_t flags_sw;
+ uint16_t token;
+ uint16_t cmd_id;
+ };
+ uint32_t word[2];
+ };
+};
struct mc_command {
uint64_t header;
uint64_t params[MC_CMD_NUM_OF_PARAMS];
};
-/**
- * enum mc_cmd_status - indicates MC status at command response
- * @MC_CMD_STATUS_OK: Completed successfully
- * @MC_CMD_STATUS_READY: Ready to be processed
- * @MC_CMD_STATUS_AUTH_ERR: Authentication error
- * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege
- * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error
- * @MC_CMD_STATUS_CONFIG_ERR: Configuration error
- * @MC_CMD_STATUS_TIMEOUT: Operation timed out
- * @MC_CMD_STATUS_NO_RESOURCE: No resources
- * @MC_CMD_STATUS_NO_MEMORY: No memory available
- * @MC_CMD_STATUS_BUSY: Device is busy
- * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation
- * @MC_CMD_STATUS_INVALID_STATE: Invalid state
- */
-enum mc_cmd_status {
- MC_CMD_STATUS_OK = 0x0,
- MC_CMD_STATUS_READY = 0x1,
- MC_CMD_STATUS_AUTH_ERR = 0x3,
- MC_CMD_STATUS_NO_PRIVILEGE = 0x4,
- MC_CMD_STATUS_DMA_ERR = 0x5,
- MC_CMD_STATUS_CONFIG_ERR = 0x6,
- MC_CMD_STATUS_TIMEOUT = 0x7,
- MC_CMD_STATUS_NO_RESOURCE = 0x8,
- MC_CMD_STATUS_NO_MEMORY = 0x9,
- MC_CMD_STATUS_BUSY = 0xA,
- MC_CMD_STATUS_UNSUPPORTED_OP = 0xB,
- MC_CMD_STATUS_INVALID_STATE = 0xC
+struct mc_rsp_create {
+ uint32_t object_id;
};
-/* MC command flags */
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
-/**
- * High priority flag
- */
-#define MC_CMD_FLAG_PRI 0x00008000
-/**
- * Command completion flag
+/*
+ * MC command flags
*/
-#define MC_CMD_FLAG_INTR_DIS 0x01000000
-/**
- * Command ID field offset
- */
-#define MC_CMD_HDR_CMDID_O 48
-/**
- * Command ID field size
- */
-#define MC_CMD_HDR_CMDID_S 16
-/**
- * Token field offset
- */
-#define MC_CMD_HDR_TOKEN_O 32
-/**
- * Token field size
- */
-#define MC_CMD_HDR_TOKEN_S 16
-/**
- * Status field offset
- */
-#define MC_CMD_HDR_STATUS_O 16
-/**
- * Status field size
- */
-#define MC_CMD_HDR_STATUS_S 8
-/**
- * Flags field offset
- */
-#define MC_CMD_HDR_FLAGS_O 0
-/**
- * Flags field size
- */
-#define MC_CMD_HDR_FLAGS_S 32
-/**
- * Command flags mask
- */
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Command completion flag */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
-#define MC_CMD_HDR_READ_STATUS(_hdr) \
- ((enum mc_cmd_status)mc_dec((_hdr), \
- MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
-#define MC_CMD_HDR_READ_TOKEN(_hdr) \
- ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ uint64_t header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
-#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \
- ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg)))
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ hdr->word[0] |= cpu_to_le32(cmd_flags & MC_CMD_HDR_FLAGS_MASK);
-#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
- (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width)))
+ return header;
+}
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+static inline uint16_t mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint16_t token = le16_to_cpu(hdr->token);
-#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
- (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
+ return token;
+}
-/* cmd, param, offset, width, type, arg_name */
-#define CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, object_id) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, object_id)
+static inline uint32_t mc_cmd_read_object_id(struct mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
-/* cmd, param, offset, width, type, arg_name */
-#define CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id) \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, object_id)
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
-static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
- uint32_t cmd_flags,
- uint16_t token)
+static inline enum mc_cmd_status mc_cmd_read_status(struct mc_command *cmd)
{
- uint64_t hdr;
-
- hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
- hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
- (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
- hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
- hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
- MC_CMD_STATUS_READY);
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ uint8_t status = hdr->status;
- return hdr;
+ return (enum mc_cmd_status)status;
}
/**
@@ -191,20 +163,17 @@ static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
static inline void mc_write_command(struct mc_command __iomem *portal,
struct mc_command *cmd)
{
- int i;
- uint32_t word;
+ struct mc_cmd_header *cmd_header = (struct mc_cmd_header *)&cmd->header;
char *header = (char *)&portal->header;
+ int i;
/* copy command parameters into the portal */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
iowrite64(cmd->params[i], &portal->params[i]);
/* submit the command by writing the header */
- word = (uint32_t)mc_dec(cmd->header, 32, 32);
- iowrite32(word, (((uint32_t *)header) + 1));
-
- word = (uint32_t)mc_dec(cmd->header, 0, 32);
- iowrite32(word, (uint32_t *)header);
+ iowrite32(le32_to_cpu(cmd_header->word[1]), (((uint32_t *)header) + 1));
+ iowrite32(le32_to_cpu(cmd_header->word[0]), (uint32_t *)header);
}
/**
@@ -225,7 +194,7 @@ static inline enum mc_cmd_status mc_read_response(
/* Copy command response header from MC portal: */
resp->header = ioread64(&portal->header);
- status = MC_CMD_HDR_READ_STATUS(resp->header);
+ status = mc_cmd_read_status(resp);
if (status != MC_CMD_STATUS_OK)
return status;
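
With the header modeled as a packed struct, building and decoding a command
becomes ordinary field access plus endian conversion. A hedged sketch of the
resulting call pattern (the helper open_dpio_example is hypothetical; it
loosely mirrors what dpio_open() now does):

static int open_dpio_example(struct fsl_mc_io *mc_io, uint16_t *token)
{
	struct mc_command cmd = { 0 };
	int err;

	/* id, priority flags and READY status packed into one 64-bit word */
	cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, CMD_PRI_LOW, 0);
	/* the dpio id would be written via struct dpio_cmd_open here */

	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	*token = mc_cmd_hdr_read_token(&cmd);
	return 0;
}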
diff --git a/drivers/bus/fslmc/mc/fsl_mc_sys.h b/drivers/bus/fslmc/mc/fsl_mc_sys.h
index ebada60e..d803205e 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_sys.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_sys.h
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -65,40 +66,31 @@ struct fsl_mc_io {
#include <sys/uio.h>
#include <linux/byteorder/little_endian.h>
-#define cpu_to_le64(x) __cpu_to_le64(x)
#ifndef dmb
#define dmb() {__asm__ __volatile__("" : : : "memory"); }
#endif
-#define __iormb() dmb()
-#define __iowmb() dmb()
-#define __arch_getq(a) (*(volatile unsigned long *)(a))
-#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v))
-#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v))
-#define readq(c) \
+#define __iormb() dmb()
+#define __iowmb() dmb()
+#define __arch_getq(a) (*(volatile uint64_t *)(a))
+#define __arch_putq(v, a) (*(volatile uint64_t *)(a) = (v))
+#define __arch_putq32(v, a) (*(volatile uint32_t *)(a) = (v))
+#define readq(c) \
({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
-#define writeq(v, c) \
+#define writeq(v, c) \
({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
#define writeq32(v, c) \
({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
-#define ioread64(_p) readq(_p)
-#define iowrite64(_v, _p) writeq(_v, _p)
-#define iowrite32(_v, _p) writeq32(_v, _p)
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+#define iowrite32(_v, _p) writeq32(_v, _p)
#define __iomem
-struct fsl_mc_io {
- void *regs;
-};
-
-#ifndef ENOTSUP
-#define ENOTSUP 95
-#endif
-
/*GPP is supposed to use MC commands with low priority*/
#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
-struct mc_command;
-
-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
+struct fsl_mc_io {
+ void *regs;
+};
#endif /* __linux_driver__ */
diff --git a/drivers/bus/fslmc/mc/mc_sys.c b/drivers/bus/fslmc/mc/mc_sys.c
index 45731658..f0f9a268 100644
--- a/drivers/bus/fslmc/mc/mc_sys.c
+++ b/drivers/bus/fslmc/mc/mc_sys.c
@@ -5,6 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -86,6 +87,7 @@ static int mc_status_to_error(enum mc_cmd_status status)
int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
{
enum mc_cmd_status status;
+ uint64_t response;
if (!mc_io || !mc_io->regs)
return -EACCES;
@@ -97,7 +99,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
/* Spin until status changes */
do {
- status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs));
+ response = ioread64(mc_io->regs);
+ status = mc_cmd_read_status((struct mc_command *)&response);
/* --- Call wait function here to prevent blocking ---
* Change the loop condition accordingly to exit on timeout.
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 33f9eedf..334e1f5a 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -48,7 +48,7 @@
#include <rte_ethdev.h>
#include <fslmc_logs.h>
-#include <fslmc_vfio.h>
+#include <rte_fslmc.h>
#include <mc/fsl_dpbp.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
@@ -58,7 +58,7 @@ static struct dpbp_dev_list dpbp_dev_list
= TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
static int
-dpaa2_create_dpbp_device(struct fslmc_vfio_device *vdev __rte_unused,
+dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
struct vfio_device_info *obj_info __rte_unused,
int dpbp_id)
{
@@ -98,7 +98,7 @@ dpaa2_create_dpbp_device(struct fslmc_vfio_device *vdev __rte_unused,
TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
- PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpbp.%d]", dpbp_id);
+ RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id);
return 0;
}
@@ -129,8 +129,15 @@ void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
}
}
+int dpaa2_dpbp_supported(void)
+{
+ if (TAILQ_EMPTY(&dpbp_dev_list))
+ return -1;
+ return 0;
+}
+
static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
- .object_id = DPAA2_MC_DPBP_DEVID,
+ .dev_type = DPAA2_BPOOL,
.create = dpaa2_create_dpbp_device,
};
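
dpaa2_dpbp_supported() gives callers a cheap probe for whether any DPBP
object was discovered during the VFIO scan. A likely call pattern in a
hypothetical consumer (sketch only):

	if (dpaa2_dpbp_supported() < 0) {
		RTE_LOG(ERR, PMD, "DPAA2: no DPBP devices available\n");
		return -ENODEV;	/* hardware buffer pools not usable */
	}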
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 478e4f7e..ae189c72 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -47,7 +47,7 @@
#include <rte_ethdev.h>
#include <fslmc_logs.h>
-#include <fslmc_vfio.h>
+#include <rte_fslmc.h>
#include <mc/fsl_dpci.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
@@ -57,9 +57,9 @@ static struct dpci_dev_list dpci_dev_list
= TAILQ_HEAD_INITIALIZER(dpci_dev_list); /*!< DPCI device list */
static int
-rte_dpaa2_create_dpci_device(struct fslmc_vfio_device *vdev __rte_unused,
+rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
struct vfio_device_info *obj_info __rte_unused,
- int dpci_id)
+ int dpci_id)
{
struct dpaa2_dpci_dev *dpci_node;
struct dpci_attr attr;
@@ -140,7 +140,7 @@ rte_dpaa2_create_dpci_device(struct fslmc_vfio_device *vdev __rte_unused,
TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
- PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpci.%d]", dpci_id);
+ RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpci.%d]\n", dpci_id);
return 0;
}
@@ -172,7 +172,7 @@ void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci)
}
static struct rte_dpaa2_object rte_dpaa2_dpci_obj = {
- .object_id = DPAA2_MC_DPCI_DEVID,
+ .dev_type = DPAA2_CI,
.create = rte_dpaa2_create_dpci_device,
};
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 283441b4..f00070f3 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -59,7 +59,7 @@
#include <rte_dev.h>
#include <fslmc_logs.h>
-#include <fslmc_vfio.h>
+#include <rte_fslmc.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
#include <mc/fsl_dpmng.h>
@@ -90,20 +90,22 @@ static int dpaa2_cluster_sz = 2;
* Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
* Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
*/
-
-/* Set the STASH Destination depending on Current CPU ID.
- * e.g. Valid values of SDEST are 4,5,6,7. Where,
- * CPU 0-1 will have SDEST 4
- * CPU 2-3 will have SDEST 5.....and so on.
+/* For the LX2160 platform there are eight clusters, two CPUs each,
+ * with the following mapping:
+ * Cluster 1 (ID = x00) : CPU0, CPU1;
+ * Cluster 2 (ID = x01) : CPU2, CPU3;
+ * Cluster 3 (ID = x02) : CPU4, CPU5;
+ * Cluster 4 (ID = x03) : CPU6, CPU7;
+ * Cluster 5 (ID = x04) : CPU8, CPU9;
+ * Cluster 6 (ID = x05) : CPU10, CPU11;
+ * Cluster 7 (ID = x06) : CPU12, CPU13;
+ * Cluster 8 (ID = x07) : CPU14, CPU15;
*/
+
static int
dpaa2_core_cluster_sdest(int cpu_id)
{
int x = cpu_id / dpaa2_cluster_sz;
- if (x > 3)
- x = 3;
-
return dpaa2_core_cluster_base + x;
}
@@ -208,7 +210,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
- PMD_DRV_LOG(DEBUG, "\t Allocated DPIO Portal[%p]", dpio_dev->dpio);
+ PMD_DRV_LOG(DEBUG, "Allocated DPIO Portal[%p]", dpio_dev->dpio);
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
@@ -240,8 +242,6 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
return -1;
}
- PMD_INIT_LOG(DEBUG, "Qbman Portal ID %d", attr.qbman_portal_id);
-
/* Configure & setup SW portal */
p_des.block = NULL;
p_des.idx = attr.qbman_portal_id;
@@ -278,6 +278,10 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
dpaa2_core_cluster_base = 0x02;
dpaa2_cluster_sz = 4;
PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
+ dpaa2_core_cluster_base = 0x00;
+ dpaa2_cluster_sz = 2;
+ PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected");
}
first_time = 1;
}
@@ -428,13 +432,12 @@ dpaa2_affine_qbman_swp_sec(void)
}
static int
-dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
+dpaa2_create_dpio_device(int vdev_fd,
struct vfio_device_info *obj_info,
int object_id)
{
struct dpaa2_dpio_dev *dpio_dev;
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
- int vfio_dev_fd;
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
@@ -451,14 +454,12 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
dpio_dev->dpio = NULL;
dpio_dev->hw_id = object_id;
- dpio_dev->intr_handle.vfio_dev_fd = vdev->fd;
rte_atomic16_init(&dpio_dev->ref_count);
/* Using single portal for all devices */
dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
reg_info.index = 0;
- vfio_dev_fd = dpio_dev->intr_handle.vfio_dev_fd;
- if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
rte_free(dpio_dev);
return -1;
@@ -467,10 +468,10 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
dpio_dev->ce_size = reg_info.size;
dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- vfio_dev_fd, reg_info.offset);
+ vdev_fd, reg_info.offset);
reg_info.index = 1;
- if (ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
rte_free(dpio_dev);
return -1;
@@ -479,7 +480,7 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
dpio_dev->ci_size = reg_info.size;
dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
- vfio_dev_fd, reg_info.offset);
+ vdev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
PMD_INIT_LOG(ERR,
@@ -491,8 +492,15 @@ dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
io_space_count++;
dpio_dev->index = io_space_count;
+
+	if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
+		PMD_INIT_LOG(ERR, "Failed to set up interrupt for %d\n",
+			     dpio_dev->hw_id);
+		rte_free(dpio_dev);
+		return -1;
+	}
+
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
- PMD_INIT_LOG(DEBUG, "DPAA2: Added [dpio.%d]", object_id);
+ RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id);
return 0;
}
@@ -529,7 +537,7 @@ fail:
}
static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
- .object_id = DPAA2_MC_DPIO_DEVID,
+ .dev_type = DPAA2_IO,
.create = dpaa2_create_dpio_device,
};
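
The stashing-destination change above derives SDEST purely from the platform's
cluster base and cluster size, dropping the old clamp at cluster 3. A small
self-contained sketch of the arithmetic with the two settings the patch
configures:

static int sdest(int cpu_id, int cluster_base, int cluster_sz)
{
	return cluster_base + cpu_id / cluster_sz;
}

/*
 * LS108x: base 0x02, 4 CPUs per cluster -> CPU0-3 stash to 2, CPU4-7 to 3.
 * LX2160: base 0x00, 2 CPUs per cluster -> CPU0-1 stash to 0, ...,
 *         CPU14-15 to 7 (hence the removal of the "x > 3" clamp).
 */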
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 5d7a8282..c1b842f3 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -51,6 +51,7 @@
#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000
+#define SVR_LX2160A 0x87360000
#ifndef ETH_VLAN_HLEN
#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
@@ -124,9 +125,12 @@ struct queue_storage_info_t {
int toggle;
};
+struct dpaa2_queue;
+
typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
struct rte_event *ev);
struct dpaa2_queue {
@@ -143,6 +147,7 @@ struct dpaa2_queue {
struct queue_storage_info_t *q_storage;
struct qbman_result *cscn;
};
+ struct rte_event ev;
dpaa2_queue_cb_dqrr_t *cb;
};
@@ -309,7 +314,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
* These routines are called with help of below MACRO's
*/
-#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)
/**
@@ -365,6 +370,7 @@ void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
}
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+int dpaa2_dpbp_supported(void);
struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 529f1ea3..423087cb 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -30,32 +30,17 @@
#ifndef HEADER_COMPAT_H
#define HEADER_COMPAT_H
-#include <sched.h>
-
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdint.h>
#include <stdlib.h>
-#include <stddef.h>
#include <errno.h>
#include <string.h>
-#include <pthread.h>
-#include <net/ethernet.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <ctype.h>
#include <malloc.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <unistd.h>
-#include <sys/mman.h>
-#include <limits.h>
-#include <assert.h>
-#include <dirent.h>
-#include <inttypes.h>
#include <error.h>
+#include <linux/types.h>
#include <rte_atomic.h>
/* The following definitions are primarily to allow the single-source driver
@@ -65,53 +50,11 @@
*/
/* Required compiler attributes */
-#define __user
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
-#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
-#undef container_of
-#define container_of(ptr, type, member) ({ \
- typeof(((type *)0)->member)(*__mptr) = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
-
-#ifdef ARRAY_SIZE
-#undef ARRAY_SIZE
-#endif
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Required types */
-typedef uint8_t u8;
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
typedef uint64_t dma_addr_t;
-typedef cpu_set_t cpumask_t;
-typedef u32 compat_uptr_t;
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
-/* I/O operations */
-static inline u32 in_be32(volatile void *__p)
-{
- volatile u32 *p = __p;
- return *p;
-}
-
-static inline void out_be32(volatile void *__p, u32 val)
-{
- volatile u32 *p = __p;
- *p = val;
-}
/* Debugging */
#define prflush(fmt, args...) \
@@ -124,275 +67,46 @@ static inline void out_be32(volatile void *__p, u32 val)
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#define pr_info(fmt, args...) prflush(fmt, ##args)
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_BUS
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required.
+ */
+#define QBMAN_CCSR_TRACE
+#define QBMAN_CINH_TRACE
+#define QBMAN_CENA_TRACE
+
+#define QBMAN_CHECKING
+
#ifdef pr_debug
#undef pr_debug
#endif
-#define pr_debug(fmt, args...) {}
-#define might_sleep_if(c) {}
-#define msleep(x) {}
-#define WARN_ON(c, str) \
+#define pr_debug(fmt, args...) printf(fmt, ##args)
+#define QBMAN_BUG_ON(c) \
do { \
static int warned_##__LINE__; \
if ((c) && !warned_##__LINE__) { \
- pr_warn("%s\n", str); \
pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
warned_##__LINE__ = 1; \
} \
} while (0)
-#ifdef CONFIG_BUGON
-#define QBMAN_BUG_ON(c) WARN_ON(c, "BUG")
#else
#define QBMAN_BUG_ON(c) {}
+#define pr_debug(fmt, args...) {}
#endif
-#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
-
-/****************/
-/* Linked-lists */
-/****************/
-
-struct list_head {
- struct list_head *prev;
- struct list_head *next;
-};
-
-#define LIST_HEAD(n) \
-struct list_head n = { \
- .prev = &n, \
- .next = &n \
-}
-
-#define INIT_LIST_HEAD(p) \
-do { \
- struct list_head *__p298 = (p); \
- __p298->next = __p298; \
- __p298->prev = __p298->next; \
-} while (0)
-#define list_entry(node, type, member) \
- (type *)((void *)node - offsetof(type, member))
-#define list_empty(p) \
-({ \
- const struct list_head *__p298 = (p); \
- ((__p298->next == __p298) && (__p298->prev == __p298)); \
-})
-#define list_add(p, l) \
-do { \
- struct list_head *__p298 = (p); \
- struct list_head *__l298 = (l); \
- __p298->next = __l298->next; \
- __p298->prev = __l298; \
- __l298->next->prev = __p298; \
- __l298->next = __p298; \
-} while (0)
-#define list_add_tail(p, l) \
-do { \
- struct list_head *__p298 = (p); \
- struct list_head *__l298 = (l); \
- __p298->prev = __l298->prev; \
- __p298->next = __l298; \
- __l298->prev->next = __p298; \
- __l298->prev = __p298; \
-} while (0)
-#define list_for_each(i, l) \
- for (i = (l)->next; i != (l); i = i->next)
-#define list_for_each_safe(i, j, l) \
- for (i = (l)->next, j = i->next; i != (l); \
- i = j, j = i->next)
-#define list_for_each_entry(i, l, name) \
- for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
- i = list_entry(i->name.next, typeof(*i), name))
-#define list_for_each_entry_safe(i, j, l, name) \
- for (i = list_entry((l)->next, typeof(*i), name), \
- j = list_entry(i->name.next, typeof(*j), name); \
- &i->name != (l); \
- i = j, j = list_entry(j->name.next, typeof(*j), name))
-#define list_del(i) \
-do { \
- (i)->next->prev = (i)->prev; \
- (i)->prev->next = (i)->next; \
-} while (0)
-
/* Other miscellaneous interfaces our APIs depend on; */
-#define lower_32_bits(x) ((u32)(x))
-#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
-/* Compiler/type stuff */
-typedef unsigned int gfp_t;
-typedef uint32_t phandle;
#define __iomem
-#define EINTR 4
-#define ENODEV 19
-#define GFP_KERNEL 0
+
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
-/* memcpy() stuff - when you know alignments in advance */
-#ifdef CONFIG_TRY_BETTER_MEMCPY
-static inline void copy_words(void *dest, const void *src, size_t sz)
-{
- u32 *__dest = dest;
- const u32 *__src = src;
- size_t __sz = sz >> 2;
-
- QBMAN_BUG_ON((unsigned long)dest & 0x3);
- QBMAN_BUG_ON((unsigned long)src & 0x3);
- QBMAN_BUG_ON(sz & 0x3);
- while (__sz--)
- *(__dest++) = *(__src++);
-}
-
-static inline void copy_shorts(void *dest, const void *src, size_t sz)
-{
- u16 *__dest = dest;
- const u16 *__src = src;
- size_t __sz = sz >> 1;
-
- QBMAN_BUG_ON((unsigned long)dest & 0x1);
- QBMAN_BUG_ON((unsigned long)src & 0x1);
- QBMAN_BUG_ON(sz & 0x1);
- while (__sz--)
- *(__dest++) = *(__src++);
-}
-
-static inline void copy_bytes(void *dest, const void *src, size_t sz)
-{
- u8 *__dest = dest;
- const u8 *__src = src;
-
- while (sz--)
- *(__dest++) = *(__src++);
-}
-#else
-#define copy_words memcpy
-#define copy_shorts memcpy
-#define copy_bytes memcpy
-#endif
-
-/* Completion stuff */
-#define DECLARE_COMPLETION(n) int n = 0
-#define complete(n) { *n = 1; }
-#define wait_for_completion(n) \
-do { \
- while (!*n) { \
- bman_poll(); \
- qman_poll(); \
- } \
- *n = 0; \
-} while (0)
-
-/* Allocator stuff */
-#define kmalloc(sz, t) malloc(sz)
-#define vmalloc(sz) malloc(sz)
-#define kfree(p) { if (p) free(p); }
-static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
-{
- void *ptr = malloc(sz);
-
- if (ptr)
- memset(ptr, 0, sz);
- return ptr;
-}
-
-static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
-{
- void *p;
-
- if (posix_memalign(&p, 4096, 4096))
- return 0;
- memset(p, 0, 4096);
- return (unsigned long)p;
-}
-
-static inline void free_page(unsigned long p)
-{
- free((void *)p);
-}
-
-/* Bitfield stuff. */
-#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
-#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
-#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
-#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
-static inline unsigned long test_bits(unsigned long mask,
- volatile unsigned long *p)
-{
- return *p & mask;
-}
-
-static inline int test_bit(int idx, volatile unsigned long *bits)
-{
- return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline void set_bits(unsigned long mask, volatile unsigned long *p)
-{
- *p |= mask;
-}
-
-static inline void set_bit(int idx, volatile unsigned long *bits)
-{
- set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
-{
- *p &= ~mask;
-}
-
-static inline void clear_bit(int idx, volatile unsigned long *bits)
-{
- clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
-}
-
-static inline unsigned long test_and_set_bits(unsigned long mask,
- volatile unsigned long *p)
-{
- unsigned long ret = test_bits(mask, p);
-
- set_bits(mask, p);
- return ret;
-}
-
-static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
-{
- int ret = test_bit(idx, bits);
-
- set_bit(idx, bits);
- return ret;
-}
-
-static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
-{
- int ret = test_bit(idx, bits);
-
- clear_bit(idx, bits);
- return ret;
-}
-
-static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
-{
- while ((++idx < limit) && test_bit(idx, bits))
- ;
- return idx;
-}
-
-static inline int find_first_zero_bit(unsigned long *bits, int limit)
-{
- int idx = 0;
-
- while (test_bit(idx, bits) && (++idx < limit))
- ;
- return idx;
-}
-
-static inline u64 div64_u64(u64 n, u64 d)
-{
- return n / d;
-}
-
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)
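
One detail of the rewritten QBMAN_BUG_ON() above: the token paste in
"warned_##__LINE__" happens before __LINE__ can expand, so every expansion
declares the literal identifier warned___LINE__; it is the per-expansion
block scope, not the name, that keeps one flag per call site. A condensed
equivalent (debug builds only; otherwise the macro compiles away):

#define QBMAN_BUG_ON(c) \
do { \
	static int warned;	/* one flag per expansion's block scope */ \
	if ((c) && !warned) { \
		pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
		warned = 1; \
	} \
} while (0)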
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index ee4b772c..14159609 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -38,10 +38,6 @@ typedef uint64_t dma_addr_t;
*
*/
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-
/**
* struct qbman_block_desc - qbman block descriptor structure
* @ccsr_reg_bar: CCSR register map.
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 9e9047e2..1e656602 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -194,11 +194,38 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
/**
* struct qbman_result - structure for qbman dequeue response and/or
* notification.
- * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * @donot_manipulate_directly: the 16 32-bit words representing the whole
* possible qbman dequeue result.
*/
struct qbman_result {
- uint32_t dont_manipulate_directly[16];
+ union {
+ struct common {
+ uint8_t verb;
+ uint8_t reserved[63];
+ } common;
+ struct dq {
+ uint8_t verb;
+ uint8_t stat;
+ __le16 seqnum;
+ __le16 oprid;
+ uint8_t reserved;
+ uint8_t tok;
+ __le32 fqid;
+ uint32_t reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ uint8_t fd[32];
+ } dq;
+ struct scn {
+ uint8_t verb;
+ uint8_t stat;
+ uint8_t state;
+ uint8_t reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
};
/* TODO:
@@ -254,11 +281,21 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
/**
* struct qbman_pull_desc - the structure for pull dequeue descriptor
- * @dont_manipulate_directly: the 6 32bit data to represent the whole
- * possible settings for pull dequeue descriptor.
*/
struct qbman_pull_desc {
- uint32_t dont_manipulate_directly[6];
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct pull {
+ uint8_t verb;
+ uint8_t numf;
+ uint8_t tok;
+ uint8_t reserved;
+ uint32_t dq_src;
+ uint64_t rsp_addr;
+ uint64_t rsp_addr_virt;
+ uint8_t padding[40];
+ } pull;
+ };
};
enum qbman_pull_type_e {
@@ -292,7 +329,7 @@ void qbman_pull_desc_clear(struct qbman_pull_desc *d);
*/
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
struct qbman_result *storage,
- dma_addr_t storage_phys,
+ uint64_t storage_phys,
int stash);
/**
* qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
@@ -415,7 +452,20 @@ struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
* dequeue result.
*/
int qbman_result_has_new_result(struct qbman_swp *s,
- const struct qbman_result *dq);
+ struct qbman_result *dq);
+
+/**
+ * qbman_check_command_complete() - Check if the previously issued dq command
+ * has completed and the result is available in memory.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Return 1 if a valid dequeue result is available, or 0 otherwise.
+ */
+int qbman_check_command_complete(struct qbman_result *dq);
+
+int qbman_check_new_result(struct qbman_result *dq);
/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
@@ -537,7 +587,7 @@ int qbman_result_is_FQPN(const struct qbman_result *dq);
*
* Return the state field.
*/
-uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
/**
* qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
@@ -648,24 +698,6 @@ uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
*/
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
-/**
- * qbman_result_SCN_state_in_mem() - Get the state in notification written
- * in memory
- * @scn: the state change notification.
- *
- * Return the state.
- */
-uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);
-
-/**
- * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written
- * in memory.
- * @scn: the state change notification.
- *
- * Return the resource id.
- */
-uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
-
/* Type-specific "resource IDs". Mainly for illustration purposes, though it
* also gives the appropriate type widths.
*/
@@ -746,22 +778,35 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
/* Enqueues */
/************/
-/**
- * struct qbman_eq_desc - structure of enqueue descriptor
- * @dont_manipulate_directly: the 8 32bit data to represent the whole
- * possible qbman enqueue setting in enqueue descriptor.
- */
+/* struct qbman_eq_desc - structure of enqueue descriptor */
struct qbman_eq_desc {
- uint32_t dont_manipulate_directly[8];
+ union {
+ uint32_t donot_manipulate_directly[8];
+ struct eq {
+ uint8_t verb;
+ uint8_t dca;
+ uint16_t seqnum;
+ uint16_t orpid;
+ uint16_t reserved1;
+ uint32_t tgtid;
+ uint32_t tag;
+ uint16_t qdbin;
+ uint8_t qpri;
+ uint8_t reserved[3];
+ uint8_t wae;
+ uint8_t rspid;
+ uint64_t rsp_addr;
+ } eq;
+ };
};
/**
* struct qbman_eq_response - structure of enqueue response
- * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
* enqueue response.
*/
struct qbman_eq_response {
- uint32_t dont_manipulate_directly[16];
+ uint32_t donot_manipulate_directly[16];
};
/**
@@ -801,7 +846,7 @@ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
 * sequence number.
*/
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
- uint32_t opr_id, uint32_t seqnum, int incomplete);
+ uint16_t opr_id, uint16_t seqnum, int incomplete);
/**
* qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
@@ -810,8 +855,8 @@ void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
*/
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum);
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
/**
* qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
@@ -820,8 +865,8 @@ void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
*/
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum);
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum);
/**
* qbman_eq_desc_set_response() - Set the enqueue response info.
* @d: the enqueue descriptor
@@ -835,7 +880,7 @@ void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
* expresses a cache-warming attribute.
*/
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
- dma_addr_t storage_phys,
+ uint64_t storage_phys,
int stash);
/**
@@ -873,7 +918,7 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
* @qd_prio: the queuing destination priority.
*/
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
- uint32_t qd_bin, uint32_t qd_prio);
+ uint16_t qd_bin, uint8_t qd_prio);
/**
* qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
@@ -898,7 +943,7 @@ void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
* being rescheduled.)
*/
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
- uint32_t dqrr_idx, int park);
+ uint8_t dqrr_idx, int park);
/**
* qbman_swp_enqueue() - Issue an enqueue command.
@@ -914,19 +959,33 @@ void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
/**
- * qbman_swp_enqueue_multiple_eqdesc() - Enqueue multiple frames with separte
- * enqueue descriptors.
+ * qbman_swp_enqueue_multiple() - Enqueue multiple frames with the same
+ * eq descriptor.
* @s: the software portal used for enqueue.
- * @d: the enqueue descriptors
+ * @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
* @num_frames: the number of the frames to be enqueued.
*
 * Return the number of frames enqueued, which is 0 if the EQCR is full.
*/
-int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
int num_frames);
+/**
+ * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
+ * individual eq descriptors.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ * @num_frames: the number of the frames to be enqueued.
+ *
+ * Return the number of frames enqueued, which is 0 if the EQCR is full.
+ */
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames);
/* TODO:
* qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
@@ -943,11 +1002,20 @@ int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
/*******************/
/**
* struct qbman_release_desc - The structure for buffer release descriptor
- * @dont_manipulate_directly: the 32bit data to represent the whole
+ * @donot_manipulate_directly: the 16 32bit data to represent the whole
* possible settings of qbman release descriptor.
*/
struct qbman_release_desc {
- uint32_t dont_manipulate_directly[1];
+ union {
+ uint32_t donot_manipulate_directly[16];
+ struct br {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint32_t reserved2;
+ uint64_t buf[7];
+ } br;
+ };
};
/**
@@ -961,7 +1029,7 @@ void qbman_release_desc_clear(struct qbman_release_desc *d);
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
* @d: the qbman release descriptor.
*/
-void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
/**
* qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
@@ -1004,7 +1072,7 @@ int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
* Return 0 for success, or negative error code if the acquire command
* fails.
*/
-int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
unsigned int num_buffers);
/*****************/
@@ -1119,19 +1187,4 @@ int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
*/
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
uint64_t ctx);
-int qbman_swp_fill_ring(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- uint8_t burst_index);
-int qbman_swp_flush_ring(struct qbman_swp *s);
-void qbman_sync(void);
-int qbman_swp_send_multiple(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int frames_to_send);
-
-int qbman_check_command_complete(struct qbman_swp *s,
- const struct qbman_result *dq);
-
-int qbman_get_version(void);
#endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index dd62e9af..809770c7 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -69,21 +69,23 @@
/* Pre-defined attribute codes */
/*******************************/
-struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
-struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
/*************************/
/* SDQCR attribute codes */
/*************************/
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
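
As an illustration of how these macros pair up, the SDQCR word that qbman_swp_init() assembles further down can be built like this (a sketch; the dct/fc enum values are defined just below):

uint32_t sdq = 0;

sdq |= (qbman_sdqcr_dct_prio_ics & QB_SDQCR_DCT_MASK) << QB_SDQCR_DCT_SHIFT;
sdq |= (qbman_sdqcr_fc_up_to_3 & QB_SDQCR_FC_MASK) << QB_SDQCR_FC_SHIFT;
sdq |= (QMAN_SDQCR_TOKEN & QB_SDQCR_TOK_MASK) << QB_SDQCR_TOK_SHIFT;
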
-/* we put these here because at least some of them are required by
- * qbman_swp_init()
- */
-struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
-struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
-struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
-static struct qb_attr_code code_eq_dca_idx;
-#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
enum qbman_sdqcr_dct {
qbman_sdqcr_dct_null = 0,
qbman_sdqcr_dct_prio_ics,
@@ -96,17 +98,13 @@ enum qbman_sdqcr_fc {
qbman_sdqcr_fc_up_to_3 = 1
};
-struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
-
/* We need to keep track of which SWP triggered a pull command
* so keep an array of portal IDs and use the token field to
* be able to find the proper portal
*/
-#define MAX_QBMAN_PORTALS 35
+#define MAX_QBMAN_PORTALS 64
static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
-uint32_t qman_version;
-
/*********************************/
/* Portal constructor/destructor */
/*********************************/
@@ -128,7 +126,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
int ret;
uint32_t eqcr_pi;
- struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct qbman_swp *p = malloc(sizeof(*p));
if (!p)
return NULL;
@@ -138,9 +136,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
#endif
p->mc.valid_bit = QB_VALID_BIT;
p->sdq = 0;
- qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
- qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
- qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
atomic_set(&p->vdq.busy, 1);
p->vdq.valid_bit = QB_VALID_BIT;
p->dqrr.next_idx = 0;
@@ -149,25 +148,21 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
p->dqrr.dqrr_size = 4;
p->dqrr.reset_bug = 1;
- /* Set size of DQRR to 4, encoded in 2 bits */
- code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
} else {
p->dqrr.dqrr_size = 8;
p->dqrr.reset_bug = 0;
- /* Set size of DQRR to 8, encoded in 3 bits */
- code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
}
ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
if (ret) {
- kfree(p);
+ free(p);
pr_err("qbman_swp_sys_init() failed %d\n", ret);
return NULL;
}
/* SDQCR needs to be initialized to 0 when no channels are
* being dequeued from or else the QMan HW will indicate an
* error. The values that were calculated above will be
- * applied when dequeues from a specific channel are enabled
+ * applied when dequeues from a specific channel are enabled.
*/
qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
@@ -188,7 +183,7 @@ void qbman_swp_finish(struct qbman_swp *p)
#endif
qbman_swp_sys_finish(&p->sys);
portal_idx_map[p->desc.idx] = NULL;
- kfree(p);
+ free(p);
}
const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
@@ -282,9 +277,9 @@ void *qbman_swp_mc_start(struct qbman_swp *p)
return ret;
}
-void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb)
{
- uint32_t *v = cmd;
+ uint8_t *v = cmd;
#ifdef QBMAN_CHECKING
QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
#endif
@@ -325,35 +320,22 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
/* Enqueue */
/***********/
-/* These should be const, eventually */
-static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
-static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
-static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
-static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
-/* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
-static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
-static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
-static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
-static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
-static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
-static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
-/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
-static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
-static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
-static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
-static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
-static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
-static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
-
-enum qbman_eq_cmd_e {
- /* No enqueue, primarily for plugging ORP gaps for dropped frames */
- qbman_eq_cmd_empty,
- /* DMA an enqueue response once complete */
- qbman_eq_cmd_respond,
- /* DMA an enqueue response only if the enqueue fails */
- qbman_eq_cmd_respond_reject
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
};
+#define QB_ENQUEUE_CMD_EC_OPTION_MASK 0x3
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_PK_SHIFT 6
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
+#define QB_ENQUEUE_CMD_NLIS_SHIFT 14
+#define QB_ENQUEUE_CMD_IS_NESN_SHIFT 15
+
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
memset(d, 0, sizeof(*d));
@@ -361,115 +343,110 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d)
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_orp_en, cl, 0);
- qb_attr_code_encode(&code_eq_cmd, cl,
- respond_success ? qbman_eq_cmd_respond :
- qbman_eq_cmd_respond_reject);
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
}
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
- uint32_t opr_id, uint32_t seqnum, int incomplete)
+ uint16_t opr_id, uint16_t seqnum, int incomplete)
{
- uint32_t *cl = qb_cl(d);
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ if (respond_success)
+ d->eq.verb |= enqueue_response_always;
+ else
+ d->eq.verb |= enqueue_rejects_to_fq;
- qb_attr_code_encode(&code_eq_orp_en, cl, 1);
- qb_attr_code_encode(&code_eq_cmd, cl,
- respond_success ? qbman_eq_cmd_respond :
- qbman_eq_cmd_respond_reject);
- qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
- qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
- qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ if (incomplete)
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_NLIS_SHIFT;
+ else
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
}
-void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum)
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_orp_en, cl, 1);
- qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
- qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
- qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
- qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
- qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT);
}
-void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
- uint32_t seqnum)
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
+ uint16_t seqnum)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_orp_en, cl, 1);
- qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
- qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
- qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
- qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
- qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT;
+ d->eq.verb &= ~QB_ENQUEUE_CMD_EC_OPTION_MASK;
+ d->eq.orpid = opr_id;
+ d->eq.seqnum = seqnum;
+ d->eq.seqnum &= ~(1 << QB_ENQUEUE_CMD_NLIS_SHIFT);
+ d->eq.seqnum |= 1 << QB_ENQUEUE_CMD_IS_NESN_SHIFT;
}
void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
dma_addr_t storage_phys,
int stash)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
- qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
+ d->eq.rsp_addr = storage_phys;
+ d->eq.wae = stash;
}
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
+ d->eq.rspid = token;
}
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_qd_en, cl, 0);
- qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->eq.tgtid = fqid;
}
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
- uint32_t qd_bin, uint32_t qd_prio)
+ uint16_t qd_bin, uint8_t qd_prio)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_qd_en, cl, 1);
- qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
- qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
- qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->eq.tgtid = qdid;
+ d->eq.qdbin = qd_bin;
+ d->eq.qpri = qd_prio;
}
void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
+ if (enable)
+ d->eq.verb |= 1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT;
+ else
+ d->eq.verb &= ~(1 << QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT);
}
void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
- uint32_t dqrr_idx, int park)
+ uint8_t dqrr_idx, int park)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
if (enable) {
- qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
- qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
+ d->eq.dca = dqrr_idx;
+ if (park)
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT;
+ else
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_PK_SHIFT);
+ d->eq.dca |= 1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT;
+ } else {
+ d->eq.dca &= ~(1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT);
}
}
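
Taken together, the setters above replace the attribute-code encoders with direct writes into the eq view. A usage sketch, where the portal s and frame descriptor fd are assumed to exist and the FQID is hypothetical:

struct qbman_eq_desc d;

qbman_eq_desc_clear(&d);
qbman_eq_desc_set_no_orp(&d, 0);	/* respond only if the enqueue is rejected */
qbman_eq_desc_set_fq(&d, 0x400);	/* hypothetical frame queue id */
if (qbman_swp_enqueue(s, &d, fd) == -EBUSY)
	pr_debug("EQCR full, retry later\n");
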
#define EQAR_IDX(eqar) ((eqar) & 0x7)
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -479,20 +456,20 @@ static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
if (!EQAR_SUCCESS(eqar))
return -EBUSY;
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
- word_copy(&p[1], &cl[1], 7);
- word_copy(&p[8], fd, sizeof(*fd) >> 2);
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
/* Set the verb byte, have to substitute in the valid-bit */
lwsync();
p[0] = cl[0] | EQAR_VB(eqar);
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
return 0;
}
static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
+ const struct qbman_fd *fd)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
@@ -511,31 +488,44 @@ static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
}
p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
- word_copy(&p[1], &cl[1], 7);
- word_copy(&p[8], fd, sizeof(*fd) >> 2);
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], fd, sizeof(*fd));
lwsync();
+
/* Set the verb byte, have to substitute in the valid-bit */
p[0] = cl[0] | s->eqcr.pi_vb;
qbman_cena_write_complete_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
s->eqcr.pi++;
s->eqcr.pi &= 0xF;
s->eqcr.available--;
if (!(s->eqcr.pi & 7))
s->eqcr.pi_vb ^= QB_VALID_BIT;
+
return 0;
}
-int qbman_swp_fill_ring(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- __attribute__((unused)) uint8_t burst_index)
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
+ return qbman_swp_enqueue_array_mode(s, d, fd);
+ else /* Use ring mode by default */
+ return qbman_swp_enqueue_ring_mode(s, d, fd);
+}
+
+int qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
{
uint32_t *p;
const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci;
+ uint32_t eqcr_ci, eqcr_pi;
uint8_t diff;
+ int i, num_enqueued = 0;
+ uint64_t addr_cena;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
@@ -545,64 +535,58 @@ int qbman_swp_fill_ring(struct qbman_swp *s,
eqcr_ci, s->eqcr.ci);
s->eqcr.available += diff;
if (!diff)
- return -EBUSY;
+ return 0;
}
- p = qbman_cena_write_start_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
- /* word_copy(&p[1], &cl[1], 7); */
- memcpy(&p[1], &cl[1], 7 * 4);
- /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
- memcpy(&p[8], fd, sizeof(struct qbman_fd));
-
- /* lwsync(); */
- p[0] = cl[0] | s->eqcr.pi_vb;
-
- s->eqcr.pi++;
- s->eqcr.pi &= 0xF;
- s->eqcr.available--;
- if (!(s->eqcr.pi & 7))
- s->eqcr.pi_vb ^= QB_VALID_BIT;
-
- return 0;
-}
-int qbman_swp_flush_ring(struct qbman_swp *s)
-{
- void *ptr = s->sys.addr_cena;
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ memcpy(&p[1], &cl[1], 28);
+ memcpy(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
- dcbf((uint64_t)ptr);
- dcbf((uint64_t)ptr + 0x40);
- dcbf((uint64_t)ptr + 0x80);
- dcbf((uint64_t)ptr + 0xc0);
- dcbf((uint64_t)ptr + 0x100);
- dcbf((uint64_t)ptr + 0x140);
- dcbf((uint64_t)ptr + 0x180);
- dcbf((uint64_t)ptr + 0x1c0);
+ lwsync();
- return 0;
-}
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ if (!(eqcr_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
-void qbman_sync(void)
-{
- lwsync();
-}
+	/* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ addr_cena = (uint64_t)s->sys.addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dcbf((uint64_t *)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ eqcr_pi++;
+ eqcr_pi &= 0xF;
+ }
+ s->eqcr.pi = eqcr_pi;
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
- const struct qbman_fd *fd)
-{
- if (s->sys.eqcr_mode == qman_eqcr_vb_array)
- return qbman_swp_enqueue_array_mode(s, d, fd);
- else /* Use ring mode by default */
- return qbman_swp_enqueue_ring_mode(s, d, fd);
+ return num_enqueued;
}
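
Because the function now returns the number of frames actually placed in the EQCR (0 when the ring is full) rather than -EBUSY, callers typically loop until the whole burst is accepted. A sketch, with s, d, fds and n assumed:

int sent = 0;

while (sent < n) {
	/* returns 0 when the EQCR is currently full, so this simply spins */
	sent += qbman_swp_enqueue_multiple(s, d, &fds[sent], n - sent);
}
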
-int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int num_frames)
+int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
{
uint32_t *p;
- const uint32_t *cl = qb_cl(d);
+ const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi;
uint8_t diff;
int i, num_enqueued = 0;
@@ -627,29 +611,26 @@ int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
memcpy(&p[1], &cl[1], 28);
memcpy(&p[8], &fd[i], sizeof(*fd));
eqcr_pi++;
eqcr_pi &= 0xF;
- /*Pointing to the next enqueue descriptor*/
- cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
}
lwsync();
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi;
- cl = qb_cl(d);
for (i = 0; i < num_enqueued; i++) {
p = qbman_cena_write_start_wo_shadow(&s->sys,
QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
+ cl = qb_cl(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++;
eqcr_pi &= 0xF;
if (!(eqcr_pi & 7))
s->eqcr.pi_vb ^= QB_VALID_BIT;
- /*Pointing to the next enqueue descriptor*/
- cl += (sizeof(struct qbman_eq_desc) / sizeof(uint32_t));
}
 	/* Flush all the cachelines without load/store in between */
@@ -672,23 +653,26 @@ int qbman_swp_enqueue_multiple_eqdesc(struct qbman_swp *s,
void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
{
- struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
+ uint16_t src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
QBMAN_BUG_ON(channel_idx > 15);
- *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
+ *enabled = src | (1 << channel_idx);
}
void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
{
uint16_t dqsrc;
- struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
QBMAN_BUG_ON(channel_idx > 15);
- qb_attr_code_encode(&code, &s->sdq, !!enable);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
 	/* Read back the complete src map. If no channels are enabled
* the SDQCR must be 0 or else QMan will assert errors
*/
- dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
if (dqsrc != 0)
qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
else
@@ -700,14 +684,10 @@ void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
/***************************/
/* These should be const, eventually */
-static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
-static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
-static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
-static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
-static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
-static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
-static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
-static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
enum qb_pull_dt_e {
qb_pull_dt_channel,
@@ -725,63 +705,52 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
dma_addr_t storage_phys,
int stash)
{
- uint32_t *cl = qb_cl(d);
- /* Squiggle the pointer 'storage' into the extra 2 words of the
- * descriptor (which aren't copied to the hw command)
- */
- *(void **)&cl[4] = storage;
+ d->pull.rsp_addr_virt = (uint64_t)storage;
+
if (!storage) {
- qb_attr_code_encode(&code_pull_rls, cl, 0);
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
return;
}
- qb_attr_code_encode(&code_pull_rls, cl, 1);
- qb_attr_code_encode(&code_pull_stash, cl, !!stash);
- qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
+ d->pull.verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->pull.verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->pull.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->pull.rsp_addr = storage_phys;
}
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
{
- uint32_t *cl = qb_cl(d);
-
- QBMAN_BUG_ON(!numframes || (numframes > 16));
- qb_attr_code_encode(&code_pull_numframes, cl,
- (uint32_t)(numframes - 1));
+ d->pull.numf = numframes - 1;
}
void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_pull_token, cl, token);
+ d->pull.tok = token;
}
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_pull_dct, cl, 1);
- qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
- qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
+ d->pull.verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = fqid;
}
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
enum qbman_pull_type_e dct)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_pull_dct, cl, dct);
- qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
- qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = wqid;
}
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
enum qbman_pull_type_e dct)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_pull_dct, cl, dct);
- qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
- qb_attr_code_encode(&code_pull_dqsource, cl, chid);
+ d->pull.verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->pull.verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->pull.dq_src = chid;
}
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
@@ -793,18 +762,18 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
atomic_inc(&s->vdq.busy);
return -EBUSY;
}
- s->vdq.storage = *(void **)&cl[4];
- /* We use portal index +1 as token so that 0 still indicates
- * that the result isn't valid yet.
- */
- qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
+
+ d->pull.tok = s->sys.idx + 1;
+ s->vdq.storage = (void *)d->pull.rsp_addr_virt;
p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
- word_copy(&p[1], &cl[1], 3);
+ memcpy(&p[1], &cl[1], 12);
+
/* Set the verb byte, have to substitute in the valid-bit */
lwsync();
p[0] = cl[0] | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+
return 0;
}
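
A complete volatile-dequeue round trip with the reworked descriptor then looks like the sketch below; storage (a DMA-able array of qbman_result) and its physical address phys are assumed, and the FQID is hypothetical:

struct qbman_pull_desc pd;

qbman_pull_desc_clear(&pd);
qbman_pull_desc_set_storage(&pd, storage, phys, 1);	/* stash enabled */
qbman_pull_desc_set_numframes(&pd, 4);
qbman_pull_desc_set_fq(&pd, 0x400);	/* hypothetical FQID */
if (qbman_swp_pull(s, &pd) == 0)
	while (!qbman_check_new_result(&storage[0]))
		;	/* token flips non-zero once the response is DMA'd */
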
@@ -812,16 +781,7 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
/* Polling DQRR */
/****************/
-static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
-static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
-static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
-static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
-static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
-/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
-static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
-static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
-static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
-static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
+#define QMAN_DQRR_PI_MASK 0xf
#define QBMAN_RESULT_DQ 0x60
#define QBMAN_RESULT_FQRN 0x21
@@ -834,8 +794,6 @@ static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
#define QBMAN_RESULT_BPSCN 0x29
#define QBMAN_RESULT_CSCN_WQ 0x2a
-static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
-
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
@@ -845,8 +803,7 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
uint32_t verb;
uint32_t response_verb;
uint32_t flags;
- const struct qbman_result *dq;
- const uint32_t *p;
+ const struct qbman_result *p;
/* Before using valid-bit to detect if something is there, we have to
* handle the case of the DQRR reset bug...
@@ -859,11 +816,13 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
* will be much less efficient than all subsequent trips around
* it...
*/
- uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
- uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
+ uint8_t pi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
/* there are new entries if pi != next_idx */
if (pi == s->dqrr.next_idx)
return NULL;
+
/* if next_idx is/was the last ring index, and 'pi' is
* different, we can disable the workaround as all the ring
* entries have now been DMA'd to so valid-bit checking is
@@ -878,12 +837,12 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
s->dqrr.reset_bug = 0;
}
qbman_cena_invalidate_prefetch(&s->sys,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
- dq = qbman_cena_read_wo_shadow(&s->sys,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- p = qb_cl(dq);
- verb = qb_attr_code_decode(&code_dqrr_verb, p);
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
/* If the valid-bit isn't of the expected polarity, nothing there. Note,
 	 * in the DQRR reset bug workaround, we shouldn't need to skip this
* check, because we've already determined that a new entry is available
@@ -903,16 +862,16 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
s->dqrr.valid_bit ^= QB_VALID_BIT;
}
/* If this is the final response to a volatile dequeue command
- * indicate that the vdq is no longer busy.
+ * indicate that the vdq is no longer busy
*/
- flags = qbman_result_DQ_flags(dq);
- response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESPONSE_VERB_MASK;
if ((response_verb == QBMAN_RESULT_DQ) &&
(flags & QBMAN_DQ_STAT_VOLATILE) &&
(flags & QBMAN_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.busy);
- return dq;
+ return p;
}
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
@@ -925,80 +884,69 @@ void qbman_swp_dqrr_consume(struct qbman_swp *s,
/*********************************/
/* Polling user-provided storage */
/*********************************/
-
-int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
- const struct qbman_result *dq)
+int qbman_result_has_new_result(struct qbman_swp *s,
+ struct qbman_result *dq)
{
- /* To avoid converting the little-endian DQ entry to host-endian prior
- * to us knowing whether there is a valid entry or not (and run the
- * risk of corrupting the incoming hardware LE write), we detect in
- * hardware endianness rather than host. This means we need a different
- * "code" depending on whether we are BE or LE in software, which is
- * where DQRR_TOK_OFFSET comes in...
+ if (dq->dq.tok == 0)
+ return 0;
+
+ /*
+	 * Set the token to 0 so we will detect the change back to 1 the
+	 * next time the loop is traversed. Const is cast away here
+	 * because we want users to treat the dequeue responses as read-only.
*/
- static struct qb_attr_code code_dqrr_tok_detect =
- QB_CODE(0, DQRR_TOK_OFFSET, 8);
- /* The user trying to poll for a result treats "dq" as const. It is
- * however the same address that was provided to us non-const in the
- * first place, for directing hardware DMA to. So we can cast away the
- * const because it is mutable from our perspective.
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+	 * makes it available. E.g., we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
*/
- uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
- uint32_t token;
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+
+ return 1;
+}
- token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
- if (token == 0)
+int qbman_check_new_result(struct qbman_result *dq)
+{
+ if (dq->dq.tok == 0)
return 0;
- /* Entry is valid - overwrite token back to 0 so
- * a) If this memory is reused tokesn will be 0
- * b) If someone calls "has_new_result()" again on this entry it
- * will not appear to be new
- */
- qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
- /* Only now do we convert from hardware to host endianness. Also, as we
- * are returning success, the user has promised not to call us again, so
- * there's no risk of us converting the endianness twice...
+ /*
+	 * Set the token to 0 so we will detect the change back to 1 the
+	 * next time the loop is traversed. Const is cast away here
+	 * because we want users to treat the dequeue responses as read-only.
*/
- make_le32_n(p, 16);
+ ((struct qbman_result *)dq)->dq.tok = 0;
+
return 1;
}
-int qbman_check_command_complete(struct qbman_swp *s,
- const struct qbman_result *dq)
+int qbman_check_command_complete(struct qbman_result *dq)
{
- /* To avoid converting the little-endian DQ entry to host-endian prior
- * to us knowing whether there is a valid entry or not (and run the
- * risk of corrupting the incoming hardware LE write), we detect in
- * hardware endianness rather than host. This means we need a different
- * "code" depending on whether we are BE or LE in software, which is
- * where DQRR_TOK_OFFSET comes in...
- */
- static struct qb_attr_code code_dqrr_tok_detect =
- QB_CODE(0, DQRR_TOK_OFFSET, 8);
- /* The user trying to poll for a result treats "dq" as const. It is
- * however the same address that was provided to us non-const in the
- * first place, for directing hardware DMA to. So we can cast away the
- * const because it is mutable from our perspective.
- */
- uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
- uint32_t token;
+ struct qbman_swp *s;
- token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
- if (token == 0)
+ if (dq->dq.tok == 0)
return 0;
- /* TODO: Remove qbman_swp from parameters and make it a local
- * once we've tested the reserve portal map change
- */
- s = portal_idx_map[token - 1];
- /* When token is set it indicates that VDQ command has been fetched
- * by qbman and is working on it. It is safe for software to issue
- * another VDQ command, so incrementing the busy variable.
+
+ s = portal_idx_map[dq->dq.tok - 1];
+ /*
+ * VDQCR "no longer busy" hook - not quite the same as DQRR, because the
+ * fact "VDQCR" shows busy doesn't mean that we hold the result that
+	 * makes it available. E.g., we may be looking at our 10th dequeue result,
+ * having released VDQCR after the 1st result and it is now busy due to
+ * some other command!
*/
if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
atomic_inc(&s->vdq.busy);
}
+
return 1;
}
@@ -1006,23 +954,10 @@ int qbman_check_command_complete(struct qbman_swp *s,
/* Categorising qbman results */
/********************************/
-static struct qb_attr_code code_result_in_mem =
- QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
-
static inline int __qbman_result_is_x(const struct qbman_result *dq,
- uint32_t x)
-{
- const uint32_t *p = qb_cl(dq);
- uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
-
- return (response_verb == x);
-}
-
-static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
- uint32_t x)
+ uint8_t x)
{
- const uint32_t *p = qb_cl(dq);
- uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
+ uint8_t response_verb = dq->dq.verb & QBMAN_RESPONSE_VERB_MASK;
return (response_verb == x);
}
@@ -1044,28 +979,28 @@ int qbman_result_is_CDAN(const struct qbman_result *dq)
int qbman_result_is_CSCN(const struct qbman_result *dq)
{
- return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_MEM) ||
__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
}
int qbman_result_is_BPSCN(const struct qbman_result *dq)
{
- return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
+ return __qbman_result_is_x(dq, QBMAN_RESULT_BPSCN);
}
int qbman_result_is_CGCU(const struct qbman_result *dq)
{
- return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CGCU);
}
int qbman_result_is_FQRN(const struct qbman_result *dq)
{
- return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRN);
}
int qbman_result_is_FQRNI(const struct qbman_result *dq)
{
- return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQRNI);
}
int qbman_result_is_FQPN(const struct qbman_result *dq)
@@ -1079,109 +1014,62 @@ int qbman_result_is_FQPN(const struct qbman_result *dq)
/* These APIs assume qbman_result_is_DQ() is TRUE */
-uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
+uint8_t qbman_result_DQ_flags(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return qb_attr_code_decode(&code_dqrr_stat, p);
+ return dq->dq.stat;
}
uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
+ return dq->dq.seqnum;
}
uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
+ return dq->dq.oprid;
}
uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return qb_attr_code_decode(&code_dqrr_fqid, p);
+ return dq->dq.fqid;
}
uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return qb_attr_code_decode(&code_dqrr_byte_count, p);
+ return dq->dq.fq_byte_cnt;
}
uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return qb_attr_code_decode(&code_dqrr_frame_count, p);
+ return dq->dq.fq_frm_cnt;
}
uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
{
- const uint64_t *p = (const uint64_t *)qb_cl(dq);
-
- return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
+ return dq->dq.fqd_ctx;
}
const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
{
- const uint32_t *p = qb_cl(dq);
-
- return (const struct qbman_fd *)&p[8];
+ return (const struct qbman_fd *)&dq->dq.fd[0];
}
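
These accessors are normally used from a DQRR polling loop such as the following sketch, where handle_frame() is a hypothetical callback:

const struct qbman_result *dq;

while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
	if (qbman_result_is_DQ(dq))
		handle_frame(qbman_result_DQ_fd(dq),	/* hypothetical */
			     qbman_result_DQ_fqid(dq));
	qbman_swp_dqrr_consume(s, dq);
}
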
/**************************************/
/* Parsing state-change notifications */
/**************************************/
-
-static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
-static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
-static struct qb_attr_code code_scn_state_in_mem =
- QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
-static struct qb_attr_code code_scn_rid_in_mem =
- QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
-static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
-
uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
{
- const uint32_t *p = qb_cl(scn);
-
- return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
+ return scn->scn.state;
}
uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
{
- const uint32_t *p = qb_cl(scn);
-
- return qb_attr_code_decode(&code_scn_rid, p);
+ return scn->scn.rid_tok;
}
uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
{
- const uint64_t *p = (const uint64_t *)qb_cl(scn);
-
- return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
-}
-
-uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
-{
- const uint32_t *p = qb_cl(scn);
-
- return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
-}
-
-uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
-{
- const uint32_t *p = qb_cl(scn);
- uint32_t result_rid;
-
- result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
- return make_le24(result_rid);
+ return scn->scn.ctx;
}
/*****************/
@@ -1189,34 +1077,27 @@ uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
/*****************/
uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
{
- return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0x3FFF;
}
int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
{
- return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
+ return !(int)(qbman_result_SCN_state(scn) & 0x1);
}
int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
{
- return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
+ return (int)(qbman_result_SCN_state(scn) & 0x2);
}
int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
{
- return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
+ return (int)(qbman_result_SCN_state(scn) & 0x4);
}
uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
{
- uint64_t ctx;
- uint32_t ctx_hi, ctx_lo;
-
- ctx = qbman_result_SCN_ctx(scn);
- ctx_hi = upper32(ctx);
- ctx_lo = lower32(ctx);
- return ((uint64_t)make_le32(ctx_hi) << 32 |
- (uint64_t)make_le32(ctx_lo));
+ return qbman_result_SCN_ctx(scn);
}
/*****************/
@@ -1224,52 +1105,37 @@ uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
/*****************/
uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
{
- return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
+ return (uint16_t)qbman_result_SCN_rid(scn) & 0xFFFF;
}
uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
{
- uint64_t ctx;
- uint32_t ctx_hi, ctx_lo;
-
- ctx = qbman_result_SCN_ctx(scn);
- ctx_hi = upper32(ctx);
- ctx_lo = lower32(ctx);
- return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
- (uint64_t)make_le32(ctx_lo);
+ return qbman_result_SCN_ctx(scn);
}
/******************/
/* Buffer release */
/******************/
-
-/* These should be const, eventually */
-/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
-static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
-static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
-static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
+#define QB_BR_RC_VALID_SHIFT 5
+#define QB_BR_RCDI_SHIFT 6
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
- uint32_t *cl;
-
memset(d, 0, sizeof(*d));
- cl = qb_cl(d);
- qb_attr_code_encode(&code_release_set_me, cl, 1);
+ d->br.verb = 1 << QB_BR_RC_VALID_SHIFT;
}
-void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_release_bpid, cl, bpid);
+ d->br.bpid = bpid;
}
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
- uint32_t *cl = qb_cl(d);
-
- qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
+ if (enable)
+ d->br.verb |= 1 << QB_BR_RCDI_SHIFT;
+ else
+ d->br.verb &= ~(1 << QB_BR_RCDI_SHIFT);
}
#define RAR_IDX(rar) ((rar) & 0x7)
@@ -1286,12 +1152,16 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
pr_debug("RAR=%08x\n", rar);
if (!RAR_SUCCESS(rar))
return -EBUSY;
+
QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
/* Start the release command */
p = qbman_cena_write_start_wo_shadow(&s->sys,
QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
/* Copy the caller's buffer pointers to the command */
u64_to_le32_copy(&p[2], buffers, num_buffers);
+
/* Set the verb byte, have to substitute in the valid-bit and the number
* of buffers.
*/
@@ -1299,25 +1169,38 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
p[0] = cl[0] | RAR_VB(rar) | num_buffers;
qbman_cena_write_complete_wo_shadow(&s->sys,
QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
return 0;
}
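
Releasing buffers through the 16-bit bpid interface, as a sketch (the pool id and buffer addresses are hypothetical):

struct qbman_release_desc rd;
uint64_t bufs[2] = { 0x80000000ULL, 0x80001000ULL };	/* hypothetical */

qbman_release_desc_clear(&rd);
qbman_release_desc_set_bpid(&rd, 42);	/* hypothetical pool id */
while (qbman_swp_release(s, &rd, bufs, 2) == -EBUSY)
	;	/* no RCR slot free yet; retry */
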
/*******************/
/* Buffer acquires */
/*******************/
+struct qbman_acquire_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t bpid;
+ uint8_t num;
+ uint8_t reserved2[59];
+};
-/* These should be const, eventually */
-static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
-static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
-static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
+struct qbman_acquire_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t reserved;
+ uint8_t num;
+ uint8_t reserved2[3];
+ uint64_t buf[7];
+};
-int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
unsigned int num_buffers)
{
- uint32_t *p;
- uint32_t rslt, num;
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
- QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
/* Start the management command */
p = qbman_swp_mc_start(s);
@@ -1326,59 +1209,81 @@ int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
return -EBUSY;
/* Encode the caller-provided attributes */
- qb_attr_code_encode(&code_acquire_bpid, p, bpid);
- qb_attr_code_encode(&code_acquire_num, p, num_buffers);
+ p->bpid = bpid;
+ p->num = num_buffers;
/* Complete the management command */
- p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
/* Decode the outcome */
- rslt = qb_attr_code_decode(&code_generic_rslt, p);
- num = qb_attr_code_decode(&code_acquire_r_num, p);
- QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
- QBMAN_MC_ACQUIRE);
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ACQUIRE);
/* Determine success or failure */
- if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
- bpid, rslt);
+ bpid, r->rslt);
return -EIO;
}
- QBMAN_BUG_ON(num > num_buffers);
+
+ QBMAN_BUG_ON(r->num > num_buffers);
+
/* Copy the acquired buffers to the caller's array */
- u64_from_le32_copy(buffers, &p[2], num);
- return (int)num;
+ u64_from_le32_copy(buffers, &r->buf[0], r->num);
+
+ return (int)r->num;
}
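
Since the rewritten acquire validates its arguments and checks for a missing response, a caller sketch only needs to test the sign of the return value:

uint64_t bufs[7];
int n = qbman_swp_acquire(s, 42, bufs, 7);	/* 42: hypothetical pool id */

if (n < 0)
	pr_err("acquire failed: %d\n", n);	/* -EINVAL, -EBUSY or -EIO */
/* otherwise bufs[0..n-1] now belong to software */
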
/*****************/
/* FQ management */
/*****************/
+struct qbman_alt_fq_state_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[56];
+};
-static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);
+struct qbman_alt_fq_state_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint8_t reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
uint8_t alt_fq_verb)
{
- uint32_t *p;
- uint32_t rslt;
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
/* Start the management command */
p = qbman_swp_mc_start(s);
if (!p)
return -EBUSY;
- qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
+ p->fqid = fqid & ALT_FQ_FQID_MASK;
+
/* Complete the management command */
- p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
/* Decode the outcome */
- rslt = qb_attr_code_decode(&code_generic_rslt, p);
- QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != alt_fq_verb);
/* Determine success or failure */
- if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
- fqid, alt_fq_verb, rslt);
+ fqid, alt_fq_verb, r->rslt);
return -EIO;
}
@@ -1409,10 +1314,24 @@ int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
/* Channel management */
/**********************/
-static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
-static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
-static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
-static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);
+struct qbman_cdan_ctrl_desc {
+ uint8_t verb;
+ uint8_t reserved;
+ uint16_t ch;
+ uint8_t we;
+ uint8_t ctrl;
+ uint16_t reserved2;
+ uint64_t cdan_ctx;
+ uint8_t reserved3[48];
+
+};
+
+struct qbman_cdan_ctrl_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint16_t ch;
+ uint8_t reserved[60];
+};
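
These two layouts back the CDAN configuration command driven by qbman_swp_CDAN_set() below; from the public API, arming a notification reduces to a single call, e.g. this sketch with a hypothetical channel id and context:

if (qbman_swp_CDAN_set_context_enable(s, 5, 0x12345678ULL))
	pr_err("CDAN arm failed\n");	/* channel 5 and ctx are hypothetical */
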
/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
* would be irresponsible to expose it.
@@ -1424,8 +1343,8 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
uint8_t we_mask, uint8_t cdan_en,
uint64_t ctx)
{
- uint32_t *p;
- uint32_t rslt;
+ struct qbman_cdan_ctrl_desc *p;
+ struct qbman_cdan_ctrl_rslt *r;
/* Start the management command */
p = qbman_swp_mc_start(s);
@@ -1433,22 +1352,29 @@ static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
return -EBUSY;
/* Encode the caller-provided attributes */
- qb_attr_code_encode(&code_cdan_cid, p, channelid);
- qb_attr_code_encode(&code_cdan_we, p, we_mask);
- qb_attr_code_encode(&code_cdan_en, p, cdan_en);
- qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
+ p->ch = channelid;
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = ctx;
+
/* Complete the management command */
- p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
/* Decode the outcome */
- rslt = qb_attr_code_decode(&code_generic_rslt, p);
- QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
- != QBMAN_WQCHAN_CONFIGURE);
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK)
+ != QBMAN_WQCHAN_CONFIGURE);
/* Determine success or failure */
- if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
pr_err("CDAN cQID %d failed: code = 0x%02x\n",
- channelid, rslt);
+ channelid, r->rslt);
return -EIO;
}
@@ -1497,92 +1423,3 @@ struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
return dq;
}
-
-int qbman_swp_send_multiple(struct qbman_swp *s,
- const struct qbman_eq_desc *d,
- const struct qbman_fd *fd,
- int frames_to_send)
-{
- uint32_t *p;
- const uint32_t *cl = qb_cl(d);
- uint32_t eqcr_ci;
- uint8_t diff;
- int sent = 0;
- int i;
- int initial_pi = s->eqcr.pi;
- uint64_t start_pointer;
-
- if (!s->eqcr.available) {
- eqcr_ci = s->eqcr.ci;
- s->eqcr.ci = qbman_cena_read_reg(&s->sys,
- QBMAN_CENA_SWP_EQCR_CI) & 0xF;
- diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
- eqcr_ci, s->eqcr.ci);
- if (!diff)
- goto done;
- s->eqcr.available += diff;
- }
-
- /* we are trying to send frames_to_send,
- * if we have enough space in the ring
- */
- while (s->eqcr.available && frames_to_send--) {
- p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
- QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
- /* Write command (except of first byte) and FD */
- memcpy(&p[1], &cl[1], 7 * 4);
- memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
-
- initial_pi++;
- initial_pi &= 0xF;
- s->eqcr.available--;
- sent++;
- }
-
-done:
- initial_pi = s->eqcr.pi;
- lwsync();
-
- /* in order for flushes to complete faster:
- * we use a following trick: we record all lines in 32 bit word
- */
-
- initial_pi = s->eqcr.pi;
- for (i = 0; i < sent; i++) {
- p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
- QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
-
- p[0] = cl[0] | s->eqcr.pi_vb;
- initial_pi++;
- initial_pi &= 0xF;
-
- if (!(initial_pi & 7))
- s->eqcr.pi_vb ^= QB_VALID_BIT;
- }
-
- initial_pi = s->eqcr.pi;
-
- /* We need to flush all the lines but without
- * load/store operations between them.
- * We assign start_pointer before we start loop so that
- * in loop we do not read it from memory
- */
- start_pointer = (uint64_t)s->sys.addr_cena;
- for (i = 0; i < sent; i++) {
- p = (uint32_t *)(start_pointer
- + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
- dcbf((uint64_t)p);
- initial_pi++;
- initial_pi &= 0xF;
- }
-
- /* Update producer index for the next call */
- s->eqcr.pi = initial_pi;
-
- return sent;
-}
-
-int qbman_get_version(void)
-{
- return qman_version;
-}
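The deleted qbman_swp_send_multiple() (superseded by qbman_swp_enqueue_multiple(), exported later in this patch) relied on the EQCR valid-bit handshake: hardware only consumes a ring entry once the verb's valid bit differs from the previous lap. A hypothetical helper, not part of the patch, restating that protocol:

static inline uint8_t eqcr_verb_for_next_entry(uint8_t base_verb,
					       uint8_t *pi, uint8_t *pi_vb)
{
	uint8_t verb = base_verb | *pi_vb; /* publish with current valid bit */

	*pi = (*pi + 1) & 0xF;	/* pi counts 0..15: 3-bit index + wrap bit */
	if (!(*pi & 7))		/* crossed the 8-entry ring boundary */
		*pi_vb ^= 0x80;	/* QB_VALID_BIT: flip for the next lap */
	return verb;
}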
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index 7aa1d4f6..d9f3ed7e 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -26,9 +26,14 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "qbman_private.h"
+#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
+uint32_t qman_version;
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((uint32_t)0x80)
@@ -39,7 +44,8 @@
#define QBMAN_EQCR_SIZE 8
-static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+static inline uint8_t qm_cyc_diff(uint8_t ringsize, uint8_t first,
+ uint8_t last)
{
/* 'first' is included, 'last' is excluded */
if (first <= last)
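The hunk context truncates qm_cyc_diff() after its first branch. A self-contained sketch of such a ring-distance helper, assuming the elided branch simply wraps modulo the ring size:

static inline uint8_t ring_cyc_diff(uint8_t ringsize, uint8_t first,
				    uint8_t last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	return ringsize + last - first;	/* wrapped past the end */
}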
@@ -122,138 +128,22 @@ struct qbman_swp {
* non-NULL if only if the response is complete).
*/
void *qbman_swp_mc_start(struct qbman_swp *p);
-void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint8_t cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
- uint32_t cmd_verb)
+ uint8_t cmd_verb)
{
- int loopvar;
+ int loopvar = 1000;
qbman_swp_mc_submit(swp, cmd, cmd_verb);
- DBG_POLL_START(loopvar);
do {
- DBG_POLL_CHECK(loopvar);
cmd = qbman_swp_mc_result(swp);
- } while (!cmd);
- return cmd;
-}
-
-/* ------------ */
-/* qb_attr_code */
-/* ------------ */
-
-/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
- * is either serving as a configuration command or a query result. The
- * representation is inherently little-endian, as the indexing of the words is
- * itself little-endian in nature and DPAA2 QBMan is little endian for anything
- * that crosses a word boundary too (64-bit fields are the obvious examples).
- */
-struct qb_attr_code {
- unsigned int word; /* which uint32_t[] array member encodes the field */
- unsigned int lsoffset; /* encoding offset from ls-bit */
- unsigned int width; /* encoding width. (bool must be 1.) */
-};
-
-/* Some pre-defined codes */
-extern struct qb_attr_code code_generic_verb;
-extern struct qb_attr_code code_generic_rslt;
-
-/* Macros to define codes */
-#define QB_CODE(a, b, c) { a, b, c}
-#define QB_CODE_NULL \
- QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
-
-/* Rotate a code "ms", meaning that it moves from less-significant bytes to
- * more-significant, from less-significant words to more-significant, etc. The
- * "ls" version does the inverse, from more-significant towards
- * less-significant.
- */
-static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
- unsigned int bits)
-{
- code->lsoffset += bits;
- while (code->lsoffset > 31) {
- code->word++;
- code->lsoffset -= 32;
- }
-}
-
-static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
- unsigned int bits)
-{
- /* Don't be fooled, this trick should work because the types are
- * unsigned. So the case that interests the while loop (the rotate has
- * gone too far and the word count needs to compensate for it), is
- * manifested when lsoffset is negative. But that equates to a really
- * large unsigned value, starting with lots of "F"s. As such, we can
- * continue adding 32 back to it until it wraps back round above zero,
- * to a value of 31 or less...
- */
- code->lsoffset -= bits;
- while (code->lsoffset > 31) {
- code->word--;
- code->lsoffset += 32;
- }
-}
+ } while (!cmd && loopvar--);
+ QBMAN_BUG_ON(!loopvar);
-/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
-#define qb_attr_code_for_ms(code, bits, expr) \
- for (; expr; qb_attr_code_rotate_ms(code, bits))
-#define qb_attr_code_for_ls(code, bits, expr) \
- for (; expr; qb_attr_code_rotate_ls(code, bits))
-
-/* decode a field from a cacheline */
-static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
- const uint32_t *cacheline)
-{
- return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
-}
-
-static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,
- const uint64_t *cacheline)
-{
- return cacheline[code->word / 2];
-}
-
-/* encode a field to a cacheline */
-static inline void qb_attr_code_encode(const struct qb_attr_code *code,
- uint32_t *cacheline, uint32_t val)
-{
- cacheline[code->word] =
- r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
- | e32_uint32_t(code->lsoffset, code->width, val);
-}
-
-static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
- uint64_t *cacheline, uint64_t val)
-{
- cacheline[code->word / 2] = val;
-}
-
-/* Small-width signed values (two's-complement) will decode into medium-width
- * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
- * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
- * 249. Likewise -120 would decode as 136.) This function allows the caller to
- * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
- * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).
- */
-static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
- uint32_t val)
-{
- QBMAN_BUG_ON(val >= (1u << code->width));
- /* code->width should never exceed the width of val. If it does then a
- * different function with larger val size must be used to translate
- * from unsigned to signed
- */
- QBMAN_BUG_ON(code->width > sizeof(val) * CHAR_BIT);
- /* If the high bit was set, it was encoding a negative */
- if (val >= 1u << (code->width - 1))
- return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -
- val);
- /* Otherwise, it was encoding a positive */
- return (int32_t)val;
+ return cmd;
}
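One caveat in the bounded poll above: because loopvar-- is evaluated postfix, a timeout leaves loopvar at -1, which QBMAN_BUG_ON(!loopvar) does not catch. A sketch of an equivalent loop whose assertion does fire on exhaustion:

static inline void *mc_complete_sketch(struct qbman_swp *swp, void *cmd,
				       uint8_t cmd_verb)
{
	int loopvar = 1000;	/* poll budget */

	qbman_swp_mc_submit(swp, cmd, cmd_verb);
	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar-- > 0);
	QBMAN_BUG_ON(loopvar < 0);	/* -1 only if every poll returned NULL */
	return cmd;
}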
/* ---------------------- */
@@ -274,4 +164,4 @@ static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
* an inline) is necessary to work with different descriptor types and to work
* correctly with const and non-const inputs (and similarly-qualified outputs).
*/
-#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
+#define qb_cl(d) (&(d)->donot_manipulate_directly[0])
diff --git a/drivers/bus/fslmc/qbman/qbman_private.h b/drivers/bus/fslmc/qbman/qbman_private.h
deleted file mode 100644
index 292ec6a9..00000000
--- a/drivers/bus/fslmc/qbman/qbman_private.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* Perform extra checking */
-#define QBMAN_CHECKING
-
-/* To maximise the amount of logic that is common between the Linux driver and
- * other targets (such as the embedded MC firmware), we pivot here between the
- * inclusion of two platform-specific headers.
- *
- * The first, qbman_sys_decl.h, includes any and all required system headers as
- * well as providing any definitions for the purposes of compatibility. The
- * second, qbman_sys.h, is where platform-specific routines go.
- *
- * The point of the split is that the platform-independent code (including this
- * header) may depend on platform-specific declarations, yet other
- * platform-specific routines may depend on platform-independent definitions.
- */
-
-#include "qbman_sys_decl.h"
-
-/* When things go wrong, it is a convenient trick to insert a few FOO()
- * statements in the code to trace progress. TODO: remove this once we are
- * hacking the code less actively.
- */
-#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
-
-/* Any time there is a register interface which we poll on, this provides a
- * "break after x iterations" scheme for it. It's handy for debugging, eg.
- * where you don't want millions of lines of log output from a polling loop
- * that won't, because such things tend to drown out the earlier log output
- * that might explain what caused the problem. (NB: put ";" after each macro!)
- * TODO: we should probably remove this once we're done sanitising the
- * simulator...
- */
-#define DBG_POLL_START(loopvar) (loopvar = 10)
-#define DBG_POLL_CHECK(loopvar) \
-do { \
- if (!(loopvar--)) \
- QBMAN_BUG_ON(NULL == "DBG_POLL_CHECK"); \
-} while (0)
-
-/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
- * and widths, these macro-generated encode/decode/isolate/remove inlines can
- * be used.
- *
- * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
- * where the field is located 3 bits "up" from the least-significant bit of the
- * register (ie. the field location within the 32-bit register corresponds to a
- * mask of 0x0001fff8), you would do;
- * uint16_t field = d32_uint16_t(3, 14, reg_value);
- *
- * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
- * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
- * operator) into a register at bit location 0x00080000 (19 bits "in" from the
- * LS bit), do;
- * reg_value |= e32_int(19, 1, !!field);
- *
- * If you wish to read-modify-write a register, such that you leave the 14-bit
- * field as-is but have all other fields set to zero, then "i"solate the 14-bit
- * value using;
- * reg_value = i32_uint16_t(3, 14, reg_value);
- *
- * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
- * zero) but leaving all other fields as-is;
- * reg_val = r32_int(19, 1, reg_value);
- *
- */
-#ifdef __LP64__
-#define MAKE_MASK32(width) ((uint32_t)(( 1ULL << width) - 1))
-#else
-#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
- (uint32_t)((1 << width) - 1))
-#endif
-#define DECLARE_CODEC32(t) \
-static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
-{ \
- QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
- return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
-} \
-static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
-{ \
- QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
- return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
-} \
-static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
- uint32_t val) \
-{ \
- QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
- return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
-} \
-static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
- uint32_t val) \
-{ \
- QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
- return ~(MAKE_MASK32(width) << lsoffset) & val; \
-}
-DECLARE_CODEC32(uint32_t)
-DECLARE_CODEC32(uint16_t)
-DECLARE_CODEC32(uint8_t)
-DECLARE_CODEC32(int)
-
- /*********************/
- /* Debugging assists */
- /*********************/
-
-static inline void __hexdump(unsigned long start, unsigned long end,
- unsigned long p, size_t sz, const unsigned char *c)
-{
- while (start < end) {
- unsigned int pos = 0;
- char buf[64];
- int nl = 0;
-
- pos += sprintf(buf + pos, "%08lx: ", start);
- do {
- if ((start < p) || (start >= (p + sz)))
- pos += sprintf(buf + pos, "..");
- else
- pos += sprintf(buf + pos, "%02x", *(c++));
- if (!(++start & 15)) {
- buf[pos++] = '\n';
- nl = 1;
- } else {
- nl = 0;
- if (!(start & 1))
- buf[pos++] = ' ';
- if (!(start & 3))
- buf[pos++] = ' ';
- }
- } while (start & 15);
- if (!nl)
- buf[pos++] = '\n';
- buf[pos] = '\0';
- pr_info("%s", buf);
- }
-}
-
-static inline void hexdump(const void *ptr, size_t sz)
-{
- unsigned long p = (unsigned long)ptr;
- unsigned long start = p & ~(unsigned long)15;
- unsigned long end = (p + sz + 15) & ~(unsigned long)15;
- const unsigned char *c = ptr;
-
- __hexdump(start, end, p, sz, c);
-}
-
-#include "qbman_sys.h"
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index 5dbcaa57..c216e9cf 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -40,20 +40,49 @@
* *not* to provide linux compatibility.
*/
-/* Trace the 3 different classes of read/write access to QBMan. #undef as
- * required.
- */
-#undef QBMAN_CCSR_TRACE
-#undef QBMAN_CINH_TRACE
-#undef QBMAN_CENA_TRACE
+#include "qbman_sys_decl.h"
-static inline void word_copy(void *d, const void *s, unsigned int cnt)
+/* Debugging assists */
+static inline void __hexdump(unsigned long start, unsigned long end,
+ unsigned long p, size_t sz, const unsigned char *c)
{
- uint32_t *dd = d;
- const uint32_t *ss = s;
+ while (start < end) {
+ unsigned int pos = 0;
+ char buf[64];
+ int nl = 0;
+
+ pos += sprintf(buf + pos, "%08lx: ", start);
+ do {
+ if ((start < p) || (start >= (p + sz)))
+ pos += sprintf(buf + pos, "..");
+ else
+ pos += sprintf(buf + pos, "%02x", *(c++));
+ if (!(++start & 15)) {
+ buf[pos++] = '\n';
+ nl = 1;
+ } else {
+ nl = 0;
+ if (!(start & 1))
+ buf[pos++] = ' ';
+ if (!(start & 3))
+ buf[pos++] = ' ';
+ }
+ } while (start & 15);
+ if (!nl)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+ pr_info("%s", buf);
+ }
+}
- while (cnt--)
- *(dd++) = *(ss++);
+static inline void hexdump(const void *ptr, size_t sz)
+{
+ unsigned long p = (unsigned long)ptr;
+ unsigned long start = p & ~15;
+ unsigned long end = (p + sz + 15) & ~15;
+ const unsigned char *c = ptr;
+
+ __hexdump(start, end, p, sz, c);
}
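hexdump() widens the dump window to the enclosing 16-byte boundaries and prints bytes outside [ptr, ptr + sz) as "..". A usage sketch, assuming the buffer happens to be 16-byte aligned:

#include <stdint.h>

static void hexdump_demo(void)
{
	uint8_t cl[64] = { 0 };

	/* Prints one 16-byte row; the first three and last eight byte
	 * positions appear as ".." placeholders.
	 */
	hexdump(&cl[3], 5);
}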
/* Currently, the CENA support code expects each 32-bit word to be written in
@@ -103,34 +132,6 @@ static inline void u64_from_le32_copy(uint64_t *d, const void *s,
}
}
-/* Convert a host-native 32bit value into little endian */
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-static inline uint32_t make_le32(uint32_t val)
-{
- return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
- ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
-}
-
-static inline uint32_t make_le24(uint32_t val)
-{
- return (((val & 0xff) << 16) | (val & 0xff00) |
- ((val & 0xff0000) >> 16));
-}
-
-static inline void make_le32_n(uint32_t *val, unsigned int num)
-{
- while (num--) {
- *val = make_le32(*val);
- val++;
- }
-}
-
-#else
-#define make_le32(val) (val)
-#define make_le24(val) (val)
-#define make_le32_n(val, len) do {} while (0)
-#endif
-
/******************/
/* Portal access */
/******************/
@@ -226,7 +227,6 @@ static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
#ifdef QBMAN_CENA_TRACE
pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
s->addr_cena, s->idx, offset);
- hexdump(cmd, 64);
#endif
dcbf(s->addr_cena + offset);
}
@@ -259,12 +259,8 @@ static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
uint32_t offset)
{
#ifdef QBMAN_CENA_TRACE
- pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
- s->addr_cena, s->idx, offset, shadow);
-#endif
-
-#ifdef QBMAN_CENA_TRACE
- hexdump(shadow, 64);
+ pr_info("qbman_cena_read(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
#endif
return s->addr_cena + offset;
}
@@ -297,20 +293,20 @@ static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
* qbman_portal.c. So use of it is declared locally here.
*/
#define QBMAN_CINH_SWP_CFG 0xd00
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
-/* For MC portal use, we always configure with
- * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
- * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2)
- * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
- * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
- * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2)
- * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE)
- * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
- * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE)
- * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
- * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE)
- * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE)
- */
static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
uint8_t est, uint8_t rpm, uint8_t dcm,
uint8_t epm, int sd, int sp, int se,
@@ -318,12 +314,19 @@ static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
{
uint32_t reg;
- reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |
- e32_uint8_t(16, 3, est) |
- e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |
- e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |
- e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |
- e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn);
+ reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+
return reg;
}
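For example, the MC-portal defaults listed in the deleted comment block (DQRR_MF=4, EST=2, WN=0, RPM=3, DCM=2, EPM=2, all stashing bits true) compose as follows (sketch):

uint32_t reg = qbman_set_swp_cfg(4, 0, 2, 3, 2, 2, 1, 1, 1, 1, 1, 1);
/* reg == 4<<20 | 2<<16 | 3<<12 | 2<<10 | 2<<8 | 0x3f == 0x00423a3f */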
@@ -336,7 +339,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
s->idx = (uint32_t)d->idx;
- s->cena = (void *)get_zeroed_page(GFP_KERNEL);
+ s->cena = malloc(4096);
if (!s->cena) {
pr_err("Could not allocate page for cena shadow\n");
return -1;
@@ -361,7 +364,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
if (!reg) {
pr_err("The portal %d is not enabled!\n", s->idx);
- kfree(s->cena);
+ free(s->cena);
return -1;
}
return 0;
@@ -369,17 +372,5 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
{
- free_page((unsigned long)s->cena);
-}
-
-static inline void *
-qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s,
- uint32_t offset)
-{
-#ifdef QBMAN_CENA_TRACE
- pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
- s->addr_cena, s->idx, offset);
-#endif
- QBMAN_BUG_ON(offset & 63);
- return (s->addr_cena + offset);
+ free(s->cena);
}
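Note that malloc(4096) provides neither the zero fill nor the alignment that get_zeroed_page() guaranteed. A sketch, assuming the CENA shadow is expected to be zeroed and page-aligned:

#include <stdlib.h>
#include <string.h>

static void *alloc_cena_shadow(void)
{
	void *cena = NULL;

	if (posix_memalign(&cena, 4096, 4096) != 0)
		return NULL;		/* allocation failed */
	memset(cena, 0, 4096);		/* match get_zeroed_page() semantics */
	return cena;
}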
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index e52f5ed2..e1125cfe 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -34,27 +34,6 @@
#error "Unknown endianness!"
#endif
-/* The platform-independent code shouldn't need endianness, except for
- * weird/fast-path cases like qbman_result_has_token(), which needs to
- * perform a passive and endianness-specific test on a read-only data structure
- * very quickly. It's an exception, and this symbol is used for that case.
- */
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define DQRR_TOK_OFFSET 0
-#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24
-#define SCN_STATE_OFFSET_IN_MEM 8
-#define SCN_RID_OFFSET_IN_MEM 8
-#else
-#define DQRR_TOK_OFFSET 24
-#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0
-#define SCN_STATE_OFFSET_IN_MEM 16
-#define SCN_RID_OFFSET_IN_MEM 0
-#endif
-
-/* Similarly-named functions */
-#define upper32(a) upper_32_bits(a)
-#define lower32(a) lower_32_bits(a)
-
/****************/
/* arch assists */
/****************/
@@ -64,10 +43,10 @@
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
static inline void prefetch_for_load(void *p)
{
- asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));
+ asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p));
}
static inline void prefetch_for_store(void *p)
{
- asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p));
+ asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p));
}
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index 3cdf14ef..51a2ac69 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -62,14 +62,16 @@ DPDK_17.08 {
dpio_remove_static_dequeue_channel;
mc_get_soc_version;
mc_get_version;
+ qbman_check_new_result;
qbman_eq_desc_set_dca;
qbman_get_dqrr_from_idx;
qbman_get_dqrr_idx;
qbman_result_DQ_fqd_ctx;
- qbman_result_SCN_state_in_mem;
+ qbman_result_SCN_state;
qbman_swp_dqrr_consume;
qbman_swp_dqrr_next;
- qbman_swp_enqueue_multiple_eqdesc;
+ qbman_swp_enqueue_multiple;
+ qbman_swp_enqueue_multiple_desc;
qbman_swp_interrupt_clear_status;
qbman_swp_push_set;
rte_dpaa2_alloc_dpci_dev;
@@ -77,3 +79,13 @@ DPDK_17.08 {
rte_global_active_dqs_list;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_dpbp_supported;
+ rte_dpaa2_dev_type;
+ rte_dpaa2_intr_disable;
+ rte_dpaa2_intr_enable;
+
+} DPDK_17.08;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index e60d6eba..4c32db62 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -50,11 +50,17 @@ extern "C" {
#include <sys/queue.h>
#include <stdint.h>
#include <inttypes.h>
+#include <linux/vfio.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
#include <rte_bus.h>
+#include <rte_tailq.h>
+
+#include <fslmc_vfio.h>
+
+#define FSLMC_OBJECT_MAX_LEN 32 /**< Max length of each device name on bus */
struct rte_dpaa2_driver;
@@ -64,6 +70,36 @@ TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
extern struct rte_fslmc_bus rte_fslmc_bus;
+enum rte_dpaa2_dev_type {
+ /* Devices backed by DPDK driver */
+ DPAA2_ETH, /**< DPNI type device */
+ DPAA2_CRYPTO, /**< DPSECI type device */
+ DPAA2_CON, /**< DPCONC type device */
+ /* Devices not backed by a DPDK driver: DPIO, DPBP, DPCI, DPMCP */
+ DPAA2_BPOOL, /**< DPBP type device */
+ DPAA2_IO, /**< DPIO type device */
+ DPAA2_CI, /**< DPCI type device */
+ DPAA2_MPORTAL, /**< DPMCP type device */
+ /* Unknown device placeholder */
+ DPAA2_UNKNOWN
+};
+
+TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object);
+
+typedef int (*rte_dpaa2_obj_create_t)(int vdev_fd,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+/**
+ * A structure describing a DPAA2 object.
+ */
+struct rte_dpaa2_object {
+ TAILQ_ENTRY(rte_dpaa2_object) next; /**< Next in list. */
+ const char *name; /**< Name of Object. */
+ enum rte_dpaa2_dev_type dev_type; /**< Type of device */
+ rte_dpaa2_obj_create_t create;
+};
+
/**
* A structure describing a DPAA2 device.
*/
@@ -74,11 +110,11 @@ struct rte_dpaa2_device {
struct rte_eth_dev *eth_dev; /**< ethernet device */
struct rte_cryptodev *cryptodev; /**< Crypto Device */
};
- uint16_t dev_type; /**< Device Type */
- uint16_t object_id; /**< DPAA2 Object ID */
+ enum rte_dpaa2_dev_type dev_type; /**< Device Type */
+ uint16_t object_id; /**< DPAA2 Object ID */
struct rte_intr_handle intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
- char name[32]; /**< DPAA2 Object name*/
+ char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name */
};
typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
@@ -93,7 +129,7 @@ struct rte_dpaa2_driver {
struct rte_driver driver; /**< Inherit core driver. */
struct rte_fslmc_bus *fslmc_bus; /**< FSLMC bus reference */
uint32_t drv_flags; /**< Flags for controlling device.*/
- uint16_t drv_type; /**< Driver Type */
+ enum rte_dpaa2_dev_type drv_type; /**< Driver Type */
rte_dpaa2_probe_t probe;
rte_dpaa2_remove_t remove;
};
@@ -143,4 +179,23 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
}
#endif
+/**
+ * Register a DPAA2 MC Object driver.
+ *
+ * @param object
+ *   A pointer to a rte_dpaa2_object structure describing the MC object
+ *   to be registered.
+ */
+void rte_fslmc_object_register(struct rte_dpaa2_object *object);
+
+/** Helper for DPAA2 object registration */
+#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
+RTE_INIT(dpaa2objinitfn_ ##nm); \
+static void dpaa2objinitfn_ ##nm(void) \
+{\
+ (dpaa2_obj).name = RTE_STR(nm);\
+ rte_fslmc_object_register(&dpaa2_obj); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
#endif /* _RTE_FSLMC_H_ */
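Hypothetical usage of the new registration hook; the dpbp object and the dpaa2_create_dpbp callback are illustrative, not part of this patch:

static int dpaa2_create_dpbp(int vdev_fd,
			     struct vfio_device_info *obj_info,
			     int object_id)
{
	(void)vdev_fd; (void)obj_info; (void)object_id;
	return 0;	/* probe/initialize the DPBP object here */
}

static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
	.dev_type = DPAA2_BPOOL,	/* .name is filled in by the macro */
	.create = dpaa2_create_dpbp,
};
RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);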
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
new file mode 100644
index 00000000..f3df1c4c
--- /dev/null
+++ b/drivers/bus/pci/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_pci.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_pci_version.map
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+SYSTEM := bsd
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_pci
+
+include $(RTE_SDK)/drivers/bus/pci/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PCI_BUS)-include += rte_bus_pci.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/pci/bsd/Makefile b/drivers/bus/pci/bsd/Makefile
new file mode 100644
index 00000000..4450913e
--- /dev/null
+++ b/drivers/bus/pci/bsd/Makefile
@@ -0,0 +1,32 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+SRCS += pci.c
diff --git a/lib/librte_eal/bsdapp/eal/eal_pci.c b/drivers/bus/pci/bsd/pci.c
index 04eacdcc..facc4b12 100644
--- a/lib/librte_eal/bsdapp/eal/eal_pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -57,10 +57,10 @@
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -71,7 +71,7 @@
#include <rte_devargs.h>
#include "eal_filesystem.h"
-#include "eal_private.h"
+#include "private.h"
/**
* @file
@@ -323,7 +323,7 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
int ret;
TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
- ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
if (ret > 0)
continue;
else if (ret < 0) {
@@ -368,7 +368,7 @@ rte_pci_scan(void)
};
/* for debug purposes, PCI can be disabled */
- if (internal_config.no_pci)
+ if (!rte_eal_has_pci())
return 0;
fd = open("/dev/pci", O_RDONLY);
@@ -403,6 +403,16 @@ error:
return -1;
}
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ /* Supports only RTE_KDRV_NIC_UIO */
+ return RTE_IOVA_PA;
+}
+
int
pci_update_device(const struct rte_pci_addr *addr)
{
diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile
new file mode 100644
index 00000000..77c5f970
--- /dev/null
+++ b/drivers/bus/pci/linux/Makefile
@@ -0,0 +1,36 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+SRCS += pci.c
+SRCS += pci_uio.c
+SRCS += pci_vfio.c
+
+CFLAGS += -D_GNU_SOURCE
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/drivers/bus/pci/linux/pci.c
index 8951ce74..5da6728f 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -37,14 +37,18 @@
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
+#include <rte_vfio.h>
-#include "eal_filesystem.h"
#include "eal_private.h"
-#include "eal_pci_init.h"
+#include "eal_filesystem.h"
+
+#include "private.h"
+#include "pci_init.h"
/**
* @file
@@ -362,7 +366,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
int ret;
TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
- ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
+ ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
if (ret > 0)
continue;
@@ -391,7 +395,7 @@ pci_update_device(const struct rte_pci_addr *addr)
char filename[PATH_MAX];
snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
- pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
addr->function);
return pci_scan_one(filename, addr);
@@ -456,10 +460,15 @@ rte_pci_scan(void)
struct rte_pci_addr addr;
/* for debug purposes, PCI can be disabled */
- if (internal_config.no_pci)
+ if (!rte_eal_has_pci())
return 0;
- dir = opendir(pci_get_sysfs_path());
+#ifdef VFIO_PRESENT
+ if (!pci_vfio_is_enabled())
+ RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
+#endif
+
+ dir = opendir(rte_pci_get_sysfs_path());
if (dir == NULL) {
RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
__func__, strerror(errno));
@@ -474,7 +483,7 @@ rte_pci_scan(void)
continue;
snprintf(dirname, sizeof(dirname), "%s/%s",
- pci_get_sysfs_path(), e->d_name);
+ rte_pci_get_sysfs_path(), e->d_name);
if (pci_scan_one(dirname, &addr) < 0)
goto error;
@@ -487,6 +496,131 @@ error:
return -1;
}
+/*
+ * Is any PCI device bound to a kernel driver?
+ */
+static inline int
+pci_one_device_is_bound(void)
+{
+ struct rte_pci_device *dev = NULL;
+ int ret = 0;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_UNKNOWN ||
+ dev->kdrv == RTE_KDRV_NONE) {
+ continue;
+ } else {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Is any device (per the bus scan policy) bound to UIO?
+ */
+static inline int
+pci_one_device_bound_uio(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_devargs *devargs;
+ int need_check;
+
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ devargs = dev->device.devargs;
+
+ need_check = 0;
+ switch (rte_pci_bus.bus.conf.scan_mode) {
+ case RTE_BUS_SCAN_WHITELIST:
+ if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
+ need_check = 1;
+ break;
+ case RTE_BUS_SCAN_UNDEFINED:
+ case RTE_BUS_SCAN_BLACKLIST:
+ if (devargs == NULL ||
+ devargs->policy != RTE_DEV_BLACKLISTED)
+ need_check = 1;
+ break;
+ }
+
+ if (!need_check)
+ continue;
+
+ if (dev->kdrv == RTE_KDRV_IGB_UIO ||
+ dev->kdrv == RTE_KDRV_UIO_GENERIC) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Does any VFIO-bound device want IOVA as VA?
+ */
+static inline int
+pci_one_device_has_iova_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (dev->kdrv == RTE_KDRV_VFIO &&
+ rte_pci_match(drv, dev))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get iommu class of PCI devices on the bus.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void)
+{
+ bool is_bound;
+ bool is_vfio_noiommu_enabled = true;
+ bool has_iova_va;
+ bool is_bound_uio;
+ bool spapr_iommu =
+#if defined(RTE_ARCH_PPC_64)
+ true;
+#else
+ false;
+#endif
+
+ is_bound = pci_one_device_is_bound();
+ if (!is_bound)
+ return RTE_IOVA_DC;
+
+ has_iova_va = pci_one_device_has_iova_va();
+ is_bound_uio = pci_one_device_bound_uio();
+#ifdef VFIO_PRESENT
+ is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
+ true : false;
+#endif
+
+ if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
+ !spapr_iommu)
+ return RTE_IOVA_VA;
+
+ if (has_iova_va) {
+ RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because.. ");
+ if (is_vfio_noiommu_enabled)
+ RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
+ if (is_bound_uio)
+ RTE_LOG(WARNING, EAL, "few device bound to UIO\n");
+ if (spapr_iommu)
+ RTE_LOG(WARNING, EAL, "sPAPR IOMMU does not support IOVA as VA\n");
+ }
+
+ return RTE_IOVA_PA;
+}
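The decision above reduces to: report VA only when at least one VFIO-bound device wants IOVA as VA and nothing (a UIO binding, vfio-noiommu, or the sPAPR IOMMU) forces physical addressing. A condensed sketch of the same predicate:

#include <stdbool.h>

static enum rte_iova_mode
iova_mode_decision(bool is_bound, bool has_iova_va, bool is_bound_uio,
		   bool noiommu, bool spapr_iommu)
{
	if (!is_bound)
		return RTE_IOVA_DC;	/* nothing bound: don't care */
	if (has_iova_va && !is_bound_uio && !noiommu && !spapr_iommu)
		return RTE_IOVA_VA;	/* every consumer can take VA */
	return RTE_IOVA_PA;		/* otherwise fall back to PA */
}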
+
/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset)
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_init.h b/drivers/bus/pci/linux/pci_init.h
index ae2980d6..f342c47d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_init.h
+++ b/drivers/bus/pci/linux/pci_init.h
@@ -34,7 +34,9 @@
#ifndef EAL_PCI_INIT_H_
#define EAL_PCI_INIT_H_
-#include "eal_vfio.h"
+#include <linux/version.h>
+
+#include <rte_vfio.h>
/** IO resource type: */
#define IORESOURCE_IO 0x00000100
@@ -74,6 +76,16 @@ int pci_uio_ioport_unmap(struct rte_pci_ioport *p);
#ifdef VFIO_PRESENT
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
+#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
+#else
+#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
+#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
+#endif
+
/* access config space */
int pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
void *buf, size_t len, off_t offs);
@@ -92,6 +104,8 @@ int pci_vfio_ioport_unmap(struct rte_pci_ioport *p);
int pci_vfio_map_resource(struct rte_pci_device *dev);
int pci_vfio_unmap_resource(struct rte_pci_device *dev);
+int pci_vfio_is_enabled(void);
+
#endif
#endif /* EAL_PCI_INIT_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index fa10329f..92b7f027 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -47,12 +47,13 @@
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include "eal_filesystem.h"
-#include "eal_pci_init.h"
+#include "pci_init.h"
void *pci_map_addr = NULL;
@@ -154,7 +155,7 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
unsigned int buflen, int create)
{
struct rte_pci_addr *loc = &dev->addr;
- unsigned int uio_num;
+ int uio_num = -1;
struct dirent *e;
DIR *dir;
char dirname[PATH_MAX];
@@ -163,14 +164,14 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
* or uio:uioX */
snprintf(dirname, sizeof(dirname),
- "%s/" PCI_PRI_FMT "/uio", pci_get_sysfs_path(),
+ "%s/" PCI_PRI_FMT "/uio", rte_pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
/* retry with the parent directory */
snprintf(dirname, sizeof(dirname),
- "%s/" PCI_PRI_FMT, pci_get_sysfs_path(),
+ "%s/" PCI_PRI_FMT, rte_pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
@@ -214,7 +215,7 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
return -1;
/* create uio device if we've been asked to */
- if (internal_config.create_uio_dev && create &&
+ if (rte_eal_create_uio_dev() && create &&
pci_mknod_uio_dev(dstbuf, uio_num) < 0)
RTE_LOG(WARNING, EAL, "Cannot create /dev/uio%u\n", uio_num);
@@ -322,7 +323,7 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
/* update devname for mmap */
snprintf(devname, sizeof(devname),
"%s/" PCI_PRI_FMT "/resource%d",
- pci_get_sysfs_path(),
+ rte_pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid,
loc->function, res_idx);
@@ -431,7 +432,7 @@ pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
/* open and read addresses of the corresponding resource in sysfs */
snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource",
- pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
dev->addr.devid, dev->addr.function);
f = fopen(filename, "r");
if (f == NULL) {
@@ -453,7 +454,7 @@ pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
goto error;
}
snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource%d",
- pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ rte_pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
dev->addr.devid, dev->addr.function, bar);
/* mmap the pci resource */
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index aa9d96ed..1f93fa4d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -42,13 +42,15 @@
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
+#include <rte_vfio.h>
#include "eal_filesystem.h"
-#include "eal_pci_init.h"
-#include "eal_vfio.h"
-#include "eal_private.h"
+
+#include "pci_init.h"
+#include "private.h"
/**
* @file
@@ -88,8 +90,7 @@ pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
/* get PCI BAR number where MSI-X interrupts are */
static int
-pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
- uint32_t *msix_table_size)
+pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
{
int ret;
uint32_t reg;
@@ -161,9 +162,10 @@ pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
return -1;
}
- *msix_bar = reg & RTE_PCI_MSIX_TABLE_BIR;
- *msix_table_offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
- *msix_table_size = 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
+ msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
+ msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
+ msix_table->size =
+ 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
return 0;
}
@@ -209,14 +211,18 @@ static int
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
{
int i, ret, intr_idx;
+ enum rte_intr_mode intr_mode;
/* default to invalid index */
intr_idx = VFIO_PCI_NUM_IRQS;
+ /* Get default / configured intr_mode */
+ intr_mode = rte_eal_vfio_intr_mode();
+
/* get interrupt type from internal config (MSI-X by default, can be
* overridden from the command line
*/
- switch (internal_config.vfio_intr_mode) {
+ switch (intr_mode) {
case RTE_INTR_MODE_MSIX:
intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
break;
@@ -240,7 +246,7 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
int fd = -1;
/* skip interrupt modes we don't want */
- if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE &&
+ if (intr_mode != RTE_INTR_MODE_NONE &&
i != intr_idx)
continue;
@@ -256,7 +262,7 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
/* if this vector cannot be used with eventfd, fail if we explicitly
* specified interrupt type, otherwise continue */
if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
- if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE) {
+ if (intr_mode != RTE_INTR_MODE_NONE) {
RTE_LOG(ERR, EAL,
" interrupt vector does not support eventfd!\n");
return -1;
@@ -277,15 +283,15 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
switch (i) {
case VFIO_PCI_MSIX_IRQ_INDEX:
- internal_config.vfio_intr_mode = RTE_INTR_MODE_MSIX;
+ intr_mode = RTE_INTR_MODE_MSIX;
dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
break;
case VFIO_PCI_MSI_IRQ_INDEX:
- internal_config.vfio_intr_mode = RTE_INTR_MODE_MSI;
+ intr_mode = RTE_INTR_MODE_MSI;
dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
break;
case VFIO_PCI_INTX_IRQ_INDEX:
- internal_config.vfio_intr_mode = RTE_INTR_MODE_LEGACY;
+ intr_mode = RTE_INTR_MODE_LEGACY;
dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
break;
default:
@@ -300,25 +306,157 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
return -1;
}
-/*
- * map the PCI resources of a PCI device in virtual memory (VFIO version).
- * primary and secondary processes follow almost exactly the same path
- */
-int
-pci_vfio_map_resource(struct rte_pci_device *dev)
+static int
+pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
+{
+ uint32_t ioport_bar;
+ int ret;
+
+ ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
+ VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
+ + PCI_BASE_ADDRESS_0 + bar_index*4);
+ if (ret != sizeof(ioport_bar)) {
+ RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n",
+ PCI_BASE_ADDRESS_0 + bar_index*4);
+ return -1;
+ }
+
+ return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
+}
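The helper reads the raw BAR dword out of VFIO's config-space region; PCI_BASE_ADDRESS_SPACE_IO (bit 0) distinguishes I/O-port from memory BARs. Usage sketch, probing BAR2 at config offset PCI_BASE_ADDRESS_0 + 2 * 4 = 0x18:

int is_io = pci_vfio_is_ioport_bar(vfio_dev_fd, 2);
/* 1: I/O-port BAR, 0: memory BAR, -1: config-space read failed */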
+
+static int
+pci_rte_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
+{
+ if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
+ RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
+ return -1;
+ }
+
+ /* set bus mastering for the device */
+ if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
+ RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
+ return -1;
+ }
+
+ /*
+ * Reset the device. If the device is not capable of resetting,
+ * then it updates errno as EINVAL.
+ */
+ if (ioctl(vfio_dev_fd, VFIO_DEVICE_RESET) && errno != EINVAL) {
+ RTE_LOG(ERR, EAL, "Unable to reset device! Error: %d (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
+ int bar_index, int additional_flags)
+{
+ struct memreg {
+ unsigned long offset, size;
+ } memreg[2] = {};
+ void *bar_addr;
+ struct pci_msix_table *msix_table = &vfio_res->msix_table;
+ struct pci_map *bar = &vfio_res->maps[bar_index];
+
+ if (bar->size == 0)
+ /* Skip this BAR */
+ return 0;
+
+ if (msix_table->bar_index == bar_index) {
+ /*
+ * VFIO will not let us map the MSI-X table,
+ * but we can map around it.
+ */
+ uint32_t table_start = msix_table->offset;
+ uint32_t table_end = table_start + msix_table->size;
+ table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
+ table_start &= PAGE_MASK;
+
+ if (table_start == 0 && table_end >= bar->size) {
+ /* Cannot map this BAR */
+ RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
+ bar->size = 0;
+ bar->addr = 0;
+ return 0;
+ }
+
+ memreg[0].offset = bar->offset;
+ memreg[0].size = table_start;
+ memreg[1].offset = bar->offset + table_end;
+ memreg[1].size = bar->size - table_end;
+
+ RTE_LOG(DEBUG, EAL,
+ "Trying to map BAR%d that contains the MSI-X "
+ "table. Trying offsets: "
+ "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", bar_index,
+ memreg[0].offset, memreg[0].size,
+ memreg[1].offset, memreg[1].size);
+ } else {
+ memreg[0].offset = bar->offset;
+ memreg[0].size = bar->size;
+ }
+
+ /* reserve the address using an inaccessible mapping */
+ bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
+ MAP_ANONYMOUS | additional_flags, -1, 0);
+ if (bar_addr != MAP_FAILED) {
+ void *map_addr = NULL;
+ if (memreg[0].size) {
+ /* actual map of first part */
+ map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
+ memreg[0].offset,
+ memreg[0].size,
+ MAP_FIXED);
+ }
+
+ /* if there's a second part, try to map it */
+ if (map_addr != MAP_FAILED
+ && memreg[1].offset && memreg[1].size) {
+ void *second_addr = RTE_PTR_ADD(bar_addr,
+ memreg[1].offset -
+ (uintptr_t)bar->offset);
+ map_addr = pci_map_resource(second_addr,
+ vfio_dev_fd,
+ memreg[1].offset,
+ memreg[1].size,
+ MAP_FIXED);
+ }
+
+ if (map_addr == MAP_FAILED || !map_addr) {
+ munmap(bar_addr, bar->size);
+ bar_addr = MAP_FAILED;
+ RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
+ bar_index);
+ return -1;
+ }
+ } else {
+ RTE_LOG(ERR, EAL,
+ "Failed to create inaccessible mapping for BAR%d\n",
+ bar_index);
+ return -1;
+ }
+
+ bar->addr = bar_addr;
+ return 0;
+}
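To make the window arithmetic concrete, a worked sketch for 4 KiB pages, a 0x4000-byte BAR at offset 0, and an MSI-X table at offset 0x1000 of size 0x100:

const uint32_t page = 4096;
uint32_t bar_size = 0x4000;
uint32_t table_start = 0x1000, table_end = table_start + 0x100;

table_end = (table_end + page - 1) & ~(page - 1);	/* -> 0x2000 */
table_start &= ~(page - 1);				/* -> 0x1000 */
/* memreg[0] = { 0x0000, table_start }             = { 0x0000, 0x1000 }
 * memreg[1] = { table_end, bar_size - table_end } = { 0x2000, 0x2000 }
 * The page holding the table stays covered only by the inaccessible
 * anonymous reservation made above.
 */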
+
+static int
+pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
char pci_addr[PATH_MAX] = {0};
int vfio_dev_fd;
struct rte_pci_addr *loc = &dev->addr;
- int i, ret, msix_bar;
+ int i, ret;
struct mapped_pci_resource *vfio_res = NULL;
- struct mapped_pci_res_list *vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
struct pci_map *maps;
- uint32_t msix_table_offset = 0;
- uint32_t msix_table_size = 0;
- uint32_t ioport_bar;
dev->intr_handle.fd = -1;
dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
@@ -327,91 +465,58 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
- if ((ret = vfio_setup_device(pci_get_sysfs_path(), pci_addr,
- &vfio_dev_fd, &device_info)))
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
return ret;
- /* get MSI-X BAR, if any (we have to know where it is because we can't
- * easily mmap it when using VFIO) */
- msix_bar = -1;
- ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar,
- &msix_table_offset, &msix_table_size);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n", pci_addr);
- close(vfio_dev_fd);
- return -1;
+ /* allocate vfio_res and get region info */
+ vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot store uio mmap details\n", __func__);
+ goto err_vfio_dev_fd;
}
+ memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
- /* if we're in a primary process, allocate vfio_res and get region info */
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
- vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
- if (vfio_res == NULL) {
- RTE_LOG(ERR, EAL,
- "%s(): cannot store uio mmap details\n", __func__);
- close(vfio_dev_fd);
- return -1;
- }
- memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));
-
- /* get number of registers (up to BAR5) */
- vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
- VFIO_PCI_BAR5_REGION_INDEX + 1);
- } else {
- /* if we're in a secondary process, just find our tailq entry */
- TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
- if (rte_eal_compare_pci_addr(&vfio_res->pci_addr,
- &dev->addr))
- continue;
- break;
- }
- /* if we haven't found our tailq entry, something's wrong */
- if (vfio_res == NULL) {
- RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
- pci_addr);
- close(vfio_dev_fd);
- return -1;
- }
- }
+ /* get number of registers (up to BAR5) */
+ vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
+ VFIO_PCI_BAR5_REGION_INDEX + 1);
/* map BARs */
maps = vfio_res->maps;
+ vfio_res->msix_table.bar_index = -1;
+ /* get MSI-X BAR, if any (we have to know where it is because we can't
+ * easily mmap it when using VFIO)
+ */
+ ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
+ }
+
for (i = 0; i < (int) vfio_res->nb_maps; i++) {
struct vfio_region_info reg = { .argsz = sizeof(reg) };
void *bar_addr;
- struct memreg {
- unsigned long offset, size;
- } memreg[2] = {};
reg.index = i;
ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
-
if (ret) {
RTE_LOG(ERR, EAL, " %s cannot get device region info "
"error %i (%s)\n", pci_addr, errno, strerror(errno));
- close(vfio_dev_fd);
- if (internal_config.process_type == RTE_PROC_PRIMARY)
- rte_free(vfio_res);
- return -1;
+ goto err_vfio_res;
}
/* chk for io port region */
- ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
- VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
- + PCI_BASE_ADDRESS_0 + i*4);
-
- if (ret != sizeof(ioport_bar)) {
- RTE_LOG(ERR, EAL,
- "Cannot read command (%x) from config space!\n",
- PCI_BASE_ADDRESS_0 + i*4);
- return -1;
- }
-
- if (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) {
- RTE_LOG(INFO, EAL,
- "Ignore mapping IO port bar(%d) addr: %x\n",
- i, ioport_bar);
+ ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
+ if (ret < 0)
+ goto err_vfio_res;
+ else if (ret) {
+ RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
+ i);
continue;
}
@@ -419,124 +524,114 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
continue;
- if (i == msix_bar) {
- /*
- * VFIO will not let us map the MSI-X table,
- * but we can map around it.
- */
- uint32_t table_start = msix_table_offset;
- uint32_t table_end = table_start + msix_table_size;
- table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
- table_start &= PAGE_MASK;
-
- if (table_start == 0 && table_end >= reg.size) {
- /* Cannot map this BAR */
- RTE_LOG(DEBUG, EAL, "Skipping BAR %d\n", i);
- continue;
- } else {
- memreg[0].offset = reg.offset;
- memreg[0].size = table_start;
- memreg[1].offset = reg.offset + table_end;
- memreg[1].size = reg.size - table_end;
-
- RTE_LOG(DEBUG, EAL,
- "Trying to map BAR %d that contains the MSI-X "
- "table. Trying offsets: "
- "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", i,
- memreg[0].offset, memreg[0].size,
- memreg[1].offset, memreg[1].size);
- }
- } else {
- memreg[0].offset = reg.offset;
- memreg[0].size = reg.size;
- }
+ /* try mapping somewhere close to the end of hugepages */
+ if (pci_map_addr == NULL)
+ pci_map_addr = pci_find_max_end_va();
- /* try to figure out an address */
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
- /* try mapping somewhere close to the end of hugepages */
- if (pci_map_addr == NULL)
- pci_map_addr = pci_find_max_end_va();
+ bar_addr = pci_map_addr;
+ pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
- bar_addr = pci_map_addr;
- pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
- } else {
- bar_addr = maps[i].addr;
+ maps[i].addr = bar_addr;
+ maps[i].offset = reg.offset;
+ maps[i].size = reg.size;
+ maps[i].path = NULL; /* vfio doesn't have per-resource paths */
+
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_res;
}
- /* reserve the address using an inaccessible mapping */
- bar_addr = mmap(bar_addr, reg.size, 0, MAP_PRIVATE |
- MAP_ANONYMOUS, -1, 0);
- if (bar_addr != MAP_FAILED) {
- void *map_addr = NULL;
- if (memreg[0].size) {
- /* actual map of first part */
- map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
- memreg[0].offset,
- memreg[0].size,
- MAP_FIXED);
- }
+ dev->mem_resource[i].addr = maps[i].addr;
+ }
- /* if there's a second part, try to map it */
- if (map_addr != MAP_FAILED
- && memreg[1].offset && memreg[1].size) {
- void *second_addr = RTE_PTR_ADD(bar_addr,
- memreg[1].offset -
- (uintptr_t)reg.offset);
- map_addr = pci_map_resource(second_addr,
- vfio_dev_fd, memreg[1].offset,
- memreg[1].size,
- MAP_FIXED);
- }
+ if (pci_rte_vfio_setup_device(dev, vfio_dev_fd) < 0) {
+ RTE_LOG(ERR, EAL, " %s setup device failed\n", pci_addr);
+ goto err_vfio_res;
+ }
- if (map_addr == MAP_FAILED || !map_addr) {
- munmap(bar_addr, reg.size);
- bar_addr = MAP_FAILED;
- }
- }
+ TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
- if (bar_addr == MAP_FAILED ||
- (internal_config.process_type == RTE_PROC_SECONDARY &&
- bar_addr != maps[i].addr)) {
- RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n", pci_addr, i,
- strerror(errno));
- close(vfio_dev_fd);
- if (internal_config.process_type == RTE_PROC_PRIMARY)
- rte_free(vfio_res);
- return -1;
- }
+ return 0;
+err_vfio_res:
+ rte_free(vfio_res);
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
- maps[i].addr = bar_addr;
- maps[i].offset = reg.offset;
- maps[i].size = reg.size;
- maps[i].path = NULL; /* vfio doesn't have per-resource paths */
- dev->mem_resource[i].addr = bar_addr;
+static int
+pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
+{
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char pci_addr[PATH_MAX] = {0};
+ int vfio_dev_fd;
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+
+ struct pci_map *maps;
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
+ /* if we're in a secondary process, just find our tailq entry */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr,
+ &dev->addr))
+ continue;
+ break;
+ }
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ goto err_vfio_dev_fd;
}
- /* if secondary process, do not set up interrupts */
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
- if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
- RTE_LOG(ERR, EAL, " %s error setting up interrupts!\n", pci_addr);
- close(vfio_dev_fd);
- rte_free(vfio_res);
- return -1;
- }
+ /* map BARs */
+ maps = vfio_res->maps;
- /* set bus mastering for the device */
- if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
- RTE_LOG(ERR, EAL, " %s cannot set up bus mastering!\n", pci_addr);
- close(vfio_dev_fd);
- rte_free(vfio_res);
- return -1;
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n",
+ pci_addr, i, strerror(errno));
+ goto err_vfio_dev_fd;
}
- /* Reset the device */
- ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);
+ dev->mem_resource[i].addr = maps[i].addr;
}
- if (internal_config.process_type == RTE_PROC_PRIMARY)
- TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
-
return 0;
+err_vfio_dev_fd:
+ close(vfio_dev_fd);
+ return -1;
+}
+
+/*
+ * Map the PCI resources of a PCI device in virtual memory (VFIO version).
+ * Primary and secondary processes follow almost exactly the same path.
+ */
+int
+pci_vfio_map_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_map_resource_primary(dev);
+ else
+ return pci_vfio_map_resource_secondary(dev);
}
int
@@ -567,7 +662,7 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
return -1;
}
- ret = vfio_release_device(pci_get_sysfs_path(), pci_addr,
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
dev->intr_handle.vfio_dev_fd);
if (ret < 0) {
RTE_LOG(ERR, EAL,
@@ -661,14 +756,8 @@ pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
}
int
-pci_vfio_enable(void)
-{
- return vfio_enable("vfio_pci");
-}
-
-int
pci_vfio_is_enabled(void)
{
- return vfio_is_enabled("vfio_pci");
+ return rte_vfio_is_enabled("vfio_pci");
}
#endif
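The primary-path refactor above replaces the open-coded MSI-X handling with pci_vfio_get_msix_bar() and pci_vfio_mmap_bar(), and splits primary/secondary setup into separate functions sharing goto-based error unwinding. The deleted lines show the underlying technique: VFIO refuses to mmap() the pages holding the MSI-X table, so the BAR is reserved first and the accessible ranges are mapped over the reservation. A minimal standalone sketch of that map-around approach, with illustrative names (this is not the body of pci_vfio_mmap_bar()):

#include <stdint.h>
#include <sys/mman.h>

/* Reserve the whole BAR, then map the accessible parts around the
 * page-aligned MSI-X hole with MAP_FIXED. Illustrative only. */
static void *
map_bar_around_hole(int fd, off_t bar_off, size_t bar_sz,
		size_t hole_off, size_t hole_sz, size_t page_sz)
{
	size_t start = hole_off & ~(page_sz - 1);
	size_t end = (hole_off + hole_sz + page_sz - 1) & ~(page_sz - 1);
	void *base;

	/* inaccessible placeholder keeps the address range reserved */
	base = mmap(NULL, bar_sz, 0, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return MAP_FAILED;

	/* part of the BAR before the MSI-X table, if any */
	if (start > 0 &&
	    mmap(base, start, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, fd, bar_off) == MAP_FAILED)
		goto fail;

	/* part of the BAR after the MSI-X table, if any */
	if (end < bar_sz &&
	    mmap((char *)base + end, bar_sz - end, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, fd,
			bar_off + (off_t)end) == MAP_FAILED)
		goto fail;

	return base;
fail:
	munmap(base, bar_sz);
	return MAP_FAILED;
}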
diff --git a/lib/librte_eal/common/eal_common_pci.c b/drivers/bus/pci/pci_common.c
index 52fd38cd..104fdf90 100644
--- a/lib/librte_eal/common/eal_common_pci.c
+++ b/drivers/bus/pci/pci_common.c
@@ -45,21 +45,21 @@
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
-#include "eal_private.h"
+#include "private.h"
extern struct rte_pci_bus rte_pci_bus;
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
-const char *pci_get_sysfs_path(void)
+const char *rte_pci_get_sysfs_path(void)
{
const char *path = NULL;
@@ -81,7 +81,7 @@ static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
if (devargs->bus != pbus)
continue;
devargs->bus->parse(devargs->name, &addr);
- if (!rte_eal_compare_pci_addr(&dev->addr, &addr))
+ if (!rte_pci_addr_cmp(&dev->addr, &addr))
return devargs;
}
return NULL;
@@ -110,56 +110,10 @@ pci_name_set(struct rte_pci_device *dev)
dev->device.name = dev->name;
}
-/* map a particular resource from a file */
-void *
-pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
- int additional_flags)
-{
- void *mapaddr;
-
- /* Map the PCI memory resource of device */
- mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | additional_flags, fd, offset);
- if (mapaddr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
- __func__, fd, requested_addr,
- (unsigned long)size, (unsigned long)offset,
- strerror(errno), mapaddr);
- } else
- RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
-
- return mapaddr;
-}
-
-/* unmap a particular resource */
-void
-pci_unmap_resource(void *requested_addr, size_t size)
-{
- if (requested_addr == NULL)
- return;
-
- /* Unmap the PCI memory resource of device */
- if (munmap(requested_addr, size)) {
- RTE_LOG(ERR, EAL, "%s(): cannot munmap(%p, 0x%lx): %s\n",
- __func__, requested_addr, (unsigned long)size,
- strerror(errno));
- } else
- RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n",
- requested_addr);
-}
-
/*
* Match the PCI Driver and Device using the ID Table
- *
- * @param pci_drv
- * PCI driver from which ID table would be extracted
- * @param pci_dev
- * PCI device to match against the driver
- * @return
- * 1 for successful match
- * 0 for unsuccessful match
*/
-static int
+int
rte_pci_match(const struct rte_pci_driver *pci_drv,
const struct rte_pci_device *pci_dev)
{
@@ -271,6 +225,7 @@ rte_pci_detach_dev(struct rte_pci_device *dev)
{
struct rte_pci_addr *loc;
struct rte_pci_driver *dr;
+ int ret = 0;
if (dev == NULL)
return -EINVAL;
@@ -285,8 +240,11 @@ rte_pci_detach_dev(struct rte_pci_device *dev)
RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
- if (dr->remove && (dr->remove(dev) < 0))
- return -1; /* negative value is an error */
+ if (dr->remove) {
+ ret = dr->remove(dev);
+ if (ret < 0)
+ return ret;
+ }
/* clear driver structure */
dev->driver = NULL;
@@ -350,7 +308,7 @@ rte_pci_probe_one(const struct rte_pci_addr *addr)
goto err_return;
FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_eal_compare_pci_addr(&dev->addr, addr))
+ if (rte_pci_addr_cmp(&dev->addr, addr))
continue;
ret = pci_probe_all_drivers(dev);
@@ -380,7 +338,7 @@ rte_pci_detach(const struct rte_pci_addr *addr)
return -1;
FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_eal_compare_pci_addr(&dev->addr, addr))
+ if (rte_pci_addr_cmp(&dev->addr, addr))
continue;
ret = rte_pci_detach_dev(dev);
@@ -482,8 +440,7 @@ pci_parse(const char *name, void *addr)
struct rte_pci_addr pci_addr;
bool parse;
- parse = (eal_parse_pci_BDF(name, &pci_addr) == 0 ||
- eal_parse_pci_DomBDF(name, &pci_addr) == 0);
+ parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
if (parse && addr != NULL)
*out = pci_addr;
return parse == false;
@@ -559,8 +516,10 @@ pci_unplug(struct rte_device *dev)
pdev = RTE_DEV_TO_PCI(dev);
ret = rte_pci_detach_dev(pdev);
- rte_pci_remove_device(pdev);
- free(pdev);
+ if (ret == 0) {
+ rte_pci_remove_device(pdev);
+ free(pdev);
+ }
return ret;
}
@@ -572,6 +531,7 @@ struct rte_pci_bus rte_pci_bus = {
.plug = pci_plug,
.unplug = pci_unplug,
.parse = pci_parse,
+ .get_iommu_class = rte_pci_get_iommu_class,
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
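The rte_eal_compare_pci_addr() to rte_pci_addr_cmp() rename above keeps the memcmp-style convention of returning 0 when the two addresses are equal, which is why the loops use "if (rte_pci_addr_cmp(...)) continue;" to skip non-matching devices. A small lookup sketch built on the same convention (hypothetical helper, using the FOREACH_DEVICE_ON_PCIBUS iterator declared in rte_bus_pci.h):

#include <rte_bus_pci.h>

extern struct rte_pci_bus rte_pci_bus;

/* Return the probed device matching addr, or NULL; relies on
 * rte_pci_addr_cmp() returning 0 when the addresses are equal. */
static struct rte_pci_device *
pci_find_device_sketch(const struct rte_pci_addr *addr)
{
	struct rte_pci_device *dev;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		if (rte_pci_addr_cmp(&dev->addr, addr) == 0)
			return dev;
	}
	return NULL;
}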
diff --git a/lib/librte_eal/common/eal_common_pci_uio.c b/drivers/bus/pci/pci_common_uio.c
index 367a6816..06711316 100644
--- a/lib/librte_eal/common/eal_common_pci_uio.c
+++ b/drivers/bus/pci/pci_common_uio.c
@@ -39,11 +39,13 @@
#include <sys/mman.h>
#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_tailq.h>
#include <rte_log.h>
#include <rte_malloc.h>
-#include "eal_private.h"
+#include "private.h"
static struct rte_tailq_elem rte_uio_tailq = {
.name = "UIO_RESOURCE_LIST",
@@ -61,7 +63,7 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
TAILQ_FOREACH(uio_res, uio_res_list, next) {
/* skip this element if it doesn't match our PCI address */
- if (rte_eal_compare_pci_addr(&uio_res->pci_addr, &dev->addr))
+ if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
continue;
for (i = 0; i != uio_res->nb_maps; i++) {
@@ -187,7 +189,7 @@ pci_uio_find_resource(struct rte_pci_device *dev)
TAILQ_FOREACH(uio_res, uio_res_list, next) {
/* skip this element if it doesn't match our PCI address */
- if (!rte_eal_compare_pci_addr(&uio_res->pci_addr, &dev->addr))
+ if (!rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
return uio_res;
}
return NULL;
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
new file mode 100644
index 00000000..2283f093
--- /dev/null
+++ b/drivers/bus/pci/private.h
@@ -0,0 +1,248 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 6WIND. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PCI_PRIVATE_H_
+#define _PCI_PRIVATE_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+struct rte_pci_driver;
+struct rte_pci_device;
+
+/**
+ * Probe the PCI bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int
+rte_pci_probe(void);
+
+/**
+ * Scan the content of the PCI bus, and add the discovered devices to the
+ * device list
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_pci_scan(void);
+
+/**
+ * Probe the single PCI device.
+ *
+ * Scan the content of the PCI bus, find the PCI device specified by the PCI
+ * address, then call the probe() function of any registered driver that has a
+ * matching entry in its id_table for the discovered device.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to probe.
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_pci_probe_one(const struct rte_pci_addr *addr);
+
+/**
+ * Close the single PCI device.
+ *
+ * Scan the content of the PCI bus, find the PCI device specified by the PCI
+ * address, then call the remove() function of the registered driver that has a
+ * matching entry in its id_table for the discovered device.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to close.
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_pci_detach(const struct rte_pci_addr *addr);
+
+/**
+ * Derive and set the name of a PCI device.
+ */
+void
+pci_name_set(struct rte_pci_device *dev);
+
+/**
+ * Add a PCI device to the PCI Bus (append to PCI Device list). This function
+ * also updates the bus references of the PCI Device (and the generic device
+ * object embedded within).
+ *
+ * @param pci_dev
+ * PCI device to add
+ * @return void
+ */
+void rte_pci_add_device(struct rte_pci_device *pci_dev);
+
+/**
+ * Insert a PCI device in the PCI Bus at a particular location in the device
+ * list. It also updates the PCI Bus reference of the new devices to be
+ * inserted.
+ *
+ * @param exist_pci_dev
+ * Existing PCI device in PCI Bus
+ * @param new_pci_dev
+ * PCI device to be added before exist_pci_dev
+ * @return void
+ */
+void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev);
+
+/**
+ * Remove a PCI device from the PCI Bus. This sets to NULL the bus references
+ * in the PCI device object as well as the generic device object.
+ *
+ * @param pci_device
+ * PCI device to be removed from PCI Bus
+ * @return void
+ */
+void rte_pci_remove_device(struct rte_pci_device *pci_device);
+
+/**
+ * Update a pci device object by asking the kernel for the latest information.
+ *
+ * This function is private to EAL.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to look for
+ * @return
+ * - 0 on success.
+ * - negative on error.
+ */
+int pci_update_device(const struct rte_pci_addr *addr);
+
+/**
+ * Unbind kernel driver for this device
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_unbind_kernel_driver(struct rte_pci_device *dev);
+
+/**
+ * Map the PCI resource of a PCI device in virtual memory
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource(struct rte_pci_device *dev);
+
+/**
+ * Unmap the PCI resource of a PCI device
+ *
+ * This function is private to EAL.
+ */
+void pci_uio_unmap_resource(struct rte_pci_device *dev);
+
+/**
+ * Allocate uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to allocate uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ * If the function returns 0, the pointer will be filled.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_alloc_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource **uio_res);
+
+/**
+ * Free uio resource for PCI device
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device to free uio resource
+ * @param uio_res
+ * Pointer to uio resource.
+ */
+void pci_uio_free_resource(struct rte_pci_device *dev,
+ struct mapped_pci_resource *uio_res);
+
+/**
+ * Map device memory to uio resource
+ *
+ * This function is private to EAL.
+ *
+ * @param dev
+ * PCI device that has memory information.
+ * @param res_idx
+ * Memory resource index of the PCI device.
+ * @param uio_res
+ * uio resource that will keep mapping information.
+ * @param map_idx
+ * Mapping information index of the uio resource.
+ * @return
+ * 0 on success, negative on error
+ */
+int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
+ struct mapped_pci_resource *uio_res, int map_idx);
+
+/*
+ * Match the PCI Driver and Device using the ID Table
+ *
+ * @param pci_drv
+ * PCI driver from which ID table would be extracted
+ * @param pci_dev
+ * PCI device to match against the driver
+ * @return
+ * 1 for successful match
+ * 0 for unsuccessful match
+ */
+int
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev);
+
+/**
+ * Get the IOMMU class of the PCI devices on the bus and return their
+ * preferred IOVA mapping mode.
+ *
+ * @return
+ * - enum rte_iova_mode.
+ */
+enum rte_iova_mode
+rte_pci_get_iommu_class(void);
+
+#endif /* _PCI_PRIVATE_H_ */
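rte_pci_match() above is documented to return 1 on a successful match and 0 otherwise. A simplified sketch of the rule it applies, assuming only vendor/device wildcarding (the real function also honours the subsystem and class IDs):

#include <rte_bus_pci.h>

/* Simplified matching rule: accept any id_table entry whose vendor and
 * device fields are either wildcards or equal to the device's IDs. */
static int
pci_match_sketch(const struct rte_pci_driver *drv,
		const struct rte_pci_device *dev)
{
	const struct rte_pci_id *id;

	for (id = drv->id_table; id->vendor_id != 0; id++) {
		if ((id->vendor_id == PCI_ANY_ID ||
				id->vendor_id == dev->id.vendor_id) &&
		    (id->device_id == PCI_ANY_ID ||
				id->device_id == dev->id.device_id))
			return 1; /* successful match */
	}
	return 0; /* unsuccessful match */
}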
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
new file mode 100644
index 00000000..d4a29964
--- /dev/null
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -0,0 +1,340 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BUS_PCI_H_
+#define _RTE_BUS_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Pathname of PCI devices directory. */
+const char *rte_pci_get_sysfs_path(void);
+
+/* Forward declarations */
+struct rte_pci_device;
+struct rte_pci_driver;
+
+/** List of PCI devices */
+TAILQ_HEAD(rte_pci_device_list, rte_pci_device);
+/** List of PCI drivers */
+TAILQ_HEAD(rte_pci_driver_list, rte_pci_driver);
+
+/* PCI Bus iterators */
+#define FOREACH_DEVICE_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.driver_list), next)
+
+struct rte_devargs;
+
+/**
+ * A structure describing a PCI device.
+ */
+struct rte_pci_device {
+ TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_pci_addr addr; /**< PCI location. */
+ struct rte_pci_id id; /**< PCI ID. */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< PCI Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_pci_driver *driver; /**< Associated driver */
+ uint16_t max_vfs; /**< sriov enable if not zero */
+ enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
+};
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_pci_device.
+ */
+#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+
+#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+#define RTE_CLASS_ANY_ID (0xffffff)
+
+#ifdef __cplusplus
+/** C++ macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ RTE_CLASS_ANY_ID, \
+ (vend), \
+ (dev), \
+ PCI_ANY_ID, \
+ PCI_ANY_ID
+#else
+/** Macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+ .class_id = RTE_CLASS_ANY_ID, \
+ .vendor_id = (vend), \
+ .device_id = (dev), \
+ .subsystem_vendor_id = PCI_ANY_ID, \
+ .subsystem_device_id = PCI_ANY_ID
+#endif
+
+/**
+ * Initialisation function for the driver called during PCI probing.
+ */
+typedef int (pci_probe_t)(struct rte_pci_driver *, struct rte_pci_device *);
+
+/**
+ * Uninitialisation function for the driver called during hotplugging.
+ */
+typedef int (pci_remove_t)(struct rte_pci_device *);
+
+/**
+ * A structure describing a PCI driver.
+ */
+struct rte_pci_driver {
+ TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_pci_bus *bus; /**< PCI bus reference. */
+ pci_probe_t *probe; /**< Device Probe function. */
+ pci_remove_t *remove; /**< Device Remove function. */
+ const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
+ uint32_t drv_flags; /**< Flags controlling handling of device. */
+};
+
+/**
+ * Structure describing the PCI bus
+ */
+struct rte_pci_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_pci_device_list device_list; /**< List of PCI devices */
+ struct rte_pci_driver_list driver_list; /**< List of PCI drivers */
+};
+
+/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
+#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device driver supports link state interrupt */
+#define RTE_PCI_DRV_INTR_LSC 0x0008
+/** Device driver supports device removal interrupt */
+#define RTE_PCI_DRV_INTR_RMV 0x0010
+/** Device driver needs to keep mapped resources if unsupported dev detected */
+#define RTE_PCI_DRV_KEEP_MAPPED_RES 0x0020
+/** Device driver supports IOVA as VA */
+#define RTE_PCI_DRV_IOVA_AS_VA 0x0040
+
+/**
+ * Map the PCI device resources in user space virtual memory address
+ *
+ * Note that drivers should not call this function when the
+ * RTE_PCI_DRV_NEED_MAPPING flag is set, as the EAL performs the
+ * mapping automatically in that case.
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_pci_map_device(struct rte_pci_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ */
+void rte_pci_unmap_device(struct rte_pci_device *dev);
+
+/**
+ * Dump the content of the PCI bus.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void rte_pci_dump(FILE *f);
+
+/**
+ * Register a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be registered.
+ */
+void rte_pci_register(struct rte_pci_driver *driver);
+
+/** Helper for PCI device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
+RTE_INIT(pciinitfn_ ##nm); \
+static void pciinitfn_ ##nm(void) \
+{\
+ (pci_drv).driver.name = RTE_STR(nm);\
+ rte_pci_register(&pci_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+/**
+ * Unregister a PCI driver.
+ *
+ * @param driver
+ * A pointer to a rte_pci_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_pci_unregister(struct rte_pci_driver *driver);
+
+/**
+ * Read PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer into which the bytes should be read
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset);
+
+/**
+ * Write PCI config space.
+ *
+ * @param device
+ * A pointer to a rte_pci_device structure describing the device
+ * to use
+ * @param buf
+ * A data buffer containing the bytes to be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into PCI config space
+ */
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset);
+
+/**
+ * A structure used to access io resources for a pci device.
+ * rte_pci_ioport is arch, os, driver specific, and should not be used outside
+ * of pci ioport api.
+ */
+struct rte_pci_ioport {
+ struct rte_pci_device *dev;
+ uint64_t base;
+ uint64_t len; /* only filled for memory mapped ports */
+};
+
+/**
+ * Initialize a rte_pci_ioport object for a pci device io resource.
+ *
+ * This object is then used to gain access to those io resources (see below).
+ *
+ * @param dev
+ * A pointer to a rte_pci_device structure describing the device
+ * to use.
+ * @param bar
+ * Index of the PCI I/O resource we want to access.
+ * @param p
+ * The rte_pci_ioport object to be initialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
+
+/**
+ * Release any resources used in a rte_pci_ioport object.
+ *
+ * @param p
+ * The rte_pci_ioport object to be uninitialized.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_pci_ioport_unmap(struct rte_pci_ioport *p);
+
+/**
+ * Read from a PCI I/O resource.
+ *
+ * @param p
+ * The rte_pci_ioport object from which we want to read.
+ * @param data
+ * A data buffer into which the bytes should be read
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
+
+/**
+ * Write to a PCI I/O resource.
+ *
+ * @param p
+ * The rte_pci_ioport object to which we want to write.
+ * @param data
+ * A data buffer containing the bytes to be written
+ * @param len
+ * The length of the data buffer.
+ * @param offset
+ * The offset into the pci io resource.
+ */
+void rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BUS_PCI_H_ */
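A minimal usage sketch for this public header: declare an ID table with the RTE_PCI_DEVICE() helper and register a driver through RTE_PMD_REGISTER_PCI(). The vendor/device IDs and callback bodies are placeholders, not a real PMD:

#include <rte_bus_pci.h>

#define MY_VENDOR_ID 0x1234 /* placeholder */
#define MY_DEVICE_ID 0xabcd /* placeholder */

static const struct rte_pci_id my_id_table[] = {
	{ RTE_PCI_DEVICE(MY_VENDOR_ID, MY_DEVICE_ID) },
	{ .vendor_id = 0 }, /* sentinel: id_table is NULL terminated */
};

static int
my_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	(void)drv;
	(void)dev;
	return 0; /* device accepted */
}

static int
my_remove(struct rte_pci_device *dev)
{
	(void)dev;
	return 0;
}

static struct rte_pci_driver my_pmd = {
	.id_table = my_id_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = my_probe,
	.remove = my_remove,
};

RTE_PMD_REGISTER_PCI(net_my_pmd, my_pmd);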
diff --git a/drivers/bus/pci/rte_bus_pci_version.map b/drivers/bus/pci/rte_bus_pci_version.map
new file mode 100644
index 00000000..27e9c4f1
--- /dev/null
+++ b/drivers/bus/pci/rte_bus_pci_version.map
@@ -0,0 +1,18 @@
+DPDK_17.11 {
+ global:
+
+ rte_pci_dump;
+ rte_pci_get_sysfs_path;
+ rte_pci_ioport_map;
+ rte_pci_ioport_read;
+ rte_pci_ioport_unmap;
+ rte_pci_ioport_write;
+ rte_pci_map_device;
+ rte_pci_read_config;
+ rte_pci_register;
+ rte_pci_unmap_device;
+ rte_pci_unregister;
+ rte_pci_write_config;
+
+ local: *;
+};
diff --git a/drivers/net/xenvirt/Makefile b/drivers/bus/vdev/Makefile
index 8b4b8f03..84bd7244 100644
--- a/drivers/net/xenvirt/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -34,24 +34,24 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
# library name
#
-LIB = librte_pmd_xenvirt.a
+LIB = librte_bus_vdev.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lxenstore
-EXPORT_MAP := rte_eth_xenvirt_version.map
+# versioning export map
+EXPORT_MAP := rte_bus_vdev_version.map
+# library version
LIBABIVER := 1
-#
-# all source are stored in SRCS-y
-#
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.c rte_xen_lib.c
+SRCS-y += vdev.c
+
+LDLIBS += -lrte_eal
#
# Export include files
#
-SYMLINK-y-include += rte_eth_xenvirt.h
+SYMLINK-y-include += rte_bus_vdev.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eal/common/include/rte_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h
index 29f5a523..41762b85 100644
--- a/lib/librte_eal/common/include/rte_vdev.h
+++ b/drivers/bus/vdev/rte_bus_vdev.h
@@ -124,6 +124,28 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
#define RTE_PMD_REGISTER_ALIAS(nm, alias)\
static const char *vdrvinit_ ## nm ## _alias = RTE_STR(alias)
+/**
+ * Initialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be initialized.
+ * @param args
+ * The pointer to arguments used by driver initialization.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_init(const char *name, const char *args);
+
+/**
+ * Uninitialize a driver specified by name.
+ *
+ * @param name
+ * The pointer to a driver name to be uninitialized.
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vdev_uninit(const char *name);
+
#ifdef __cplusplus
}
#endif
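A short usage sketch for the two hotplug entry points added above; the device name and arguments are illustrative (they follow the net_null PMD's devargs):

#include <rte_bus_vdev.h>

/* Create a virtual device, use it, then tear it down again. */
static int
null_port_roundtrip(void)
{
	int ret;

	ret = rte_vdev_init("net_null0", "size=64,copy=0");
	if (ret < 0)
		return ret; /* no driver found, or its probe() failed */

	/* ... configure and use the port here ... */

	return rte_vdev_uninit("net_null0");
}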
diff --git a/drivers/bus/vdev/rte_bus_vdev_version.map b/drivers/bus/vdev/rte_bus_vdev_version.map
new file mode 100644
index 00000000..707b870c
--- /dev/null
+++ b/drivers/bus/vdev/rte_bus_vdev_version.map
@@ -0,0 +1,10 @@
+DPDK_17.11 {
+ global:
+
+ rte_vdev_init;
+ rte_vdev_register;
+ rte_vdev_uninit;
+ rte_vdev_unregister;
+
+ local: *;
+};
diff --git a/lib/librte_eal/common/eal_common_vdev.c b/drivers/bus/vdev/vdev.c
index f7e547a6..fd7736d6 100644
--- a/lib/librte_eal/common/eal_common_vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -41,12 +41,16 @@
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus.h>
-#include <rte_vdev.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
#include <rte_errno.h>
+#include "rte_bus_vdev.h"
+#include "vdev_logs.h"
+
+int vdev_logtype_bus;
+
/* Forward declare to access virtual bus name */
static struct rte_bus rte_vdev_bus;
@@ -102,7 +106,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
- RTE_LOG(DEBUG, EAL, "Search driver %s to probe device %s\n", name,
+ VDEV_LOG(DEBUG, "Search driver %s to probe device %s\n", name,
rte_vdev_device_name(dev));
if (vdev_parse(name, &driver))
@@ -124,6 +128,7 @@ find_vdev(const char *name)
TAILQ_FOREACH(dev, &vdev_device_list, next) {
const char *devname = rte_vdev_device_name(dev);
+
if (!strncmp(devname, name, strlen(name)))
return dev;
}
@@ -188,7 +193,7 @@ rte_vdev_init(const char *name, const char *args)
ret = vdev_probe_all_drivers(dev);
if (ret) {
if (ret > 0)
- RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
+ VDEV_LOG(ERR, "no driver found for %s\n", name);
goto fail;
}
@@ -211,7 +216,7 @@ vdev_remove_driver(struct rte_vdev_device *dev)
const struct rte_vdev_driver *driver;
if (!dev->device.driver) {
- RTE_LOG(DEBUG, EAL, "no driver attach to device %s\n", name);
+ VDEV_LOG(DEBUG, "no driver attach to device %s\n", name);
return 1;
}
@@ -292,7 +297,7 @@ vdev_probe(void)
continue;
if (vdev_probe_all_drivers(dev)) {
- RTE_LOG(ERR, EAL, "failed to initialize %s device\n",
+ VDEV_LOG(ERR, "failed to initialize %s device\n",
rte_vdev_device_name(dev));
return -1;
}
@@ -340,3 +345,10 @@ static struct rte_bus rte_vdev_bus = {
};
RTE_REGISTER_BUS(vdev, rte_vdev_bus);
+
+RTE_INIT(vdev_init_log)
+{
+ vdev_logtype_bus = rte_log_register("bus.vdev");
+ if (vdev_logtype_bus >= 0)
+ rte_log_set_level(vdev_logtype_bus, RTE_LOG_NOTICE);
+}
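The RTE_INIT constructor above registers a dynamic log type and caps it at NOTICE by default. A sketch of how a consumer of the bus can relax that at run time (vdev_logtype_bus is the symbol registered above):

#include <rte_log.h>

extern int vdev_logtype_bus; /* registered by the constructor above */

static void
vdev_log_debug_on(void)
{
	/* raise bus.vdev from its NOTICE default to DEBUG */
	rte_log_set_level(vdev_logtype_bus, RTE_LOG_DEBUG);
}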
diff --git a/drivers/bus/vdev/vdev_logs.h b/drivers/bus/vdev/vdev_logs.h
new file mode 100644
index 00000000..43b0ab8b
--- /dev/null
+++ b/drivers/bus/vdev/vdev_logs.h
@@ -0,0 +1,45 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VDEV_LOGS_H_
+#define _VDEV_LOGS_H_
+
+#include <rte_log.h>
+
+extern int vdev_logtype_bus;
+
+#define VDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vdev_logtype_bus, "%s(): " fmt, \
+ __func__, ##args)
+
+#endif /* _VDEV_LOGS_H_ */
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7a719b9d..645b6967 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,29 +31,18 @@
include $(RTE_SDK)/mk/rte.vars.mk
-core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_cryptodev
-
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
-DEPDIRS-aesni_gcm = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
-DEPDIRS-aesni_mb = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
-DEPDIRS-armv8 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DEPDIRS-openssl = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
-DEPDIRS-qat = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
-DEPDIRS-scheduler = $(core-libs) librte_kvargs librte_reorder
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
-DEPDIRS-snow3g = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
-DEPDIRS-kasumi = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DEPDIRS-zuc = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
-DEPDIRS-null = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
-DEPDIRS-dpaa2_sec = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 6fca5e1c..ddfec4c6 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -53,6 +53,9 @@ EXPORT_MAP := rte_pmd_aesni_gcm_version.map
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index d9c91d06..08dcacce 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -31,12 +31,10 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>
@@ -224,7 +222,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
RTE_ASSERT(m_src != NULL);
- while (offset >= m_src->data_len) {
+ while (offset >= m_src->data_len && data_length != 0) {
offset -= m_src->data_len;
m_src = m_src->next;
@@ -298,14 +296,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
sym_op->aead.digest.data,
(uint64_t)session->digest_length);
} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
- sym_op->m_dst : sym_op->m_src,
- session->digest_length);
-
- if (!auth_tag) {
- GCM_LOG_ERR("auth_tag");
- return -1;
- }
+ uint8_t *auth_tag = qp->temp_digest;
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
@@ -350,14 +341,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
sym_op->auth.digest.data,
(uint64_t)session->digest_length);
} else { /* AESNI_GMAC_OP_VERIFY */
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
- sym_op->m_dst : sym_op->m_src,
- session->digest_length);
-
- if (!auth_tag) {
- GCM_LOG_ERR("auth_tag");
- return -1;
- }
+ uint8_t *auth_tag = qp->temp_digest;
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
@@ -385,11 +369,10 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
* - Returns NULL on invalid job
*/
static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op,
+post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
- struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
-
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
@@ -397,8 +380,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op,
session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *digest;
- uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
- m->data_len - session->digest_length);
+ uint8_t *tag = (uint8_t *)&qp->temp_digest;
if (session->op == AESNI_GMAC_OP_VERIFY)
digest = op->sym->auth.digest.data;
@@ -414,9 +396,6 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op,
if (memcmp(tag, digest, session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, session->digest_length);
}
}
@@ -435,7 +414,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
struct rte_crypto_op *op,
struct aesni_gcm_session *sess)
{
- post_process_gcm_crypto_op(op, sess);
+ post_process_gcm_crypto_op(qp, op, sess);
/* Free session if a session-less crypto op */
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
@@ -505,22 +484,24 @@ static int aesni_gcm_remove(struct rte_vdev_device *vdev);
static int
aesni_gcm_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
enum aesni_gcm_vector_mode vector_mode;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
GCM_LOG_ERR("AES instructions not supported by CPU");
return -EFAULT;
}
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ GCM_LOG_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_GCM_AVX2;
@@ -529,14 +510,6 @@ aesni_gcm_create(const char *name,
else
vector_mode = RTE_AESNI_GCM_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct aesni_gcm_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- GCM_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_gcm_pmd_ops;
@@ -571,22 +544,17 @@ aesni_gcm_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-
-init_error:
- GCM_LOG_ERR("driver %s: create failed", init_params->name);
-
- aesni_gcm_remove(vdev);
- return -EFAULT;
}
static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_gcm_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -595,17 +563,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return aesni_gcm_create(name, vdev, &init_params);
}
@@ -613,16 +571,18 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
@@ -630,10 +590,13 @@ static struct rte_vdev_driver aesni_gcm_pmd_drv = {
.remove = aesni_gcm_remove
};
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+ cryptodev_driver_id);
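The core change in this PMD is replacing rte_pktmbuf_append()/rte_pktmbuf_trim() scratch space with the per-queue-pair temp_digest buffer, so verification no longer grows and shrinks the packet. A simplified sketch of the resulting verify flow; compute_digest() is a hypothetical stand-in for the qp->ops init/update/finalize calls:

#include <string.h>
#include <rte_crypto.h>

/* Queue-pair and session types come from aesni_gcm_pmd_private.h. */
static void compute_digest(struct aesni_gcm_qp *qp,
		struct aesni_gcm_session *sess, uint8_t *tag);

static void
verify_op_sketch(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
		struct aesni_gcm_session *sess)
{
	uint8_t *tag = qp->temp_digest; /* scratch, never touches the mbuf */

	compute_digest(qp, sess, tag);

	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	if (memcmp(tag, op->sym->aead.digest.data,
			sess->digest_length) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}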
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 48400ac2..0f315f00 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -195,7 +195,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"aesni_gcm_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
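The "n > sizeof()" to "n >= sizeof()" changes in this file fix an off-by-one truncation test: snprintf() returns the length that would have been written, excluding the terminating NUL, so a return value equal to the buffer size already means the name was cut short. A self-contained illustration:

#include <stdio.h>

/* Returns 1 when the formatted name fit, 0 when snprintf() truncated;
 * a return value equal to the buffer size is already a truncation. */
static int
qp_name_fits(char *buf, size_t sz, unsigned int dev_id, unsigned int qp_id)
{
	int n = snprintf(buf, sz, "aesni_gcm_pmd_%u_qp_%u", dev_id, qp_id);

	return n >= 0 && (size_t)n < sz;
}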
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 7e155729..1c8835b5 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -58,6 +58,8 @@
#define GCM_LOG_DBG(fmt, args...)
#endif
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 16
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
@@ -84,6 +86,11 @@ struct aesni_gcm_qp {
/**< Queue Pair Identifier */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 611d4123..a49f06f2 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -47,12 +47,15 @@ CFLAGS += $(WERROR_FLAGS)
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_pmd_aesni_version.map
+EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 16e14512..70043897 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -30,12 +30,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <des.h>
+
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -188,6 +189,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
+ uint8_t is_aes = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
@@ -217,45 +219,68 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.mode = CBC;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
sess->cipher.mode = CNTR;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
sess->cipher.mode = DOCSIS_SEC_BPI;
+ is_aes = 1;
break;
- default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
- return -ENOTSUP;
- }
-
- /* Check key length and choose key expansion function */
- switch (xform->cipher.key.length) {
- case AES_128_BYTES:
- sess->cipher.key_length_in_bytes = AES_128_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
- break;
- case AES_192_BYTES:
- sess->cipher.key_length_in_bytes = AES_192_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.mode = DES;
break;
- case AES_256_BYTES:
- sess->cipher.key_length_in_bytes = AES_256_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_DES;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
- return -EINVAL;
+ MB_LOG_ERR("Unsupported cipher mode parameter");
+ return -ENOTSUP;
}
/* Set IV parameters */
sess->iv.offset = xform->cipher.iv.offset;
sess->iv.length = xform->cipher.iv.length;
- /* Expanded cipher keys */
- (*aes_keyexp_fn)(xform->cipher.key.data,
- sess->cipher.expanded_aes_keys.encode,
- sess->cipher.expanded_aes_keys.decode);
+ /* Check key length and choose key expansion function for AES */
+ if (is_aes) {
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ } else {
+ if (xform->cipher.key.length != 8) {
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+ sess->cipher.key_length_in_bytes = 8;
+
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+ xform->cipher.key.data);
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+ xform->cipher.key.data);
+ }
return 0;
}
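The hunk above routes the AES ciphers through the per-key-size expansion helpers and the two new DES modes through the IPSec_MB des_key_schedule(), which takes a fixed 8-byte key and is run once per direction into the (still AES-named) expanded-key fields. A condensed sketch of just the DES branch, with types taken from rte_aesni_mb_pmd_private.h:

#include <errno.h>
#include <stdint.h>
#include <des.h> /* IPSec_MB */

static int
set_des_key_sketch(struct aesni_mb_session *sess,
		const uint8_t *key, unsigned int key_len)
{
	if (key_len != 8)
		return -EINVAL; /* DES keys are always 8 bytes */

	sess->cipher.key_length_in_bytes = 8;
	des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
			key);
	des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
			key);
	return 0;
}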
@@ -407,7 +432,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
*/
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
- struct rte_crypto_op *op)
+ struct rte_crypto_op *op, uint8_t *digest_idx)
{
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
struct aesni_mb_session *session;
@@ -466,19 +491,8 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set digest output location */
if (job->hash_alg != NULL_HASH &&
session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
- get_digest_byte_length(job->hash_alg));
-
- if (job->auth_tag_output == NULL) {
- MB_LOG_ERR("failed to allocate space in output mbuf "
- "for temp digest");
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -1;
- }
-
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
-
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
job->auth_tag_output = op->sym->auth.digest.data;
}
@@ -507,22 +521,17 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set user data to be crypto operation data struct */
job->user_data = op;
- job->user_data2 = m_dst;
return 0;
}
static inline void
-verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
- struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
-
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+ struct rte_crypto_op *op) {
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
@@ -532,8 +541,7 @@ verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
* @param job JOB_AES_HMAC job to process
*
* @return
- * - Returns processed crypto operation which mbuf is trimmed of output digest
- * used in verification of supplied digest.
+ * - Returns processed crypto operation.
* - Returns NULL on invalid job
*/
static inline struct rte_crypto_op *
@@ -552,7 +560,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (job->hash_alg != NULL_HASH) {
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
- verify_digest(job, op);
+ verify_digest(qp, job, op);
}
break;
default:
@@ -626,13 +634,16 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
}
static inline JOB_AES_HMAC *
-set_job_null_op(JOB_AES_HMAC *job)
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
job->chain_order = HASH_CIPHER;
job->cipher_mode = NULL_CIPHER;
job->hash_alg = NULL_HASH;
job->cipher_direction = DECRYPT;
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
return job;
}
@@ -650,6 +661,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
if (unlikely(nb_ops == 0))
return 0;
+ uint8_t digest_idx = qp->digest_idx;
do {
/* Get next operation to process from ingress queue */
retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
@@ -667,10 +679,10 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
}
- retval = set_mb_job_params(job, qp, op);
+ retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
- set_job_null_op(job);
+ set_job_null_op(job, op);
}
/* Submit job to multi-buffer for processing */
@@ -687,6 +699,8 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
} while (processed_jobs < nb_ops);
+ qp->digest_idx = digest_idx;
+
if (processed_jobs < 1)
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
@@ -700,15 +714,23 @@ static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static int
cryptodev_aesni_mb_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ MB_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ MB_LOG_ERR("failed to create cryptodev vdev");
+ return -ENODEV;
+ }
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
@@ -720,14 +742,6 @@ cryptodev_aesni_mb_create(const char *name,
else
vector_mode = RTE_AESNI_MB_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct aesni_mb_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_mb_pmd_ops;
@@ -764,41 +778,33 @@ cryptodev_aesni_mb_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
- init_params->name);
-
- cryptodev_aesni_mb_remove(vdev);
- return -EFAULT;
}
static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_mb_private),
rte_socket_id(),
- ""
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
- const char *name;
- const char *input_args;
+ const char *name, *args;
+ int retval;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+
+ args = rte_vdev_device_args(vdev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
@@ -806,16 +812,18 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
@@ -823,10 +831,14 @@ static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
.remove = cryptodev_aesni_mb_remove
};
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_aesni_mb_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+ cryptodev_aesni_mb_pmd_drv,
+ cryptodev_driver_id);
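
The hunks above migrate the PMD from the retired rte_cryptodev_vdev_* helpers to the generic rte_cryptodev_pmd_* API, and switch RTE_PMD_REGISTER_CRYPTO_DRIVER to its new three-argument form, where a static struct cryptodev_driver carries the registration. A minimal probe/remove skeleton using the new helpers is sketched below; everything outside the rte_* calls (the "demo" names, the private struct) is illustrative, not taken from the patch.

	#include <errno.h>
	#include <rte_bus_vdev.h>
	#include <rte_cryptodev_pmd.h>

	struct demo_private { unsigned int max_nb_sessions; };	/* hypothetical */
	static uint8_t demo_driver_id;

	static int
	demo_probe(struct rte_vdev_device *vdev)
	{
		struct rte_cryptodev_pmd_init_params init_params = {
			"",				/* name, filled in by the helper */
			sizeof(struct demo_private),	/* per-device private data size */
			rte_socket_id(),
			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
		};
		struct rte_cryptodev *dev;
		const char *name = rte_vdev_device_name(vdev);

		if (name == NULL)
			return -EINVAL;
		/* parses max_nb_queue_pairs=, max_nb_sessions= and socket_id= */
		if (rte_cryptodev_pmd_parse_input_args(&init_params,
				rte_vdev_device_args(vdev)))
			return -EINVAL;

		dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
		return dev == NULL ? -ENODEV : 0;
	}

	static int
	demo_remove(struct rte_vdev_device *vdev)
	{
		struct rte_cryptodev *dev =
			rte_cryptodev_pmd_get_named_dev(rte_vdev_device_name(vdev));

		if (dev == NULL)
			return -ENODEV;
		/* releases queue pairs, device data and the device slot in one call */
		return rte_cryptodev_pmd_destroy(dev);
	}

	static struct rte_vdev_driver demo_pmd_drv = {
		.probe = demo_probe,
		.remove = demo_remove
	};
	static struct cryptodev_driver demo_crypto_drv;

	RTE_PMD_REGISTER_VDEV(crypto_demo, demo_pmd_drv);
	RTE_PMD_REGISTER_CRYPTO_DRIVER(demo_crypto_drv, demo_pmd_drv, demo_driver_id);
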
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 692b354f..3b3ef0c0 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -247,6 +247,48 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
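
With the DES-CBC and DES-DOCSISBPI entries added above, an application can probe for them through the standard capability query API. A short sketch (dev_id is assumed to be a valid, configured cryptodev):

	const struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_DES_CBC
	};
	const struct rte_cryptodev_symmetric_capability *cap =
		rte_cryptodev_sym_capability_get(dev_id, &idx);

	/* key and IV are both fixed at 8 bytes in the table above */
	if (cap != NULL &&
	    rte_cryptodev_sym_capability_check_cipher(cap, 8, 8) == 0)
		printf("DES-CBC supported\n");
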
@@ -355,7 +397,7 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"aesni_mb_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
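
The change from > to >= fixes an off-by-one in the truncation check: snprintf() returns the length the formatted string would have had, excluding the terminating NUL, so a return value equal to the buffer size already means the output was truncated. A self-contained illustration:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		int n;

		n = snprintf(buf, sizeof(buf), "0123456");	/* 7 chars + NUL fit exactly */
		printf("%d %s\n", n, n >= (int)sizeof(buf) ? "truncated" : "ok");	/* 7 ok */

		n = snprintf(buf, sizeof(buf), "01234567");	/* needs 9 bytes, gets 8 */
		printf("%d %s\n", n, n >= (int)sizeof(buf) ? "truncated" : "ok");	/* 8 truncated */
		return 0;
	}

The old test n > sizeof(...) let the second case through even though buf now holds only "0123456".
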
@@ -373,7 +415,7 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
"%s_%s",
qp->name, str);
- if (n > sizeof(ring_name))
+ if (n >= sizeof(ring_name))
return NULL;
r = rte_ring_lookup(ring_name);
@@ -430,6 +472,11 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
memset(&qp->stats, 0, sizeof(qp->stats));
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "digest_mp_%u_%u", dev->data->dev_id, qp_id);
+
/* Initialise multi-buffer manager */
(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
return 0;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 6676948e..fe3bd730 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -61,6 +61,8 @@
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
+/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
static const unsigned auth_blocksize[] = {
[MD5] = 64,
[SHA1] = 64,
@@ -164,9 +166,17 @@ struct aesni_mb_qp {
/**< Session Mempool */
struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
+ uint8_t digest_idx;
+ /**< Index of the next slot to be used in temp_digests,
+ * to store the digest for a given operation
+ */
+ uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+ /**< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
-
/** AES-NI multi-buffer private session structure */
struct aesni_mb_session {
JOB_CHAIN_ORDER chain_order;
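
The per-queue-pair temp_digests array gives each in-flight multi-buffer job its own scratch slot for authentication-verify operations: the engine completes jobs asynchronously, so the computed digest needs storage that stays valid until the job finishes, and it must not overwrite the user-supplied digest it will be compared against. A sketch of the two halves of that pattern (the actual submit/complete code is outside this hunk; the job field names follow the multi-buffer library's job structure and are assumptions here):

	/* submit side of a verify op: point the engine at a scratch slot */
	job->auth_tag_output = qp->temp_digests[qp->digest_idx];
	qp->digest_idx = (qp->digest_idx + 1) % MAX_JOBS;	/* one slot per in-flight job */

	/* completion side: compare the scratch result with the user's digest */
	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
			digest_len) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
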
diff --git a/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map b/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
index ad607bbe..ad607bbe 100644
--- a/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
+++ b/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
index 86611fa2..79c260ff 100644
--- a/drivers/crypto/armv8/Makefile
+++ b/drivers/crypto/armv8/Makefile
@@ -51,12 +51,15 @@ CFLAGS += $(WERROR_FLAGS)
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_armv8_pmd_version.map
+EXPORT_MAP := rte_pmd_armv8_version.map
# external library dependencies
CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index a5c39c9b..97719f27 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -36,8 +36,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -575,8 +574,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
/** Process cipher operation */
static inline void
-process_armv8_chained_op
- (struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+process_armv8_chained_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
crypto_func_t crypto_func;
@@ -633,8 +632,7 @@ process_armv8_chained_op
op->sym->auth.data.length);
}
} else {
- adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
- sess->auth.digest_length);
+ adst = qp->temp_digest;
}
arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -655,15 +653,12 @@ process_armv8_chained_op
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(m_asrc,
- sess->auth.digest_length);
}
}
/** Process crypto operation for mbuf */
static inline int
-process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
struct armv8_crypto_session *sess)
{
struct rte_mbuf *msrc, *mdst;
@@ -676,7 +671,7 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
switch (sess->chain_order) {
case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
- process_armv8_chained_op(op, sess, msrc, mdst);
+ process_armv8_chained_op(qp, op, sess, msrc, mdst);
break;
default:
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
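
This is the same scratch-digest idea applied to the ARMv8 PMD: the removed lines appended digest_length spare bytes to the mbuf with rte_pktmbuf_append(), let the engine write the digest there, compared it, then trimmed the bytes back off with rte_pktmbuf_trim(). Pointing adst at the per-qp temp_digest buffer instead drops two mbuf mutations from the hot path and stops the driver from transiently growing user packets. Condensed (a sketch of the flow above, not a verbatim excerpt):

	uint8_t *adst = qp->temp_digest;	/* scratch, DIGEST_LENGTH_MAX bytes */

	/* ... engine runs the chained cipher+auth op, digest lands in adst ... */

	if (memcmp(adst, op->sym->auth.digest.data,
			sess->auth.digest_length) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	/* no rte_pktmbuf_append()/rte_pktmbuf_trim() on the packet any more */
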
@@ -763,7 +758,7 @@ armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
static int
cryptodev_armv8_crypto_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct armv8_crypto_private *internals;
@@ -790,14 +785,7 @@ cryptodev_armv8_crypto_create(const char *name,
return -EFAULT;
}
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct armv8_crypto_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -836,11 +824,12 @@ init_error:
static int
cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct armv8_crypto_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -849,18 +838,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0') {
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- }
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_armv8_crypto_create(name, vdev, &init_params);
}
@@ -869,6 +847,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
static int
cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
@@ -879,18 +858,25 @@ cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
"Closing ARMv8 crypto device %s on numa socket %u\n",
name, rte_socket_id());
- return 0;
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
-static struct rte_vdev_driver armv8_crypto_drv = {
+static struct rte_vdev_driver armv8_crypto_pmd_drv = {
.probe = cryptodev_armv8_crypto_init,
.remove = cryptodev_armv8_crypto_uninit
};
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+static struct cryptodev_driver armv8_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 00297beb..63776b26 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -209,7 +209,7 @@ armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index d02992a6..fa31f0a0 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -69,6 +69,9 @@ do { \
#define NBBY 8 /* Number of bits in a byte */
#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+/* Maximum length for digest (SHA-256 needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+
/** ARMv8 operation order mode enumerator */
enum armv8_crypto_chain_order {
ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
@@ -147,6 +150,11 @@ struct armv8_crypto_qp {
/**< Queue pair statistics */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
/** ARMv8 crypto private session structure */
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_version.map b/drivers/crypto/armv8/rte_pmd_armv8_version.map
index 1f84b68a..1f84b68a 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_version.map
+++ b/drivers/crypto/armv8/rte_pmd_armv8_version.map
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index ae15c99f..a08f2717 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -31,6 +31,12 @@
include $(RTE_SDK)/mk/rte.vars.mk
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
+endif
+endif
+
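
The guard above makes the build fail fast when rte_security support is absent. The matching configuration, with the 17.11 config system (config/common_base or a dpaa2 defconfig), would be:

	CONFIG_RTE_LIBRTE_SECURITY=y
	CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y
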
#
# library name
#
@@ -71,14 +77,9 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_cryptodev
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/bus/fslmc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/mempool/dpaa2
-
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index e0f6cfca..9c64c5d9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -36,6 +36,7 @@
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -73,12 +74,44 @@
#define FLE_POOL_NUM_BUFS 32000
#define FLE_POOL_BUF_SIZE 256
#define FLE_POOL_CACHE_SIZE 512
+#define SEC_FLC_DHR_OUTBOUND -114
+#define SEC_FLC_DHR_INBOUND 0
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
static uint8_t cryptodev_driver_id;
static inline int
+build_proto_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *mbuf = sym_op->m_src;
+
+ if (likely(bpid < MAX_BPID))
+ DPAA2_SET_FD_BPID(fd, bpid);
+ else
+ DPAA2_SET_FD_IVP(fd);
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
+ DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+
+ /* save physical address of mbuf */
+ op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+ mbuf->buf_iova = (uint64_t)op;
+
+ return 0;
+}
+
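
Protocol-offload frames use a simple (single-buffer) FD, so there is no FLE in which to park the rte_crypto_op pointer for retrieval at dequeue time. build_proto_fd() instead repurposes two fields for the duration of the operation: the op pointer is stashed in mbuf->buf_iova, and the displaced buf_iova value is parked in aead.digest.phys_addr, which protocol offload does not otherwise use. sec_simple_fd_to_mbuf() further down reverses this. The round trip, condensed:

	/* enqueue (build_proto_fd) */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;	/* park the real IOVA */
	mbuf->buf_iova = (uint64_t)op;				/* stash the op pointer */

	/* dequeue (sec_simple_fd_to_mbuf) */
	op = (struct rte_crypto_op *)mbuf->buf_iova;		/* recover the op */
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;	/* restore the IOVA */
	op->sym->aead.digest.phys_addr = 0L;
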
+static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
@@ -90,11 +123,17 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
int icv_len = sess->digest_length, retval;
uint8_t *old_icv;
+ struct rte_mbuf *dst;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD ADDR
@@ -155,9 +194,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
/* Configure Output SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
- sym_op->m_src->data_off - auth_only_len);
+ dst->data_off - auth_only_len);
sge->length = sym_op->aead.data.length + auth_only_len;
if (sess->dir == DIR_ENC) {
@@ -203,7 +242,6 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->aead.digest.data,
sess->digest_length);
- memset(sym_op->aead.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
@@ -236,9 +274,15 @@ build_authenc_fd(dpaa2_sec_session *sess,
uint8_t *old_icv;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+ struct rte_mbuf *dst;
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
/* we are using the first FLE entry to store Mbuf.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD ADDR
@@ -301,9 +345,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
/* Configure Output SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
- sym_op->m_src->data_off);
+ dst->data_off);
sge->length = sym_op->cipher.data.length;
if (sess->dir == DIR_ENC) {
@@ -342,7 +386,6 @@ build_authenc_fd(dpaa2_sec_session *sess,
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
sess->digest_length);
- memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
@@ -436,7 +479,6 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
sess->digest_length);
- memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
@@ -459,9 +501,15 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct ctxt_priv *priv = sess->ctxt;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+ struct rte_mbuf *dst;
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
@@ -506,9 +554,9 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sess->iv.length,
sym_op->m_src->data_off);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
- sym_op->m_src->data_off);
+ dst->data_off);
fle->length = sym_op->cipher.data.length + sess->iv.length;
@@ -545,12 +593,29 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
static inline int
-build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+build_sec_fd(struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
int ret = -1;
+ dpaa2_sec_session *sess;
PMD_INIT_FUNC_TRACE();
+ /*
+ * Segmented buffer is not supported.
+ */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -ENOTSUP;
+ }
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ sess = (dpaa2_sec_session *)get_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ sess = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ else
+ return -1;
switch (sess->ctxt_type) {
case DPAA2_SEC_CIPHER:
@@ -565,6 +630,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
case DPAA2_SEC_CIPHER_HASH:
ret = build_authenc_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_HASH_CIPHER:
default:
RTE_LOG(ERR, PMD, "error: Unsupported session\n");
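
build_sec_fd() now resolves the session itself by dispatching on op->sess_type, so plain cryptodev sessions and rte_security sessions share one enqueue path. On the application side the two attachments look like this (a sketch; rte_security_attach_session() is the 17.11 inline helper that sets sess_type to RTE_CRYPTO_OP_SECURITY_SESSION):

	/* ordinary symmetric crypto session */
	rte_crypto_op_attach_sym_session(op, sym_sess);
	/* op->sess_type is now RTE_CRYPTO_OP_WITH_SESSION */

	/* lookaside-protocol (IPsec) session created through rte_security */
	rte_security_attach_session(op, sec_sess);
	/* op->sess_type is now RTE_CRYPTO_OP_SECURITY_SESSION */
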
@@ -588,12 +656,11 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/* TODO: need to support multiple buffer pools */
uint16_t bpid;
struct rte_mempool *mb_pool;
- dpaa2_sec_session *sess;
if (unlikely(nb_ops == 0))
return 0;
- if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+ if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
return 0;
}
@@ -618,13 +685,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
for (loop = 0; loop < frames_to_send; loop++) {
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
- sess = (dpaa2_sec_session *)
- get_session_private_data(
- (*ops)->sym->session,
- cryptodev_driver_id);
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
- ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
PMD_DRV_LOG(ERR, "error: Improper packet"
" contents for crypto operation\n");
@@ -634,7 +697,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
}
loop = 0;
while (loop < frames_to_send) {
- loop += qbman_swp_send_multiple(swp, &eqdesc,
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
frames_to_send - loop);
}
@@ -649,11 +712,44 @@ skip_tx:
}
static inline struct rte_crypto_op *
-sec_fd_to_mbuf(const struct qbman_fd *fd)
+sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
+{
+ struct rte_crypto_op *op;
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ uint16_t diff = 0;
+ dpaa2_sec_session *sess_priv;
+
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ op = (struct rte_crypto_op *)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
+ op->sym->aead.digest.phys_addr = 0L;
+
+ sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (sess_priv->dir == DIR_ENC)
+ mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
+ else
+ mbuf->data_off += SEC_FLC_DHR_INBOUND;
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
+
+ return op;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
struct ctxt_priv *priv;
+ struct rte_mbuf *dst, *src;
+
+ if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
+ return sec_simple_fd_to_mbuf(fd, driver_id);
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
@@ -676,10 +772,17 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FLE_ADDR((fle - 1)));
/* Prefetch op */
- rte_prefetch0(op->sym->m_src);
+ src = op->sym->m_src;
+ rte_prefetch0(src);
+
+ if (op->sym->m_dst) {
+ dst = op->sym->m_dst;
+ rte_prefetch0(dst);
+ } else
+ dst = src;
PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+ (void *)dst, dst->buf_addr);
PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
(void *)DPAA2_GET_FD_ADDR(fd),
@@ -701,6 +804,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
{
/* Function is responsible for receiving frames for a given device and VQ */
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct rte_cryptodev *dev =
+ (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_qp->rx_vq.fqid;
int ret, num_rx = 0;
@@ -747,13 +852,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
* Also seems like the SWP is shared between the Ethernet Driver
* and the SEC driver.
*/
- while (!qbman_check_command_complete(swp, dq_storage))
+ while (!qbman_check_command_complete(dq_storage))
;
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
- while (!qbman_result_has_new_result(swp, dq_storage))
+ while (!qbman_check_new_result(dq_storage))
;
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
@@ -770,7 +875,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
}
fd = qbman_result_DQ_fd(dq_storage);
- ops[num_rx] = sec_fd_to_mbuf(fd);
+ ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
@@ -1547,6 +1652,300 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
}
static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ unsigned int bufsize;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->ctxt_type = DPAA2_SEC_IPSEC;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto out;
+ }
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ struct ip ip4_hdr;
+
+ flc->dhr = SEC_FLC_DHR_OUTBOUND;
+ ip4_hdr.ip_v = IPVERSION;
+ ip4_hdr.ip_hl = 5;
+ ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
+ ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ ip4_hdr.ip_id = 0;
+ ip4_hdr.ip_off = 0;
+ ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_sum = 0;
+ ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
+ sizeof(struct ip));
+
+ /* For Sec Proto only one descriptor is required. */
+ memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
+ encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ encap_pdb.spi = ipsec_xform->spi;
+ encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
+ 1, 0, &encap_pdb,
+ (uint8_t *)&ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ flc->dhr = SEC_FLC_DHR_INBOUND;
+ memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
+ 1, 0, &decap_pdb, &cipherdata, &authdata);
+ } else
+ goto out;
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1, i.e. reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
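
For the egress branch above, the configuration an application would pass in pairs an ESP tunnel xform with a cipher-then-auth crypto chain; note that the code reads conf->crypto_xform as the cipher and ->next as the auth for egress, and the reverse for ingress. A sketch with keys, addresses and the SPI left as placeholders:

	struct rte_crypto_sym_xform auth_x = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC /* key, digest len, ... */ }
	};
	struct rte_crypto_sym_xform cipher_x = {
		.next = &auth_x,			/* cipher first for egress */
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC /* key, IV, ... */ }
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1234,			/* placeholder */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4 = { /* src_ip, dst_ip, ttl, dscp */ }
			}
		},
		.crypto_xform = &cipher_x
	};
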
+static int
+dpaa2_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa2_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "DPAA2 PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa2_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
@@ -1820,11 +2219,28 @@ static struct rte_cryptodev_ops crypto_ops = {
.session_clear = dpaa2_sec_session_clear,
};
+static const struct rte_security_capability *
+dpaa2_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa2_sec_security_cap;
+}
+
+struct rte_security_ops dpaa2_sec_security_ops = {
+ .session_create = dpaa2_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa2_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa2_sec_capabilities_get
+};
+
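
Because dpaa2_sec_dev_init() below advertises RTE_CRYPTODEV_FF_SECURITY and installs this ops table in a per-device rte_security_ctx, applications reach it through the generic rte_security API rather than through cryptodev session calls. A sketch of that flow, assuming sess_mp is an application-owned session mempool and conf is filled as sketched earlier:

	struct rte_security_ctx *ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	const struct rte_security_capability *caps =
		rte_security_capabilities_get(ctx);	/* -> dpaa2_sec_capabilities_get() */

	struct rte_security_session *sess =
		rte_security_session_create(ctx, &conf, sess_mp);	/* -> session_create above */

	/* ... attach to ops, enqueue/dequeue ... */

	rte_security_session_destroy(ctx, sess);	/* -> session_destroy above */
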
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
rte_mempool_free(internals->fle_pool);
PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
@@ -1839,6 +2255,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa2_sec_dev_private *internals;
struct rte_device *dev = cryptodev->device;
struct rte_dpaa2_device *dpaa2_dev;
+ struct rte_security_ctx *security_instance;
struct fsl_mc_io *dpseci;
uint16_t token;
struct dpseci_attr attr;
@@ -1860,7 +2277,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY;
internals = cryptodev->data->dev_private;
internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
@@ -1874,6 +2292,17 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_LOG(DEBUG, "Device already init by primary process");
return 0;
}
+
+ /* Initialize security_ctx only for primary process */
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa2_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
/*Open the rte device via MC and save the handle for further use*/
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
@@ -1987,20 +2416,11 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
if (ret)
return ret;
- /* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
- cryptodev->device = NULL;
- cryptodev->data = NULL;
-
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
- .drv_type = DPAA2_MC_DPSECI_DEVID,
+ .drv_type = DPAA2_CRYPTO,
.driver = {
.name = "DPAA2 SEC PMD"
},
@@ -2008,5 +2428,8 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
.remove = cryptodev_dpaa2_sec_remove,
};
+static struct cryptodev_driver dpaa2_sec_crypto_drv;
+
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 3849a052..14e71df5 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -67,6 +67,11 @@ enum shr_desc_type {
#define DIR_ENC 1
#define DIR_DEC 0
+#define DPAA2_SET_FLC_EWS(flc) (flc->word1_bits23_16 |= 0x1)
+#define DPAA2_SET_FLC_RSC(flc) (flc->word1_bits31_24 |= 0x1)
+#define DPAA2_SET_FLC_REUSE_BS(flc) (flc->mode_bits |= 0x8000)
+#define DPAA2_SET_FLC_REUSE_FF(flc) (flc->mode_bits |= 0x2000)
+
/* SEC Flow Context Descriptor */
struct sec_flow_context {
/* word 0 */
@@ -411,4 +416,61 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+
+static const struct rte_security_capability dpaa2_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer over which to compute the checksum
+ * @param len buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
+
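
calc_chksum() is the standard RFC 1071 Internet checksum: sum the buffer as 16-bit words, fold the carries back in, and complement. It is valid to write the result straight into the IPv4 header only because the ip_sum field was zeroed before summing, as the egress code above does:

	ip4_hdr.ip_sum = 0;	/* field must be zero while summing */
	ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
				     sizeof(struct ip));
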
#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 4a109620..2a216afc 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -37,18 +37,34 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpseci.h>
#include <fsl_dpseci_cmd.h>
-int
-dpseci_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpseci_id,
- uint16_t *token)
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
{
+ struct dpseci_cmd_open *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -56,23 +72,34 @@ dpseci_open(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
cmd_flags,
0);
- DPSECI_CMD_OPEN(cmd, dpseci_id);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
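
From here on the file replaces the old DPSECI_CMD_*()/DPSECI_RSP_*() accessor macros with explicit parameter structs overlaid on cmd.params, converted to and from little-endian. Every rewritten call follows the same four-step shape, roughly (the _xxx names are placeholders for the per-command structs and IDs):

	struct mc_command cmd = { 0 };

	/* 1. encode command id, flags and, where needed, the object token */
	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_XXX, cmd_flags, token);

	/* 2. fill the request by overlaying a command struct on cmd.params */
	cmd_params = (struct dpseci_cmd_xxx *)cmd.params;
	cmd_params->some_field = cpu_to_le32(value);

	/* 3. send to the management complex */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* 4. read the response the same way, converting from little-endian */
	rsp_params = (struct dpseci_rsp_xxx *)cmd.params;
	*out = le32_to_cpu(rsp_params->some_field);
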
-int
-dpseci_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
@@ -81,478 +108,569 @@ dpseci_close(struct fsl_mc_io *mc_io,
cmd_flags,
token);
- /* send command to mc */
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpseci_cfg *cfg,
- uint32_t *obj_id)
+/**
+ * dpseci_create() - Create the DPSECI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
{
+ struct dpseci_cmd_create *cmd_params;
struct mc_command cmd = { 0 };
- int err;
+ int err, i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPSECI_CMD_CREATE(cmd, cfg);
-
- /* send command to mc */
+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
+ for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ cmd_params->priorities[i] = cfg->priorities[i];
+ cmd_params->num_tx_queues = cfg->num_tx_queues;
+ cmd_params->num_rx_queues = cfg->num_rx_queues;
+ cmd_params->options = cfg->options;
+
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
-int
-dpseci_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
{
+ struct dpseci_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
cmd_flags,
dprc_token);
- /* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
-{
- struct mc_command cmd = { 0 };
+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(object_id);
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
- cmd_flags,
- token);
-
- /* send command to mc */
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
- cmd_flags,
- token);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_is_enabled(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
- int *en)
-{
- struct mc_command cmd = { 0 };
- int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
- cmd_flags,
- token);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_IS_ENABLED(cmd, *en);
-
- return 0;
-}
-
-int
-dpseci_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
cmd_flags,
token);
- /* send command to mc */
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpseci_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
- cmd_flags,
- token);
- DPSECI_CMD_GET_IRQ(cmd, irq_index);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-
- return 0;
-}
-
-int
-dpseci_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpseci_irq_cfg *irq_cfg)
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
- /* send command to mc */
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint8_t *en)
+ int *en)
{
+ struct dpseci_rsp_is_enabled *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->en, ENABLE);
return 0;
}
-int
-dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t en)
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
- /* send command to mc */
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask)
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
{
+ struct dpseci_rsp_get_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
+ rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->options = rsp_params->options;
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
return 0;
}
-int
-dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask)
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
{
+ struct dpseci_cmd_set_rx_queue *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-
- /* send command to mc */
+ cmd_params = (struct dpseci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status)
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
{
+ struct dpseci_rsp_get_rx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
+ rsp_params = (struct dpseci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type =
+ dpseci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+ attr->order_preservation_en =
+ dpseci_get_field(rsp_params->order_preservation_en,
+ ORDER_PRESERVATION);
return 0;
}
-int
-dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint32_t status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags,
- token);
- DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_attr *attr)
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
{
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
cmd_flags,
token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_ATTR(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
return 0;
}
-int
-dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- const struct dpseci_rx_queue_cfg *cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
- cmd_flags,
- token);
- DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_rx_queue_attr *attr)
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
{
+ struct dpseci_rsp_get_sec_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
cmd_flags,
token);
- DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
- /* send command to mc */
+ /* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
return 0;
}
-int
-dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_tx_queue_attr *attr)
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @counters: Returned SEC counters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
{
+ struct dpseci_rsp_get_sec_counters *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
cmd_flags,
token);
- DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
- /* send command to mc */
+	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
+ counters->dequeued_requests =
+ le64_to_cpu(rsp_params->dequeued_requests);
+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
return 0;
}
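/*
 * Illustrative sketch: reading the global SEC counters and deriving a
 * total request count. Only the dpseci API calls are real; the helper
 * name is hypothetical and error handling is minimal.
 */
static inline uint64_t example_total_requests(struct fsl_mc_io *mc_io,
					      uint16_t token)
{
	struct dpseci_sec_counters cnt = {0};

	if (dpseci_get_sec_counters(mc_io, CMD_PRI_LOW, token, &cnt))
		return 0;
	return cnt.ob_enc_requests + cnt.ib_dec_requests;
}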
-int
-dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_attr *attr)
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpseci_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
- cmd_flags,
- token);
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
- /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- /* retrieve response parameters */
- DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
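/*
 * Illustrative sketch: a typical probe-time check against the MC
 * firmware's API version. The 5.1 floor is hypothetical, chosen only
 * because this patch bumps DPSECI_VER_MINOR to 1.
 */
static inline int example_check_api(struct fsl_mc_io *mc_io)
{
	uint16_t major = 0, minor = 0;
	int err;

	err = dpseci_get_api_version(mc_io, CMD_PRI_LOW, &major, &minor);
	if (err)
		return err;
	if (major < 5 || (major == 5 && minor < 1))
		return -1;	/* firmware too old for this sketch */
	return 0;
}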
-int
-dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- struct dpseci_sec_counters *counters)
+ const struct dpseci_congestion_notification_cfg *cfg)
{
+ struct dpseci_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
- int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
- cmd_flags,
- token);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
-
- return 0;
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ cmd_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cfg->dest_cfg.dest_id;
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_ctx = cfg->message_ctx;
+ cmd_params->message_iova = cfg->message_iova;
+ cmd_params->notification_mode = cfg->notification_mode;
+ cmd_params->threshold_entry = cfg->threshold_entry;
+ cmd_params->threshold_exit = cfg->threshold_exit;
+ dpseci_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->type_units,
+ CG_UNITS,
+ cfg->units);
+
+	/* send command to mc */
+ return mc_send_command(mc_io, &cmd);
}
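/*
 * Illustrative sketch: arming a frame-count congestion notification that
 * writes a CSCN message to memory on both entry and exit. The thresholds
 * are placeholders; 'cscn_iova' must point to 16B-aligned DMA-able
 * memory, per the cfg documentation in fsl_dpseci.h.
 */
static inline int example_arm_congestion(struct fsl_mc_io *mc_io,
					 uint16_t token, uint64_t cscn_iova)
{
	struct dpseci_congestion_notification_cfg cfg = {0};

	cfg.units = DPSECI_CONGESTION_UNIT_FRAMES;
	cfg.threshold_entry = 512;	/* placeholder watermarks */
	cfg.threshold_exit = 256;
	cfg.message_iova = cscn_iova;
	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
	cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;
	return dpseci_set_congestion_notification(mc_io, CMD_PRI_LOW,
						  token, &cfg);
}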
-int
-dpseci_get_api_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg)
{
+ struct dpseci_cmd_set_congestion_notification *rsp_params;
struct mc_command cmd = { 0 };
int err;
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
- cmd_flags,
- 0);
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+	/* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- DPSECI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ rsp_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
+ DEST_TYPE);
return 0;
}
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 6cc14a66..4acb595c 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,7 +37,6 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef __FSL_DPSECI_H
#define __FSL_DPSECI_H
@@ -61,394 +60,89 @@ struct fsl_mc_io;
*/
#define DPSECI_ALL_QUEUES (uint8_t)(-1)
-/**
- * dpseci_open() - Open a control session for the specified object
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpseci_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param dpseci_id DPSECI unique ID
- * @param token Returned token; use in subsequent API calls
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpseci_id,
- uint16_t *token);
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
- * dpseci_close() - Close the control session of the object
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * Enable the Congestion Group support
*/
-int
-dpseci_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+#define DPSECI_OPT_HAS_CG 0x000020
/**
* struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ *		each place in the array is the priority of the tx queue
+ *		towards the SEC;
+ *		valid priorities are configured with values 1-8
*/
struct dpseci_cfg {
- uint8_t num_tx_queues; /* num of queues towards the SEC */
- uint8_t num_rx_queues; /* num of queues back from the SEC */
+ uint32_t options;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
uint8_t priorities[DPSECI_PRIO_NUM];
- /**< Priorities for the SEC hardware processing;
- * each place in the array is the priority of the tx queue
- * towards the SEC,
- * valid priorities are configured with values 1-8;
- */
};
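/*
 * Illustrative sketch: a dpseci_cfg for a two-queue object with the new
 * congestion-group option set. Values are examples only; each priority
 * entry (1-8) is the SEC processing priority of the matching Tx queue.
 */
static const struct dpseci_cfg example_cfg = {
	.options = DPSECI_OPT_HAS_CG,
	.num_tx_queues = 2,
	.num_rx_queues = 2,
	.priorities = {1, 2},
};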
-/**
- * dpseci_create() - Create the DPSECI object
- * Create the DPSECI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param dprc_token Parent container token; '0' for default container
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param cfg Configuration structure
- * @param obj_id returned object id
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpseci_cfg *cfg,
- uint32_t *obj_id);
-
-/**
- * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param dprc_token Parent container token; '0' for default container
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param object_id The object id; it must be a valid id within the
- * container that created this object;
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
-
-/**
- * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/**
- * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/**
- * dpseci_is_enabled() - Check if the DPSECI is enabled.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param en Returns '1' if object is enabled; '0' otherwise
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_is_enabled(struct fsl_mc_io *mc_io,
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
uint32_t cmd_flags,
- uint16_t token,
- int *en);
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
-/**
- * struct dpseci_irq_cfg - IRQ configuration
- */
-struct dpseci_irq_cfg {
- uint64_t addr;
- /* Address that must be written to signal a message-based interrupt */
- uint32_t val;
- /* Value to write into irq_addr address */
- int irq_num;
- /* A user defined number associated with this IRQ */
-};
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index Identifies the interrupt index to configure
- * @param irq_cfg IRQ configuration
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpseci_irq_cfg *irq_cfg);
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpseci_get_irq() - Get IRQ information from the DPSECI
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param type Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @param irq_cfg IRQ attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpseci_irq_cfg *irq_cfg);
-
-/**
- * dpseci_set_irq_enable() - Set overall interrupt state.
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable control's the
- * overall interrupt state. if the interrupt is disabled no causes will cause
- * an interrupt
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param en Interrupt state - enable = 1, disable = 0
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint8_t en);
+ int *en);
-/**
- * dpseci_get_irq_enable() - Get overall interrupt state
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param en Returned Interrupt state - enable = 1,
- * disable = 0
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t *en);
-
-/**
- * dpseci_set_irq_mask() - Set interrupt mask.
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param mask event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting IRQ
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask);
-
-/**
- * dpseci_get_irq_mask() - Get interrupt mask.
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param mask Returned event mask to trigger interrupt
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask);
-
-/**
- * dpseci_get_irq_status() - Get the current status of any pending interrupts
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param status Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status);
-
-/**
- * dpseci_clear_irq_status() - Clear a pending interrupt's status
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param status bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpseci_attr - Structure representing DPSECI attributes
- * @param id: DPSECI object ID
- * @param num_tx_queues: number of queues towards the SEC
- * @param num_rx_queues: number of queues back from the SEC
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
*/
struct dpseci_attr {
- int id; /* DPSECI object ID */
- uint8_t num_tx_queues; /* number of queues towards the SEC */
- uint8_t num_rx_queues; /* number of queues back from the SEC */
+ int id;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint32_t options;
};
-/**
- * dpseci_get_attributes() - Retrieve DPSECI attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param attr Returned object's attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_attr *attr);
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
/**
* enum dpseci_dest - DPSECI destination types
@@ -471,16 +165,16 @@ enum dpseci_dest {
/**
* struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
*/
struct dpseci_dest_cfg {
- enum dpseci_dest dest_type; /* Destination type */
+ enum dpseci_dest dest_type;
int dest_id;
- /* Either DPIO ID or DPCON ID, depending on the destination type */
uint8_t priority;
- /* Priority selection within the DPIO or DPCON channel; valid values
- * are 0-1 or 0-7, depending on the number of priorities in that
- * channel; not relevant for 'DPSECI_DEST_NONE' option
- */
};
/**
@@ -504,243 +198,235 @@ struct dpseci_dest_cfg {
/**
* struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
*/
struct dpseci_rx_queue_cfg {
uint32_t options;
- /* Flags representing the suggested modifications to the queue;
- * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
- */
int order_preservation_en;
- /* order preservation configuration for the rx queue
- * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in
- * 'options'
- */
uint64_t user_ctx;
- /* User context value provided in the frame descriptor of each
- * dequeued frame;
- * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
- */
struct dpseci_dest_cfg dest_cfg;
- /* Queue destination parameters;
- * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
- */
};
-/**
- * dpseci_set_rx_queue() - Set Rx queue configuration
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation; use
- * DPSECI_ALL_QUEUES to configure all Rx queues
- * identically.
- * @param cfg Rx queue configuration
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- const struct dpseci_rx_queue_cfg *cfg);
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
/**
* struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration
+ * on the queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
*/
struct dpseci_rx_queue_attr {
uint64_t user_ctx;
- /* User context value provided in the frame descriptor of
- * each dequeued frame
- */
int order_preservation_en;
- /* Status of the order preservation configuration on the queue */
- struct dpseci_dest_cfg dest_cfg;
- /* Queue destination configuration */
+ struct dpseci_dest_cfg dest_cfg;
uint32_t fqid;
- /* Virtual FQID value to be used for dequeue operations */
};
-/**
- * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation
- * @param attr Returned Rx queue attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_rx_queue_attr *attr);
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
/**
* struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
*/
struct dpseci_tx_queue_attr {
uint32_t fqid;
- /* Virtual FQID to be used for sending frames to SEC hardware */
uint8_t priority;
- /* SEC hardware processing priority for the queue */
};
-/**
- * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation
- * @param attr Returned Tx queue attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_tx_queue_attr *attr);
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
/**
* struct dpseci_sec_attr - Structure representing attributes of the SEC
- * hardware accelerator
- */
+ * hardware accelerator
+ * @ip_id: ID for SEC.
+ * @major_rev: Major revision number for SEC.
+ * @minor_rev: Minor revision number for SEC.
+ * @era: SEC Era.
+ * @deco_num: The number of copies of the DECO that are implemented
+ * in this version of SEC.
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
+ * in this version of SEC.
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
+ * in this version of SEC.
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC.
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC.
+ * @crc_acc_num: The number of copies of the CRC module that are
+ * implemented in this version of SEC.
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC.
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC.
+ * @rng_acc_num: The number of copies of the Random Number Generator that
+ * are implemented in this version of SEC.
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that
+ * are implemented in this version of SEC.
+ * @arc4_acc_num: The number of copies of the ARC4 module that are
+ * implemented in this version of SEC.
+ * @des_acc_num: The number of copies of the DES module that are
+ * implemented in this version of SEC.
+ * @aes_acc_num: The number of copies of the AES module that are
+ * implemented in this version of SEC.
+ */
struct dpseci_sec_attr {
- uint16_t ip_id; /* ID for SEC */
- uint8_t major_rev; /* Major revision number for SEC */
- uint8_t minor_rev; /* Minor revision number for SEC */
- uint8_t era; /* SEC Era */
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
uint8_t deco_num;
- /* The number of copies of the DECO that are implemented in
- * this version of SEC
- */
uint8_t zuc_auth_acc_num;
- /* The number of copies of ZUCA that are implemented in this
- * version of SEC
- */
uint8_t zuc_enc_acc_num;
- /* The number of copies of ZUCE that are implemented in this
- * version of SEC
- */
uint8_t snow_f8_acc_num;
- /* The number of copies of the SNOW-f8 module that are
- * implemented in this version of SEC
- */
uint8_t snow_f9_acc_num;
- /* The number of copies of the SNOW-f9 module that are
- * implemented in this version of SEC
- */
uint8_t crc_acc_num;
- /* The number of copies of the CRC module that are implemented
- * in this version of SEC
- */
uint8_t pk_acc_num;
- /* The number of copies of the Public Key module that are
- * implemented in this version of SEC
- */
uint8_t kasumi_acc_num;
- /* The number of copies of the Kasumi module that are
- * implemented in this version of SEC
- */
uint8_t rng_acc_num;
- /* The number of copies of the Random Number Generator that are
- * implemented in this version of SEC
- */
uint8_t md_acc_num;
- /* The number of copies of the MDHA (Hashing module) that are
- * implemented in this version of SEC
- */
uint8_t arc4_acc_num;
- /* The number of copies of the ARC4 module that are implemented
- * in this version of SEC
- */
uint8_t des_acc_num;
- /* The number of copies of the DES module that are implemented
- * in this version of SEC
- */
uint8_t aes_acc_num;
- /* The number of copies of the AES module that are implemented
- * in this version of SEC
- */
};
-/**
- * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param attr Returned SEC attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_attr *attr);
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
/**
* struct dpseci_sec_counters - Structure representing global SEC counters,
* not per-DPSECI counters
+ * @dequeued_requests: Number of Requests Dequeued
+ * @ob_enc_requests: Number of Outbound Encrypt Requests
+ * @ib_dec_requests: Number of Inbound Decrypt Requests
+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
+ * @ob_prot_bytes: Number of Outbound Bytes Protected
+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
+ * @ib_valid_bytes: Number of Inbound Bytes Validated
*/
struct dpseci_sec_counters {
- uint64_t dequeued_requests; /* Number of Requests Dequeued */
- uint64_t ob_enc_requests; /* Number of Outbound Encrypt Requests */
- uint64_t ib_dec_requests; /* Number of Inbound Decrypt Requests */
- uint64_t ob_enc_bytes; /* Number of Outbound Bytes Encrypted */
- uint64_t ob_prot_bytes; /* Number of Outbound Bytes Protected */
- uint64_t ib_dec_bytes; /* Number of Inbound Bytes Decrypted */
- uint64_t ib_valid_bytes; /* Number of Inbound Bytes Validated */
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
};
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
/**
- * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param counters Returned SEC counters
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
*/
-int
-dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_counters *counters);
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
/**
- * dpseci_get_api_version() - Get Data Path SEC Interface API version
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param major_ver Major version of data path sec API
- * @param minor_ver Minor version of data path sec API
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
*/
-int
-dpseci_get_api_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver);
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state;
+ *	set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg);
#endif /* __FSL_DPSECI_H */
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 3f9f4743..a100a0ec 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,220 +37,187 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _FSL_DPSECI_CMD_H
#define _FSL_DPSECI_CMD_H
/* DPSECI Version */
-#define DPSECI_VER_MAJOR 5
-#define DPSECI_VER_MINOR 0
-
-/* Command IDs */
-#define DPSECI_CMDID_CLOSE ((0x800 << 4) | (0x1))
-#define DPSECI_CMDID_OPEN ((0x809 << 4) | (0x1))
-#define DPSECI_CMDID_CREATE ((0x909 << 4) | (0x1))
-#define DPSECI_CMDID_DESTROY ((0x989 << 4) | (0x1))
-#define DPSECI_CMDID_GET_API_VERSION ((0xa09 << 4) | (0x1))
-
-#define DPSECI_CMDID_ENABLE ((0x002 << 4) | (0x1))
-#define DPSECI_CMDID_DISABLE ((0x003 << 4) | (0x1))
-#define DPSECI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
-#define DPSECI_CMDID_RESET ((0x005 << 4) | (0x1))
-#define DPSECI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
-
-#define DPSECI_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
-#define DPSECI_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
-#define DPSECI_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
-#define DPSECI_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
-
-#define DPSECI_CMDID_SET_RX_QUEUE ((0x194 << 4) | (0x1))
-#define DPSECI_CMDID_GET_RX_QUEUE ((0x196 << 4) | (0x1))
-#define DPSECI_CMDID_GET_TX_QUEUE ((0x197 << 4) | (0x1))
-#define DPSECI_CMDID_GET_SEC_ATTR ((0x198 << 4) | (0x1))
-#define DPSECI_CMDID_GET_SEC_COUNTERS ((0x199 << 4) | (0x1))
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
- MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
- MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
- MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
- MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
- MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 1
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
- MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
- MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
- MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
- MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
- MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
- MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
- MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
- MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
- MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
- MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
- MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
- MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
- MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
- MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
- MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
- MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
- MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
- MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
- MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
- MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
- MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
- MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
- MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
- MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
- MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
- MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
+#define DPSECI_CMD_V1(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
+#define DPSECI_CMD_V2(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
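/*
 * Worked expansion: with DPSECI_CMD_ID_OFFSET == 4, the versioned macros
 * reproduce the old hand-coded literals, e.g.:
 *
 *	DPSECI_CMD_V1(0x800) == ((0x800 << 4) | 0x1) == 0x8001
 *	DPSECI_CMD_V2(0x909) == ((0x909 << 4) | 0x2) == 0x9092
 *
 * so only the version nibble differs between V1 and V2 commands.
 */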
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
+
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
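/*
 * Worked example: with DEST_TYPE at shift 0/size 4 and CG_UNITS at shift
 * 4/size 2 (defined further down), packing dest_type == 2 and units == 1
 * into one 'type_units' byte gives
 *
 *	dpseci_set_field(b, DEST_TYPE, 2);	// b == 0x02
 *	dpseci_set_field(b, CG_UNITS, 1);	// b == 0x12
 *
 * and dpseci_get_field(b, CG_UNITS) recovers 1.
 */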
+
+#pragma pack(push, 1)
+struct dpseci_cmd_open {
+ uint32_t dpseci_id;
+};
+
+struct dpseci_cmd_create {
+ uint8_t priorities[8];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad[6];
+ uint32_t options;
+};
+
+struct dpseci_cmd_destroy {
+ uint32_t dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+	/* only the LSB */
+ uint8_t en;
+};
+
+struct dpseci_rsp_get_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad1[6];
+ uint32_t options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t queue;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t queue;
+};
+
+struct dpseci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t pad1[3];
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t pad2;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pad3;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t pad4;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+struct dpseci_rsp_get_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+struct dpseci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+#define DPSECI_CG_UNITS_SHIFT 4
+#define DPSECI_CG_UNITS_SIZE 2
+
+struct dpseci_cmd_set_congestion_notification {
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+	/* from LSB: dest_type:4 units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
#endif /* _FSL_DPSECI_CMD_H */
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
new file mode 100644
index 00000000..17bc79c6
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -0,0 +1,73 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_sec.a
+
+# build flags
+CFLAGS += -D _GNU_SOURCE
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/
+# sharing the hw flib headers from dpaa2_sec pmd
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
+
+# library dependencies
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
new file mode 100644
index 00000000..16155b1a
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -0,0 +1,1548 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <of.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+enum rta_sec_era rta_sec_era;
+
+static uint8_t cryptodev_driver_id;
+
+static __thread struct rte_crypto_op **dpaa_sec_ops;
+static __thread int dpaa_sec_op_nb;
+
+static inline void
+dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
+{
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+	/* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
+{
+ struct dpaa_sec_op_ctx *ctx;
+ int retval;
+
+ retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || retval) {
+ PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ return NULL;
+ }
+	/*
+	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
+	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
+	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
+	 * for each packet, memset() would be costlier than dcbz_64().
+	 */
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+
+ return ctx;
+}
+
+static inline rte_iova_t
+dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ uint64_t vaddr_64, paddr;
+ int i;
+
+ vaddr_64 = (uint64_t)vaddr;
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (vaddr_64 >= memseg[i].addr_64 &&
+ vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
+ paddr = memseg[i].phys_addr +
+ (vaddr_64 - memseg[i].addr_64);
+
+ return (rte_iova_t)paddr;
+ }
+ }
+ return (rte_iova_t)0;
+}
+
+static inline void *
+dpaa_mem_ptov(rte_iova_t paddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ int i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (paddr >= memseg[i].phys_addr &&
+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ return (void *)(memseg[i].addr_64 +
+ (paddr - memseg[i].phys_addr));
+ }
+ return NULL;
+}
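+
+/* Usage sketch (illustrative only, not a fixed driver call sequence):
+ * addresses handed to CAAM go through dpaa_mem_vtop() and completion
+ * addresses are mapped back with dpaa_mem_ptov(), e.g.:
+ *
+ *	rte_iova_t io = dpaa_mem_vtop(rte_pktmbuf_mtod(mbuf, void *));
+ *	void *va = dpaa_mem_ptov(io);
+ *
+ * where va equals the original virtual address.
+ */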
+
+static void
+ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
+}
+
+/* Initialize the queue with the CAAM channel as destination so that
+ * all the packets in this queue can be dispatched to CAAM.
+ */
+static int
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
+ uint32_t fqid_out)
+{
+ struct qm_mcc_initfq fq_opts;
+ uint32_t flags;
+ int ret = -1;
+
+ /* Clear FQ options */
+ memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
+
+ flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL;
+
+ ret = qman_create_fq(0, flags, fq_in);
+ if (unlikely(ret != 0)) {
+ PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ return ret;
+ }
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+
+ qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
+ fq_opts.fqd.context_b = fqid_out;
+ fq_opts.fqd.dest.channel = qm_channel_caam;
+ fq_opts.fqd.dest.wq = 0;
+
+ fq_in->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq_in, flags, &fq_opts);
+ if (unlikely(ret != 0))
+ PMD_INIT_LOG(ERR, "qman_init_fq failed");
+
+ return ret;
+}
+
+/* Jobs are enqueued on in_fq and CAAM puts the crypto result into out_fq */
+static enum qman_cb_dqrr_result
+dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
+ struct qman_fq *fq __always_unused,
+ const struct qm_dqrr_entry *dqrr)
+{
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+
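+ /* Defer this DQRR entry if the per-thread burst array is full;
+ * it will be processed on a later poll.
+ */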
+ if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
+ return qman_cb_dqrr_defer;
+
+ if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
+ return qman_cb_dqrr_consume;
+
+ fd = &dqrr->fd;
+ /* The SG table is embedded in the op ctx:
+ * sg[0] is for output,
+ * sg[1] is for input.
+ */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
+ dpaa_sec_op_ending(ctx);
+
+ return qman_cb_dqrr_consume;
+}
+
+/* The CAAM result is delivered to this queue */
+static int
+dpaa_sec_init_tx(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcc_initfq opts;
+ uint32_t flags;
+
+ flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ ret = qman_create_fq(0, flags, fq);
+ if (unlikely(ret)) {
+ PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ return ret;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+
+ /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
+
+ fq->cb.dqrr = dqrr_out_fq_cb_rx;
+ fq->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (unlikely(ret)) {
+ PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ return ret;
+ }
+
+ return ret;
+}
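+
+/* Per-queue-pair frame queue topology (descriptive sketch):
+ *
+ *	enqueue --> inq --(qman)--> CAAM --(qman)--> outq --> dequeue
+ *
+ * inq is created by dpaa_sec_init_rx() with the CAAM channel as its
+ * destination; outq is created by dpaa_sec_init_tx() and is drained
+ * through dqrr_out_fq_cb_rx().
+ */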
+
+static inline int is_cipher_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_auth_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_aead(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int is_auth_cipher(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_encode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
+{
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
+{
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
+{
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+
+/* prepare command block of the session */
+static int
+dpaa_sec_prep_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ uint32_t shared_desc_len = 0;
+ struct sec_cdb *cdb = &ses->qp->cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported auth alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported aead alg\n");
+ return -ENOTSUP;
+ }
+ alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported auth alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
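+ /* rta_inline_query() decides, per key, whether the key data fits
+ * inline in the shared descriptor or must be referenced by pointer.
+ * The two key lengths are passed in sh_desc[0..1] and the result
+ * comes back as a bitmask in sh_desc[2] (bit 0: cipher key inline,
+ * bit 1: auth key inline).
+ */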
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (uint64_t)dpaa_mem_vtop(
+ (void *)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (uint64_t)dpaa_mem_vtop(
+ (void *)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+
+ /* auth_only_len is set to 0 here; it is overwritten in the fd
+ * for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+ cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
+ cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
+
+ return 0;
+}
+
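+/* Volatile dequeue: pull up to 'len' frames from 'fq' through this
+ * thread's portal and run the DQRR callbacks on them; returns the
+ * number of frames processed.
+ */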
+static inline unsigned int
+dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+{
+ unsigned int pkts = 0;
+ int ret;
+ struct qm_mcr_queryfq_np np;
+ enum qman_fq_state state;
+ uint32_t flags;
+ uint32_t vdqcr;
+
+ qman_query_fq_np(fq, &np);
+ if (np.frm_cnt) {
+ vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
+ if (exact)
+ vdqcr |= QM_VDQCR_EXACT;
+ ret = qman_volatile_dequeue(fq, 0, vdqcr);
+ if (ret)
+ return 0;
+ do {
+ pkts += qman_poll_dqrr(len);
+ qman_fq_state(fq, &state, &flags);
+ } while (flags & QMAN_FQ_STATE_VDQCR);
+ }
+ return pkts;
+}
+
+/* The qp is lockless; it must be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+{
+ struct qman_fq *fq;
+
+ fq = &qp->outq;
+ dpaa_sec_op_nb = 0;
+ dpaa_sec_ops = ops;
+
+ if (unlikely(nb_ops > DPAA_SEC_BURST))
+ nb_ops = DPAA_SEC_BURST;
+
+ return dpaa_volatile_deq(fq, nb_ops, 1);
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf->pkt.data
+ */
+static inline struct dpaa_sec_job *
+build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t start_addr;
+ uint8_t *old_digest;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ start_addr = rte_pktmbuf_iova(mbuf);
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+ if (is_decode(ses)) {
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ sg->length = sym->auth.data.length + ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ /* hash result or digest, save digest first */
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ cpu_to_hw_sg(sg);
+
+ /* let the hardware check the digest */
+ start_addr = dpaa_mem_vtop(old_digest);
+ sg++;
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+
+ return cf;
+}
+
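+/* Cipher-only compound frame layout (descriptive sketch):
+ *	sg[0]	output buffer
+ *	sg[1]	input, extension entry pointing to sg[2..3]
+ *	sg[2]	IV
+ *	sg[3]	source data
+ */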
+static inline struct dpaa_sec_job *
+build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ sg->final = 1;
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ uint32_t length = 0;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg,
+ dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
+ sg->length = sym->aead.data.length + ses->auth_only_len;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static int
+dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
+{
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ struct qm_fd fd;
+ int ret;
+ uint32_t auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
+ cryptodev_driver_id);
+
+ if (unlikely(!qp->ses || qp->ses != ses)) {
+ qp->ses = ses;
+ ses->qp = qp;
+ ret = dpaa_sec_prep_cdb(ses);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Segmented buffer is not supported.
+ */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -ENOTSUP;
+ }
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else {
+ PMD_TX_LOG(ERR, "not supported sec op");
+ return -ENOTSUP;
+ }
+ if (unlikely(!cf))
+ return -ENOMEM;
+
+ memset(&fd, 0, sizeof(struct qm_fd));
+ qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
+ fd._format1 = qm_fd_compound;
+ fd.length29 = 2 * sizeof(struct qm_sg_entry);
+ /* auth_only_len is set to 0 in the descriptor and is overwritten
+ * here in fd.cmd, which updates the DPOVRD register.
+ */
+ if (auth_only_len)
+ fd.cmd = 0x80000000 | auth_only_len;
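+ /* Busy-poll until qman accepts the frame (the enqueue ring may
+ * be transiently full).
+ */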
+ do {
+ ret = qman_enqueue(&qp->inq, &fd, 0);
+ } while (ret != 0);
+
+ return 0;
+}
+
+static uint16_t
+dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Transmit the frames to the given device and queue pair */
+ uint32_t loop;
+ int32_t ret;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+ uint16_t num_tx = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* Prepare each packet to be sent */
+ for (loop = 0; loop < nb_ops; loop++) {
+ if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+ PMD_TX_LOG(ERR, "sessionless crypto op not supported");
+ return 0;
+ }
+ ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
+ if (!ret)
+ num_tx++;
+ }
+ dpaa_qp->tx_pkts += num_tx;
+ dpaa_qp->tx_errs += nb_ops - num_tx;
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t num_rx;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+
+ num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
+
+ dpaa_qp->rx_pkts += num_rx;
+ dpaa_qp->rx_errs += nb_ops - num_rx;
+
+ PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+
+ return num_rx;
+}
+
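+/* Burst API usage sketch (illustrative only; assumes dev_id, qp_id and
+ * an ops[] array of session-attached rte_crypto_ops):
+ *
+ *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
+ *						    ops, nb_ops);
+ *	uint16_t done = 0;
+ *	while (done < sent)
+ *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
+ *						    &ops[done],
+ *						    sent - done);
+ */
+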
+/** Release queue pair */
+static int
+dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/** Start queue pair */
+static int
+dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of session structure */
+static unsigned int
+dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa_sec_session);
+}
+
+static int
+dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
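+/* AEAD transform sketch (illustrative only; key, IV_OFFSET and the AAD
+ * length are application-defined values, not driver fields):
+ *
+ *	struct rte_crypto_sym_xform xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ *		.aead = {
+ *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
+ *			.key = { .data = key, .length = 16 },
+ *			.iv = { .offset = IV_OFFSET, .length = 12 },
+ *			.digest_length = 16,
+ *			.aad_length = 8,
+ *		},
+ *	};
+ *
+ * The IV itself is carried in the op's private area at iv.offset.
+ */
+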
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+{
+ dpaa_sec_session *sess = ses;
+ struct dpaa_sec_qp *qp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->ses != NULL) {
+ PMD_INIT_LOG(ERR, "qp in-use by another session\n");
+ return -EBUSY;
+ }
+
+ qp->ses = sess;
+ sess->qp = qp;
+
+ return dpaa_sec_prep_cdb(sess);
+}
+
+static int
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+{
+ dpaa_sec_session *sess = ses;
+ struct dpaa_sec_qp *qp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->ses != NULL) {
+ qp->ses = NULL;
+ sess->qp = NULL;
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "No session attached to qp");
+ return -EINVAL;
+}
+
+static int
+dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ dpaa_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ dpaa_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ dpaa_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ dpaa_sec_cipher_init(dev, xform, session);
+ dpaa_sec_auth_init(dev, xform->next, session);
+ } else {
+ PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ dpaa_sec_auth_init(dev, xform, session);
+ dpaa_sec_cipher_init(dev, xform->next, session);
+ } else {
+ PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa_sec_aead_init(dev, xform, session);
+
+ } else {
+ PMD_DRV_LOG(ERR, "Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+}
+
+static int
+dpaa_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
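+/* Session setup sketch (illustrative only; sess_mp is an application
+ * created session mempool):
+ *
+ *	struct rte_cryptodev_sym_session *sess =
+ *		rte_cryptodev_sym_session_create(sess_mp);
+ *	ret = rte_cryptodev_sym_session_init(dev_id, sess, &xform,
+ *					     sess_mp);
+ *
+ * which ends up in dpaa_sec_session_configure() above.
+ */
+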
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->sym.max_nb_sessions_per_qp =
+ RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa_sec_dev_configure,
+ .dev_start = dpaa_sec_dev_start,
+ .dev_stop = dpaa_sec_dev_stop,
+ .dev_close = dpaa_sec_dev_close,
+ .dev_infos_get = dpaa_sec_dev_infos_get,
+ .queue_pair_setup = dpaa_sec_queue_pair_setup,
+ .queue_pair_release = dpaa_sec_queue_pair_release,
+ .queue_pair_start = dpaa_sec_queue_pair_start,
+ .queue_pair_stop = dpaa_sec_queue_pair_stop,
+ .queue_pair_count = dpaa_sec_queue_pair_count,
+ .session_get_size = dpaa_sec_session_get_size,
+ .session_configure = dpaa_sec_session_configure,
+ .session_clear = dpaa_sec_session_clear,
+ .qp_attach_session = dpaa_sec_qp_attach_sess,
+ .qp_detach_session = dpaa_sec_qp_detach_sess,
+};
+
+static int
+dpaa_sec_uninit(struct rte_cryptodev *dev)
+{
+ struct dpaa_sec_dev_private *internals;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->ctx_pool);
+ rte_free(internals);
+
+ PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp;
+ uint32_t i;
+ int ret;
+ char str[20];
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+ internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
+
+ for (i = 0; i < internals->max_nb_queue_pairs; i++) {
+ /* init qman fq for queue pair */
+ qp = &internals->qps[i];
+ ret = dpaa_sec_init_tx(&qp->outq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ goto init_error;
+ }
+ ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
+ qman_fq_fqid(&qp->outq));
+ if (ret) {
+ PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+ goto init_error;
+ }
+ }
+
+ snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ goto init_error;
+ }
+
+ PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ return 0;
+
+init_error:
+ PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+
+ dpaa_sec_uninit(cryptodev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
+ dpaa_dev->id.dev_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa_dev->crypto_dev = cryptodev;
+ cryptodev->device = &dpaa_dev->device;
+ cryptodev->device->driver = &dpaa_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
+ break;
+ }
+ }
+ }
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ /* In case of error, cleanup is done */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa_dev->crypto_dev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa_driver rte_dpaa_sec_driver = {
+ .drv_type = FSL_DPAA_CRYPTO,
+ .driver = {
+ .name = "DPAA SEC PMD"
+ },
+ .probe = cryptodev_dpaa_sec_probe,
+ .remove = cryptodev_dpaa_sec_remove,
+};
+
+static struct cryptodev_driver dpaa_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
new file mode 100644
index 00000000..af3f2550
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -0,0 +1,402 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA_SEC_H_
+#define _DPAA_SEC_H_
+
+#define NUM_POOL_CHANNELS 4
+#define DPAA_SEC_BURST 32
+#define DPAA_SEC_ALG_UNSUPPORT (-1)
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+
+/* The minimum job descriptor consists of a one-word job descriptor
+ * HEADER and a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
+#define CTX_POOL_CACHE_SIZE 512
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+enum dpaa_sec_op_type {
+ DPAA_SEC_NONE, /*!< No Cipher operations*/
+ DPAA_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA_SEC_AUTH, /*!< Authentication Operations */
+ DPAA_SEC_AEAD, /*!< Authenticated Encryption with associated data */
+ DPAA_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA_SEC_MAX
+};
+
+typedef struct dpaa_sec_session_entry {
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct dpaa_sec_qp *qp;
+ struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
+#define DPAA_SEC_MAX_DESC_SIZE 64
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __packed lo;
+ } __packed sh_hdr;
+
+ uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
+};
+
+struct dpaa_sec_qp {
+ struct dpaa_sec_dev_private *internals;
+ struct sec_cdb cdb; /* cmd block associated with qp */
+ dpaa_sec_session *ses; /* session associated with qp */
+ struct qman_fq inq;
+ struct qman_fq outq;
+ int rx_pkts;
+ int rx_errs;
+ int tx_pkts;
+ int tx_errs;
+};
+
+#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+/* internal sec queue interface */
+struct dpaa_sec_dev_private {
+ void *sec_hw;
+ struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
+ struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+};
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+struct dpaa_sec_job {
+ /* sg[0] output, sg[1] input, others are possible sub frames */
+ struct qm_sg_entry sg[MAX_SG_ENTRIES];
+};
+
+#define DPAA_MAX_NB_MAX_DIGEST 32
+struct dpaa_sec_op_ctx {
+ struct dpaa_sec_job job;
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
+ uint32_t fd_status;
+ uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
+};
+
+static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+#endif /* _DPAA_SEC_H_ */
diff --git a/drivers/net/xenvirt/virtio_logs.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index d6c33f7b..dfe69757 100644
--- a/drivers/net/xenvirt/virtio_logs.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright NXP 2017.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of NXP nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,40 +31,40 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _VIRTIO_LOGS_H_
-#define _VIRTIO_LOGS_H_
+#ifndef _DPAA_SEC_LOG_H_
+#define _DPAA_SEC_LOG_H_
-#include <rte_log.h>
-
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
#endif
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args)
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args)
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#endif
-
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
-#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args)
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
#endif
-#endif /* _VIRTIO_LOGS_H_ */
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _DPAA_SEC_LOG_H_ */
diff --git a/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
index b47cda0c..cf56b7a0 100644
--- a/drivers/crypto/kasumi/Makefile
+++ b/drivers/crypto/kasumi/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_KASUMI_PATH)
CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 38cd8a9b..f5db5e32 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -31,12 +31,10 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,7 +42,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_DIGEST_LENGTH 4
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
@@ -261,7 +258,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_kasumi_hash_op(struct rte_crypto_op **ops,
+process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
struct kasumi_session *session,
uint8_t num_ops)
{
@@ -287,8 +284,7 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
num_bytes = length_in_bits >> 3;
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- KASUMI_DIGEST_LENGTH);
+ dst = qp->temp_digest;
sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
num_bytes, dst);
@@ -296,10 +292,6 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
KASUMI_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- KASUMI_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
@@ -327,16 +319,16 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
session, num_ops);
break;
case KASUMI_OP_ONLY_AUTH:
- processed_ops = process_kasumi_hash_op(ops, session,
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
num_ops);
break;
case KASUMI_OP_CIPHER_AUTH:
processed_ops = process_kasumi_cipher_op(ops, session,
num_ops);
- process_kasumi_hash_op(ops, session, processed_ops);
+ process_kasumi_hash_op(qp, ops, session, processed_ops);
break;
case KASUMI_OP_AUTH_CIPHER:
- processed_ops = process_kasumi_hash_op(ops, session,
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
num_ops);
process_kasumi_cipher_op(ops, session, processed_ops);
break;
@@ -384,15 +376,15 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
session);
break;
case KASUMI_OP_ONLY_AUTH:
- processed_op = process_kasumi_hash_op(&op, session, 1);
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
break;
case KASUMI_OP_CIPHER_AUTH:
processed_op = process_kasumi_cipher_op_bit(op, session);
if (processed_op == 1)
- process_kasumi_hash_op(&op, session, 1);
+ process_kasumi_hash_op(qp, &op, session, 1);
break;
case KASUMI_OP_AUTH_CIPHER:
- processed_op = process_kasumi_hash_op(&op, session, 1);
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
if (processed_op == 1)
process_kasumi_cipher_op_bit(op, session);
break;
@@ -559,15 +551,17 @@ static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
static int
cryptodev_kasumi_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct kasumi_private *internals;
uint64_t cpu_flags = 0;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
@@ -575,14 +569,6 @@ cryptodev_kasumi_create(const char *name,
else
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct kasumi_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- KASUMI_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_kasumi_pmd_ops;
@@ -611,11 +597,12 @@ init_error:
static int
cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct kasumi_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -625,17 +612,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_kasumi_create(name, vdev, &init_params);
}
@@ -643,17 +620,18 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
static int
cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
@@ -661,10 +639,13 @@ static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
.remove = cryptodev_kasumi_remove
};
+static struct cryptodev_driver kasumi_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_kasumi_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index 8033114b..4da12f37 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -183,7 +183,7 @@ kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"kasumi_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
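
The comparison fix above matters because snprintf() reports intent, not what actually fit: the return value counts the characters that would have been written, excluding the terminating NUL. A minimal standalone sketch of the corrected check (buffer name and parameters are illustrative):

    #include <stdio.h>

    /* Returns -1 when the generated name would not fit in buf. */
    static int set_qp_name(char *buf, size_t size, unsigned int dev_id,
                           unsigned int qp_id)
    {
        int n = snprintf(buf, size, "kasumi_pmd_%u_qp_%u", dev_id, qp_id);

        /* n == size means the NUL displaced the last character, so the
         * name was already truncated: hence >=, not >. */
        if (n < 0 || (size_t)n >= size)
            return -1;
        return 0;
    }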
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index 0ce2a2e3..5f7044b8 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -58,6 +58,8 @@
#define KASUMI_LOG_DBG(fmt, args...)
#endif
+#define KASUMI_DIGEST_LENGTH 4
+
/** private data structure for each virtual KASUMI device */
struct kasumi_private {
unsigned max_nb_queue_pairs;
@@ -78,6 +80,11 @@ struct kasumi_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum kasumi_operation {
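
The temp_digest scratch buffer added above supports a verify flow along the following lines; this is a sketch, and compute_f9_digest() stands in for the driver's actual KASUMI F9 call:

    /* Generate our own digest into the queue pair's scratch buffer ... */
    compute_f9_digest(session, src, length, qp->temp_digest);

    /* ... then compare it with the digest supplied by the application. */
    if (memcmp(qp->temp_digest, op->sym->auth.digest.data,
               KASUMI_DIGEST_LENGTH) != 0)
        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;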
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
new file mode 100644
index 00000000..3532f7cf
--- /dev/null
+++ b/drivers/crypto/mrvl/Makefile
@@ -0,0 +1,66 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mrvl_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_ARCH_DMA_ADDR_T_64BIT
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_mrvl_pmd_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
new file mode 100644
index 00000000..c29fa105
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_compat.h
@@ -0,0 +1,50 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK,
+ * so only one definition is kept.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c
new file mode 100644
index 00000000..31f3fe58
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd.c
@@ -0,0 +1,857 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+static uint8_t cryptodev_driver_id;
+
+/**
+ * Flags whether a particular crypto algorithm is supported by the PMD/MUSDK.
+ *
+ * The idea is to have the Not Supported value as the default (0).
+ * This way we only need to define proper map sizes;
+ * non-initialized entries are not supported by default.
+ */
+enum algo_supported {
+ ALGO_NOT_SUPPORTED = 0,
+ ALGO_SUPPORTED = 1,
+};
+
+/** Map elements for cipher mapping.*/
+struct cipher_params_mapping {
+ enum algo_supported supported; /**< On/Off switch */
+ enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
+ enum sam_cipher_mode cipher_mode; /**< Cipher mode */
+ unsigned int max_key_len; /**< Maximum key length (in bytes)*/
+}
+/* We want to squeeze multiple maps into a cache line. */
+__rte_aligned(32);
+
+/** Map elements for auth mapping.*/
+struct auth_params_mapping {
+ enum algo_supported supported; /**< On/off switch */
+ enum sam_auth_alg auth_alg; /**< Auth algorithm */
+}
+/* We want to squeeze multiple maps into a cache line. */
+__rte_aligned(32);
+
+/**
+ * Map of supported cipher algorithms.
+ */
+static const
+struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_AES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/**
+ * Map of supported auth algorithms.
+ */
+static const
+struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_MD5 },
+ [RTE_CRYPTO_AUTH_MD5] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_MD5 },
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA1] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_224 },
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA256] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA384] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
+ [RTE_CRYPTO_AUTH_SHA512] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_512 },
+ [RTE_CRYPTO_AUTH_AES_GMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_AES_GMAC },
+};
+
+/**
+ * Map of supported aead algorithms.
+ */
+static const
+struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
+ [RTE_CRYPTO_AEAD_AES_GCM] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_GCM,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/*
+ *-----------------------------------------------------------------------------
+ * Forward declarations.
+ *-----------------------------------------------------------------------------
+ */
+static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
+
+/*
+ *-----------------------------------------------------------------------------
+ * Session Preparation.
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Get xform chain order.
+ *
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns Order of crypto operations.
+ */
+static enum mrvl_crypto_chain_order
+mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ /* Currently, Marvell supports at most 2 operations in a chain. */
+ if (xform->next != NULL && xform->next->next != NULL)
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->next != NULL) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
+ return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
+ return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return MRVL_CRYPTO_CHAIN_COMBINED;
+ }
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
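
For reference, a two-element chain that this classifier maps to MRVL_CRYPTO_CHAIN_CIPHER_AUTH could be built as below; the algorithm choices are illustrative:

    struct rte_crypto_sym_xform auth_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .next = NULL,
        .auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
        .auth.op = RTE_CRYPTO_AUTH_OP_GENERATE,
    };
    struct rte_crypto_sym_xform cipher_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .next = &auth_xform,    /* cipher first, then auth */
        .cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
        .cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
    };

    /* mrvl_crypto_get_chain_order(&cipher_xform)
     * returns MRVL_CRYPTO_CHAIN_CIPHER_AUTH. */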
+
+/**
+ * Set session parameters for cipher part.
+ *
+ * @param sess Crypto session pointer.
+ * @param cipher_xform Pointer to configuration structure for cipher operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ /* Make sure we've got the proper struct. */
+ if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
+ (cipher_map[cipher_xform->cipher.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ sess->sam_sess_params.dir =
+ (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ cipher_map[cipher_xform->cipher.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ cipher_map[cipher_xform->cipher.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Check the key length against the supported maximum. */
+ if (cipher_xform->cipher.key.length >
+ cipher_map[cipher_xform->cipher.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
+ sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for authentication part.
+ *
+ * @param sess Crypto session pointer.
+ * @param auth_xform Pointer to configuration structure for auth operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ /* Make sure we've got the proper struct. */
+ if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
+ (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.auth_alg =
+ auth_map[auth_xform->auth.algo].auth_alg;
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ auth_xform->auth.digest_length;
+ /* auth_key must be NULL if auth algorithm does not use HMAC */
+ sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+ auth_xform->auth.key.data : NULL;
+ sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for aead part.
+ *
+ * @param sess Crypto session pointer.
+ * @param aead_xform Pointer to configuration structure for aead operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *aead_xform)
+{
+ /* Make sure we've got the proper struct. */
+ if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+ if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
+ (aead_map[aead_xform->aead.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ aead_map[aead_xform->aead.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ aead_map[aead_xform->aead.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Check the key length against the supported maximum. */
+ if (aead_xform->aead.key.length >
+ aead_map[aead_xform->aead.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
+
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
+ sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
+
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ aead_xform->aead.digest_length;
+
+ sess->sam_sess_params.u.basic.auth_aad_len =
+ aead_xform->aead.aad_length;
+
+ return 0;
+}
+
+/**
+ * Parse crypto transform chain and setup session parameters.
+ *
+ * @param sess Pointer to crypto session.
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = mrvl_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+ cipher_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+ auth_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cipher_xform != NULL) &&
+ (mrvl_crypto_set_cipher_session_parameters(
+ sess, cipher_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+
+ if ((auth_xform != NULL) &&
+ (mrvl_crypto_set_auth_session_parameters(
+ sess, auth_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+
+ if ((aead_xform != NULL) &&
+ (mrvl_crypto_set_aead_session_parameters(
+ sess, aead_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function basically translates a DPDK crypto request into one
+ * understandable by MUSDK's SAM. If this is the first request in a session,
+ * it starts the session.
+ *
+ * @param request Pointer to pre-allocated && reset request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+ struct sam_buf_info *src_bd,
+ struct sam_buf_info *dst_bd,
+ struct rte_crypto_op *op)
+{
+ struct mrvl_crypto_session *sess;
+ struct rte_mbuf *dst_mbuf;
+ uint8_t *digest;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless.",
+ op);
+ return -EINVAL;
+ }
+
+ sess = (struct mrvl_crypto_session *)get_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ if (unlikely(sess == NULL)) {
+ MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ return -EINVAL;
+ }
+
+ /*
+ * If the application delivered a null dst buffer, it expects
+ * the result to be delivered in the src buffer.
+ */
+ dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ /* Only single-segment buffers are supported. */
+ request->num_bufs = 1;
+ request->src = src_bd;
+ src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+ src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ return -1;
+ }
+
+ /* Empty destination. */
+ if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+ /* Make dst buffer fit at least source data. */
+ if (rte_pktmbuf_append(dst_mbuf,
+ rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ return -1;
+ }
+ }
+
+ request->dst = dst_bd;
+ dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+ /*
+ * We can use all available space in dst_mbuf,
+ * not only what's used currently.
+ */
+ dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+ if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+ request->cipher_len = op->sym->aead.data.length;
+ request->cipher_offset = op->sym->aead.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_aad = op->sym->aead.aad.data;
+ request->auth_offset = request->cipher_offset;
+ request->auth_len = request->cipher_len;
+ } else {
+ request->cipher_len = op->sym->cipher.data.length;
+ request->cipher_offset = op->sym->cipher.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_offset = op->sym->auth.data.offset;
+ request->auth_len = op->sym->auth.data.length;
+ }
+
+ digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+ op->sym->aead.digest.data : op->sym->auth.digest.data;
+ if (digest == NULL) {
+ /* No auth operation requested - nothing more to prepare. */
+ return 0;
+ }
+
+ request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+ /*
+ * EIP supports only scenarios where the ICV (digest buffer) is placed at
+ * auth_icv_offset. Any other placement means risking errors.
+ */
+ if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+ /*
+ * This should be the most common case anyway;
+ * EIP will overwrite the DST buffer at auth_icv_offset.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ dst_mbuf, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+ /*
+ * EIP will look for digest at auth_icv_offset
+ * offset in SRC buffer.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ op->sym->m_src, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ }
+
+ /*
+ * If we reach this point, the digest pointer is not
+ * at the expected place.
+ */
+ return -1;
+}
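
In practice the placement rule above means a well-formed op keeps the digest directly after the authenticated data in the mbuf. A sketch of setting up such an op for this PMD (the mbuf and op variables are assumed to be prepared elsewhere):

    uint32_t icv_off = op->sym->auth.data.offset + op->sym->auth.data.length;

    /* Digest lives in the same mbuf, exactly at auth_icv_offset. */
    op->sym->auth.digest.data =
        rte_pktmbuf_mtod_offset(mbuf, uint8_t *, icv_off);
    op->sym->auth.digest.phys_addr =
        rte_pktmbuf_iova_offset(mbuf, icv_off);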
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t iter_ops = 0;
+ uint16_t to_enq = 0;
+ uint16_t consumed = 0;
+ int ret;
+ struct sam_cio_op_params requests[nb_ops];
+ /*
+ * DPDK uses single-fragment buffers here, so the descriptors can be
+ * kept simple. SAM does not store bd pointers, so on-stack scope is enough.
+ */
+ struct sam_buf_info src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+ if (nb_ops == 0)
+ return 0;
+
+ /* Prepare the burst. */
+ memset(&requests, 0, sizeof(requests));
+
+ /* Iterate through */
+ for (; iter_ops < nb_ops; ++iter_ops) {
+ if (mrvl_request_prepare(&requests[iter_ops],
+ &src_bd[iter_ops],
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_CRYPTO_LOG_ERR(
+ "Error while parameters preparation!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ /*
+ * The number of handled ops is increased
+ * (even if handling resulted in an error).
+ */
+ ++consumed;
+ break;
+ }
+
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ /* Increase the number of ops to enqueue. */
+ ++to_enq;
+ } /* for (; iter_ops < nb_ops;... */
+
+ if (to_enq > 0) {
+ /* Send the burst */
+ ret = sam_cio_enq(qp->cio, requests, &to_enq);
+ consumed += to_enq;
+ if (ret < 0) {
+ /*
+ * Trust SAM that the returned count will eventually be
+ * correct (for now it is returned unmodified).
+ */
+ qp->stats.enqueue_err_count += to_enq;
+ for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ qp->stats.enqueued_count += to_enq;
+ return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int ret;
+ struct mrvl_crypto_qp *qp = queue_pair;
+ struct sam_cio *cio = qp->cio;
+ struct sam_cio_op_result results[nb_ops];
+ uint16_t i;
+
+ ret = sam_cio_deq(cio, results, &nb_ops);
+ if (ret < 0) {
+ /* Count all dequeued as error. */
+ qp->stats.dequeue_err_count += nb_ops;
+
+ /* But account for them as dequeued anyway. */
+ qp->stats.dequeued_count += nb_ops;
+
+ return 0;
+ }
+
+ /* Unpack and check results. */
+ for (i = 0; i < nb_ops; ++i) {
+ ops[i] = results[i].cookie;
+
+ switch (results[i].status) {
+ case SAM_CIO_OK:
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case SAM_CIO_ERR_ICV:
+ MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ break;
+ default:
+ MRVL_CRYPTO_LOG_DBG(
+ "CIO returned Error: %d", results[i].status);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+
+ qp->stats.dequeued_count += nb_ops;
+ return nb_ops;
+}
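
From the application side the two handlers above are reached through the generic cryptodev burst API; a minimal polling loop might look like this (dev_id, qp_id, and the ops arrays are assumed to be set up elsewhere):

    uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    uint16_t done = 0;

    /* Poll until everything that was accepted comes back. */
    while (done < sent)
        done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                                            deq_ops + done, sent - done);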
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct mrvl_crypto_private *internals;
+ struct sam_init_params sam_params;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+ /* Register rx/tx burst functions for data path. */
+ dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+ dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ /*
+ * ret == -EEXIST is fine; it means DMA
+ * has already been initialized.
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ return ret;
+
+ MRVL_CRYPTO_LOG_INFO(
+ "DMA memory has been already initialized by a different driver.");
+ }
+
+ sam_params.max_num_sessions = internals->max_nb_sessions;
+
+ return sam_init(&sam_params);
+
+init_error:
+ MRVL_CRYPTO_LOG_ERR(
+ "driver %s: %s failed", init_params->name, __func__);
+
+ cryptodev_mrvl_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = { };
+ const char *name, *args;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ args = rte_vdev_device_args(vdev);
+
+ init_params.private_data_size = sizeof(struct mrvl_crypto_private);
+ init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
+ init_params.max_nb_sessions =
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
+ init_params.socket_id = rte_socket_id();
+
+ ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
+}
+
+/**
+ * Uninitialize the crypto device
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing Marvell crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ sam_deinit();
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+/**
+ * Basic driver handlers for use in the constructor.
+ */
+static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
+ .probe = cryptodev_mrvl_crypto_init,
+ .remove = cryptodev_mrvl_crypto_uninit
+};
+
+static struct cryptodev_driver mrvl_crypto_drv;
+
+/* Register the driver in constructor. */
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
+ cryptodev_driver_id);
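
Given the parameter string registered above, the PMD can be instantiated from the EAL command line, for example (the application name and parameter values are illustrative):

    ./app --vdev="crypto_mrvl,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"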
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
new file mode 100644
index 00000000..434cf850
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
@@ -0,0 +1,778 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+/**
+ * Capabilities list to be used in reporting to DPDK.
+ */
+static const struct rte_cryptodev_capabilities
+ mrvl_crypto_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/**
+ * Configure device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param config Pointer to configuration structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/**
+ * Start device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/**
+ * Stop device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static void
+mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/**
+ * Get device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param stats Pointer to statistics structure [out].
+ */
+static void
+mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/**
+ * Reset device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ */
+static void
+mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+/**
+ * Get device info (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param dev_info Pointer to the device info structure [out].
+ */
+static void
+mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct mrvl_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = mrvl_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/**
+ * Release queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of Queue Pair to release.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct mrvl_crypto_qp *qp =
+ (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ sam_cio_flush(qp->cio);
+ sam_cio_deinit(qp->cio);
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * Close device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_close(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ return 0;
+}
+
+/**
+ * Set up a queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @param qp_conf Queue pair configuration (nb of descriptors).
+ * @param socket_id NUMA socket to allocate memory on.
+ * @param session_pool Pointer to the session mempool.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct mrvl_crypto_qp *qp = NULL;
+ char match[RTE_CRYPTODEV_NAME_LEN];
+ unsigned int n;
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ /* Free the old qp prior to setup if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ do { /* Error handling block */
+
+ /*
+ * This extra check is necessary due to a bug in
+ * the crypto library.
+ */
+ int num = sam_get_num_inst();
+ if (num == 0) {
+ MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+ return -1;
+ }
+
+ /*
+ * In case two crypto engines are enabled qps will
+ * be evenly spread among them. Even and odd qps will
+ * be handled by cio-0 and cio-1 respectively. qp-cio mapping
+ * will look as follows:
+ *
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+ *
+ * qp: 4 5 6 7
+ * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+ *
+ * In case just one engine is enabled mapping will look as
+ * follows:
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+ */
+ n = snprintf(match, sizeof(match), "cio-%u:%u",
+ qp_id % num, qp_id / num);
+
+ if (n >= sizeof(match))
+ break;
+
+ qp->cio_params.match = match;
+ qp->cio_params.size = qp_conf->nb_descriptors;
+
+ if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+ break;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ dev->data->queue_pairs[qp_id] = qp;
+ return 0;
+ } while (0);
+
+ rte_free(qp);
+ return -1;
+}
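
The qp-to-cio mapping in the comment above is plain modular arithmetic on the engine count; a standalone sketch of the match-string computation (num and qp_id are illustrative inputs):

    char match[16];
    int num = 2;           /* engines reported by sam_get_num_inst() */
    uint16_t qp_id = 5;

    /* qp 5 with 2 engines -> "cio-1:2" (engine 1, ring 2). */
    snprintf(match, sizeof(match), "cio-%u:%u", qp_id % num, qp_id / num);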
+
+/** Start queue pair (PMD ops callback) - not supported.
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @returns -ENOTSUP. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair (PMD ops callback) - not supported.
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @returns -ENOTSUP. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+ return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crypto configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ struct mrvl_crypto_session *mrvl_sess;
+ void *sess_private_data;
+ int ret;
+
+ if (sess == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+ CDEV_LOG_ERR("Couldn't get object from session mempool.");
+ return -ENOMEM;
+ }
+
+ ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id, sess_private_data);
+
+ mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+ if (sam_session_create(&mrvl_sess->sam_sess_params,
+ &mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+ return -EIO;
+ }
+
+ return 0;
+}
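
From the application's point of view this callback sits behind the generic session API; a typical create-and-init sequence is sketched below (dev_id, sess_mp, and cipher_xform are assumed to exist):

    struct rte_cryptodev_sym_session *sess =
        rte_cryptodev_sym_session_create(sess_mp);

    /* Invokes mrvl_crypto_pmd_session_configure() under the hood. */
    if (sess == NULL ||
        rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform, sess_mp))
        rte_exit(EXIT_FAILURE, "session setup failed\n");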
+
+/**
+ * Clear the session memory so it doesn't leave key material behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @param sess Pointer to the session to clear.
+ */
+static void
+mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ struct mrvl_crypto_session *mrvl_sess =
+ (struct mrvl_crypto_session *)sess_priv;
+
+ if (mrvl_sess->sam_sess &&
+ sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ }
+
+ memset(sess_priv, 0, sizeof(struct mrvl_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+ .dev_configure = mrvl_crypto_pmd_config,
+ .dev_start = mrvl_crypto_pmd_start,
+ .dev_stop = mrvl_crypto_pmd_stop,
+ .dev_close = mrvl_crypto_pmd_close,
+
+ .dev_infos_get = mrvl_crypto_pmd_info_get,
+
+ .stats_get = mrvl_crypto_pmd_stats_get,
+ .stats_reset = mrvl_crypto_pmd_stats_reset,
+
+ .queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+ .queue_pair_release = mrvl_crypto_pmd_qp_release,
+ .queue_pair_start = mrvl_crypto_pmd_qp_start,
+ .queue_pair_stop = mrvl_crypto_pmd_qp_stop,
+ .queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+ .session_get_size = mrvl_crypto_pmd_session_get_size,
+ .session_configure = mrvl_crypto_pmd_session_configure,
+ .session_clear = mrvl_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..923faaf9
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
@@ -0,0 +1,123 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+ MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+ MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+ MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+ MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+ MRVL_CRYPTO_CHAIN_COMBINED,
+ MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+ /** SAM CIO (MUSDK Queue Pair equivalent).*/
+ struct sam_cio *cio;
+
+ /** Session Mempool. */
+ struct rte_mempool *sess_mp;
+
+ /** Queue pair statistics. */
+ struct rte_cryptodev_stats stats;
+
+ /** CIO initialization parameters.*/
+ struct sam_cio_params cio_params;
+} __rte_cache_aligned;
+
+/** MRVL crypto private session structure. */
+struct mrvl_crypto_session {
+ /** Crypto operations chain order. */
+ enum mrvl_crypto_chain_order chain_order;
+
+ /** Session initialization parameters. */
+ struct sam_session_params sam_sess_params;
+
+ /** SAM session pointer. */
+ struct sam_sa *sam_sess;
+
+ /** Cipher IV offset. */
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+/** Set and validate MRVL crypto session parameters */
+extern int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
+
+#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index bc2724b3..49ada097 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -37,6 +37,9 @@ LIB = librte_pmd_null_crypto.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library version
LIBABIVER := 1
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 2c827257..f031d3b8 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -31,10 +31,8 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
@@ -183,28 +181,19 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
-static int cryptodev_null_remove(const char *name);
-
/** Create crypto device */
static int
cryptodev_null_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct null_crypto_private *internals;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct null_crypto_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
+ return -EFAULT;
}
dev->driver_id = cryptodev_driver_id;
@@ -224,61 +213,53 @@ cryptodev_null_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-
-init_error:
- NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed",
- init_params->name);
- cryptodev_null_remove(init_params->name);
-
- return -EFAULT;
}
/** Initialise null crypto device */
static int
cryptodev_null_probe(struct rte_vdev_device *dev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct null_crypto_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
- const char *name;
+ const char *name, *args;
+ int retval;
name = rte_vdev_device_name(dev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
- name, init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ RTE_LOG(ERR, PMD,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
return cryptodev_null_create(name, dev, &init_params);
}
-/** Uninitialise null crypto device */
static int
-cryptodev_null_remove(const char *name)
+cryptodev_null_remove_dev(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
- name, rte_socket_id());
-
- return 0;
-}
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
-static int
-cryptodev_null_remove_dev(struct rte_vdev_device *dev)
-{
- return cryptodev_null_remove(rte_vdev_device_name(dev));
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_null_pmd_drv = {
@@ -286,10 +267,13 @@ static struct rte_vdev_driver cryptodev_null_pmd_drv = {
.remove = cryptodev_null_remove_dev,
};
+static struct cryptodev_driver null_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_null_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+ cryptodev_driver_id);
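
With the probe path now delegating to rte_cryptodev_pmd_parse_input_args(),
the parameters in the string above are exactly what a user passes on the EAL
command line. An illustrative invocation (values are examples, not defaults):

    --vdev "crypto_null,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
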
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index 76153203..c427050a 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -175,7 +175,7 @@ null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"null_crypto_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index 4d1c3c9e..7c7852fb 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -33,8 +33,6 @@
#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
#define _NULL_CRYPTO_PMD_PRIVATE_H_
-#include "rte_config.h"
-
#define CRYPTODEV_NAME_NULL_PMD crypto_null
/**< Null crypto PMD device name */
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
index e5fdfb59..1a006432 100644
--- a/drivers/crypto/openssl/Makefile
+++ b/drivers/crypto/openssl/Makefile
@@ -45,6 +45,9 @@ EXPORT_MAP := rte_pmd_openssl_version.map
# external library dependencies
LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 0bd5f98e..06e1a6de 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -34,11 +34,11 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <openssl/hmac.h>
#include <openssl/evp.h>
#include "rte_openssl_pmd_private.h"
@@ -47,6 +47,25 @@
static uint8_t cryptodev_driver_id;
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *ctx = OPENSSL_malloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ HMAC_CTX_init(ctx);
+ return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+ if (ctx != NULL) {
+ HMAC_CTX_cleanup(ctx);
+ OPENSSL_free(ctx);
+ }
+}
+#endif
+
static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
/*----------------------------------------------------------------------------*/
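
The compatibility shim above gives the PMD the OpenSSL 1.1 HMAC_CTX lifecycle
on older releases as well. For reference, a minimal standalone sketch of that
lifecycle (plain OpenSSL API; the helper name is illustrative and not part of
this patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <openssl/hmac.h>

    /* Illustrative only: HMAC-SHA256 over one buffer, using the same
     * new/init/update/final/free sequence the PMD relies on. */
    static int
    hmac_sha256_once(const uint8_t *key, int keylen,
                     const uint8_t *msg, size_t msglen, uint8_t out[32])
    {
            unsigned int outlen = 0;
            HMAC_CTX *ctx = HMAC_CTX_new(); /* shimmed on OpenSSL < 1.1.0 */

            if (ctx == NULL)
                    return -1;
            if (HMAC_Init_ex(ctx, key, keylen, EVP_sha256(), NULL) != 1 ||
                HMAC_Update(ctx, msg, msglen) != 1 ||
                HMAC_Final(ctx, out, &outlen) != 1) {
                    HMAC_CTX_free(ctx);
                    return -1;
            }
            HMAC_CTX_free(ctx);
            return 0;
    }
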
@@ -267,6 +286,21 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
res = -EINVAL;
}
break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ccm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ccm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ccm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
default:
res = -EINVAL;
break;
@@ -278,6 +312,125 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
return res;
}
+/* Set session AEAD encryption parameters */
+static int
+openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ do_ccm = 0;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length,
+ NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set session AEAD decryption parameters */
+static int
+openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm = 0;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type,
+ sess->iv.length, NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
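
The two helpers above follow the call order OpenSSL requires for CCM: select
the cipher, set the IV length, set the expected tag length, and only then
install the key. A standalone sketch of that order (plain EVP API; the helper
name and the fixed 128-bit key size are illustrative):

    #include <stdint.h>
    #include <openssl/evp.h>

    /* Illustrative only: one-time AES-128-CCM encrypt context setup,
     * mirroring openssl_set_sess_aead_enc_param() above. */
    static EVP_CIPHER_CTX *
    ccm_enc_ctx_new(const uint8_t key[16], int iv_len, int tag_len)
    {
            EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

            if (ctx == NULL ||
                EVP_EncryptInit_ex(ctx, EVP_aes_128_ccm(),
                                   NULL, NULL, NULL) <= 0 ||
                EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN,
                                    iv_len, NULL) <= 0 ||
                EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG,
                                    tag_len, NULL) <= 0 ||
                EVP_EncryptInit_ex(ctx, NULL, NULL, key, NULL) <= 0) {
                    EVP_CIPHER_CTX_free(ctx);
                    return NULL;
            }
            return ctx;
    }
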
/** Set session cipher parameters */
static int
openssl_set_session_cipher_parameters(struct openssl_session *sess,
@@ -307,6 +460,22 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
break;
@@ -319,6 +488,33 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
sess->cipher.key.data) != 0)
return -EINVAL;
break;
+
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
sess->cipher.algo = xform->cipher.algo;
sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
@@ -333,6 +529,23 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
@@ -351,36 +564,31 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
sess->auth.operation = xform->auth.op;
sess->auth.algo = xform->auth.algo;
+ sess->auth.digest_length = xform->auth.digest_length;
+
/* Select auth algo */
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_AES_GMAC:
- sess->chain_order = OPENSSL_CHAIN_COMBINED;
-
- /* Set IV parameters */
- sess->iv.offset = xform->auth.iv.offset;
- sess->iv.length = xform->auth.iv.length;
-
/*
* OpenSSL requires GMAC to be a GCM operation
* with no cipher data length
*/
- sess->cipher.mode = OPENSSL_CIPHER_LIB;
- if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
- sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- else
- sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
-
sess->cipher.key.length = xform->auth.key.length;
- sess->cipher.ctx = EVP_CIPHER_CTX_new();
- if (get_aead_algo(RTE_CRYPTO_AEAD_AES_GCM,
- sess->cipher.key.length,
- &sess->cipher.evp_algo) != 0)
- return -EINVAL;
-
- get_cipher_key(xform->auth.key.data, xform->auth.key.length,
- sess->cipher.key.data);
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ return openssl_set_sess_aead_enc_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
break;
case RTE_CRYPTO_AUTH_MD5:
@@ -403,20 +611,22 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
case RTE_CRYPTO_AUTH_SHA384_HMAC:
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
- sess->auth.hmac.ctx = EVP_MD_CTX_create();
+ sess->auth.hmac.ctx = HMAC_CTX_new();
if (get_auth_algo(xform->auth.algo,
&sess->auth.hmac.evp_algo) != 0)
return -EINVAL;
- sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
- xform->auth.key.data, xform->auth.key.length);
+
+ if (HMAC_Init_ex(sess->auth.hmac.ctx,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ sess->auth.hmac.evp_algo, NULL) != 1)
+ return -EINVAL;
break;
default:
return -ENOTSUP;
}
- sess->auth.digest_length = xform->auth.digest_length;
-
return 0;
}
@@ -425,43 +635,33 @@ static int
openssl_set_session_aead_parameters(struct openssl_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- /* Select cipher direction */
- sess->cipher.direction = xform->cipher.op;
/* Select cipher key */
sess->cipher.key.length = xform->aead.key.length;
/* Set IV parameters */
- sess->iv.offset = xform->aead.iv.offset;
- sess->iv.length = xform->aead.iv.length;
-
- /* Select auth generate/verify */
- sess->auth.operation = xform->auth.op;
- sess->auth.algo = xform->auth.algo;
-
- /* Select auth algo */
- switch (xform->aead.algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- sess->cipher.mode = OPENSSL_CIPHER_LIB;
- sess->aead_algo = xform->aead.algo;
- sess->cipher.ctx = EVP_CIPHER_CTX_new();
-
- if (get_aead_algo(sess->aead_algo, sess->cipher.key.length,
- &sess->cipher.evp_algo) != 0)
- return -EINVAL;
-
- get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
- sess->cipher.key.data);
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+ /*
+ * For AES-CCM, the actual IV is placed
+ * one byte after the start of the IV field,
+ * according to the API.
+ */
+ sess->iv.offset = xform->aead.iv.offset + 1;
+ else
+ sess->iv.offset = xform->aead.iv.offset;
- sess->chain_order = OPENSSL_CHAIN_COMBINED;
- break;
- default:
- return -ENOTSUP;
- }
+ sess->iv.length = xform->aead.iv.length;
sess->auth.aad_length = xform->aead.aad_length;
sess->auth.digest_length = xform->aead.digest_length;
- return 0;
+ sess->aead_algo = xform->aead.algo;
+ /* Select cipher direction */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return openssl_set_sess_aead_enc_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
}
/** Parse crypto xform chain and set private session parameters */
@@ -547,7 +747,7 @@ openssl_reset_session(struct openssl_session *sess)
break;
case OPENSSL_AUTH_AS_HMAC:
EVP_PKEY_free(sess->auth.hmac.pkey);
- EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
+ HMAC_CTX_free(sess->auth.hmac.ctx);
break;
default:
break;
@@ -693,12 +893,11 @@ process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
/** Process standard openssl cipher encryption */
static int
process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
- int offset, uint8_t *iv, uint8_t *key, int srclen,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
{
int totlen;
- if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_cipher_encrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
@@ -743,12 +942,11 @@ process_cipher_encrypt_err:
/** Process standard openssl cipher decryption */
static int
process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
- int offset, uint8_t *iv, uint8_t *key, int srclen,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
{
int totlen;
- if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_cipher_decrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
@@ -823,23 +1021,16 @@ process_cipher_des3ctr_err:
return -EINVAL;
}
-/** Process auth/encription aes-gcm algorithm */
+/** Process AES-GCM encrypt algorithm */
static int
process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
- int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
- if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
- goto process_auth_encryption_gcm_err;
-
- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
- goto process_auth_encryption_gcm_err;
-
- if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_auth_encryption_gcm_err;
if (aadlen > 0)
@@ -868,25 +1059,60 @@ process_auth_encryption_gcm_err:
return -EINVAL;
}
+/** Process AES-CCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t taglen, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG, taglen, tag) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ return 0;
+
+process_auth_encryption_ccm_err:
+ OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM decrypt algorithm */
static int
process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
- int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx,
- const EVP_CIPHER *algo)
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
- if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
- goto process_auth_decryption_gcm_err;
-
- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
- goto process_auth_decryption_gcm_err;
-
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
goto process_auth_decryption_gcm_err;
- if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_auth_decryption_gcm_err;
if (aadlen > 0)
@@ -903,16 +1129,52 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
goto process_auth_decryption_gcm_err;
if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
- goto process_auth_decryption_gcm_final_err;
+ return -EFAULT;
return 0;
process_auth_decryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth description gcm failed");
+ OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed");
return -EINVAL;
+}
-process_auth_decryption_gcm_final_err:
- return -EFAULT;
+/** Process AES-CCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t tag_len,
+ EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, tag_len, tag) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_ccm_err:
+ OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed");
+ return -EINVAL;
}
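
As the comments in both CCM paths note, the cryptodev API reserves space
around the nonce and the AAD: the nonce begins one byte into the IV field
(byte 0 is left for the driver) and the real AAD begins 18 bytes into the
AAD buffer (16 bytes for the B0 block plus 2 for the encoded AAD length).
A hedged sketch of how an application lays out an op accordingly (the helper
name and its parameters are illustrative):

    #include <string.h>
    #include <stdint.h>
    #include <rte_crypto.h>
    #include <rte_crypto_sym.h>

    /* Illustrative only: place a CCM nonce and AAD where the PMDs in
     * this patch expect to find them. */
    static void
    ccm_fill_op(struct rte_crypto_op *op, uint16_t iv_offset,
                const uint8_t *nonce, uint8_t nonce_len,
                const uint8_t *aad, uint16_t aad_len)
    {
            uint8_t *iv_field = rte_crypto_op_ctod_offset(op, uint8_t *,
                            iv_offset);

            /* Byte 0 is reserved (drivers store q - 1 there); the
             * 7..13-byte nonce itself begins at byte 1. */
            memcpy(iv_field + 1, nonce, nonce_len);

            /* Bytes 0..17 of the AAD buffer are reserved for B0 and the
             * AAD length field; the application's AAD follows them. */
            memcpy(op->sym->aead.aad.data + 16 + 2, aad, aad_len);
    }
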
/** Process standard openssl auth algorithms */
@@ -971,10 +1233,9 @@ process_auth_err:
/** Process standard openssl auth algorithms with hmac */
static int
process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
- __rte_unused uint8_t *iv, EVP_PKEY *pkey,
- int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+ int srclen, HMAC_CTX *ctx)
{
- size_t dstlen;
+ unsigned int dstlen;
struct rte_mbuf *m;
int l, n = srclen;
uint8_t *src;
@@ -986,19 +1247,16 @@ process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
if (m == 0)
goto process_auth_err;
- if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
- goto process_auth_err;
-
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
- if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, srclen) != 1)
goto process_auth_err;
goto process_auth_final;
}
- if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
goto process_auth_err;
n -= l;
@@ -1006,13 +1264,16 @@ process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
- if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
goto process_auth_err;
n -= l;
}
process_auth_final:
- if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
+ if (HMAC_Final(ctx, dst, &dstlen) != 1)
+ goto process_auth_err;
+
+ if (unlikely(HMAC_Init_ex(ctx, NULL, 0, NULL, NULL) != 1))
goto process_auth_err;
return 0;
@@ -1032,8 +1293,9 @@ process_openssl_combined_op
{
/* cipher */
uint8_t *dst = NULL, *iv, *tag, *aad;
- int srclen, ivlen, aadlen, status = -1;
+ int srclen, aadlen, status = -1;
uint32_t offset;
+ uint8_t taglen;
/*
* Segmented destination buffer is not supported for
@@ -1046,7 +1308,6 @@ process_openssl_combined_op
iv = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
- ivlen = sess->iv.length;
if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
srclen = 0;
offset = op->sym->auth.data.offset;
@@ -1070,18 +1331,34 @@ process_openssl_combined_op
offset + srclen);
}
- if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- status = process_openssl_auth_encryption_gcm(
- mbuf_src, offset, srclen,
- aad, aadlen, iv, ivlen, sess->cipher.key.data,
- dst, tag, sess->cipher.ctx,
- sess->cipher.evp_algo);
- else
- status = process_openssl_auth_decryption_gcm(
- mbuf_src, offset, srclen,
- aad, aadlen, iv, ivlen, sess->cipher.key.data,
- dst, tag, sess->cipher.ctx,
- sess->cipher.evp_algo);
+ taglen = sess->auth.digest_length;
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_encryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_encryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+
+ } else {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_decryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_decryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+ }
if (status != 0) {
if (status == (-EFAULT) &&
@@ -1122,15 +1399,11 @@ process_openssl_cipher_op
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_cipher_encrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
else
status = process_openssl_cipher_decrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
else
status = process_openssl_cipher_des3ctr(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
@@ -1174,8 +1447,7 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/* Encrypt with the block aligned stream with CBC mode */
status = process_openssl_cipher_encrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx, sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
if (last_block_len) {
/* Point at last block */
dst += srclen;
@@ -1225,9 +1497,7 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/* Decrypt with CBC mode */
status |= process_openssl_cipher_decrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
}
}
@@ -1237,9 +1507,9 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/** Process auth operation */
static void
-process_openssl_auth_op
- (struct rte_crypto_op *op, struct openssl_session *sess,
- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
{
uint8_t *dst;
int srclen, status;
@@ -1247,8 +1517,7 @@ process_openssl_auth_op
srclen = op->sym->auth.data.length;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
- sess->auth.digest_length);
+ dst = qp->temp_digest;
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
@@ -1265,9 +1534,8 @@ process_openssl_auth_op
break;
case OPENSSL_AUTH_AS_HMAC:
status = process_openssl_auth_hmac(mbuf_src, dst,
- op->sym->auth.data.offset, NULL,
- sess->auth.hmac.pkey, srclen,
- sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
+ op->sym->auth.data.offset, srclen,
+ sess->auth.hmac.ctx);
break;
default:
status = -1;
@@ -1279,8 +1547,6 @@ process_openssl_auth_op
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(mbuf_src, sess->auth.digest_length);
}
if (status != 0)
@@ -1289,7 +1555,7 @@ process_openssl_auth_op
/** Process crypto operation for mbuf */
static int
-process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
+process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
struct openssl_session *sess)
{
struct rte_mbuf *msrc, *mdst;
@@ -1305,14 +1571,14 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_ONLY_AUTH:
- process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_CIPHER_AUTH:
process_openssl_cipher_op(op, sess, msrc, mdst);
- process_openssl_auth_op(op, sess, mdst, mdst);
+ process_openssl_auth_op(qp, op, sess, mdst, mdst);
break;
case OPENSSL_CHAIN_AUTH_CIPHER:
- process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_COMBINED:
@@ -1401,19 +1667,12 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
static int
cryptodev_openssl_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct openssl_private *internals;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct openssl_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
OPENSSL_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -1451,11 +1710,12 @@ init_error:
static int
cryptodev_openssl_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct openssl_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -1465,17 +1725,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_openssl_create(name, vdev, &init_params);
}
@@ -1484,17 +1734,18 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
static int
cryptodev_openssl_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD,
- "Closing OPENSSL crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
@@ -1502,10 +1753,13 @@ static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
.remove = cryptodev_openssl_remove
};
+static struct cryptodev_driver openssl_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
cryptodev_openssl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_openssl_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 8cdd0b2e..c5722399 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -362,6 +362,36 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
{ /* AES GMAC (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -427,6 +457,26 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* DES DOCSIS BPI */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -549,7 +599,7 @@ openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"openssl_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index b7f74752..26bf862c 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -34,6 +34,7 @@
#define _OPENSSL_PMD_PRIVATE_H_
#include <openssl/evp.h>
+#include <openssl/hmac.h>
#include <openssl/des.h>
#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
@@ -59,6 +60,8 @@
#define OPENSSL_LOG_DBG(fmt, args...)
#endif
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
/** OPENSSL operation order mode enumerator */
enum openssl_chain_order {
@@ -103,6 +106,11 @@ struct openssl_qp {
/**< Session Mempool */
struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
/** OPENSSL crypto private session structure */
@@ -164,7 +172,7 @@ struct openssl_session {
/**< pointer to EVP key */
const EVP_MD *evp_algo;
/**< pointer to EVP algorithm function */
- EVP_MD_CTX *ctx;
+ HMAC_CTX *ctx;
/**< pointer to EVP context structure */
} hmac;
};
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 7322ffe4..745d0197 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -43,6 +43,9 @@ CFLAGS += -O3
# external library include paths
CFLAGS += -I$(SRCDIR)/qat_adf
LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index ebe245f6..d03688c7 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -301,6 +301,26 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+/* These defines describe the positions of the bit-fields
+ * in the flags byte of B0
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
+ ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+ | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+ | ((q) - 1))
+
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+ ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_config cipher_config;
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
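
A worked example of the flags macro (values per RFC 3610; the variable name
is illustrative): with AAD present, a 16-byte tag and a 13-byte nonce,
q = 15 - 13 = 2, so the byte is (1 << 6) | (((16 - 2) >> 1) << 3) | (2 - 1)
= 0x79.

    /* Illustrative only: B0 flags for Adata present, t = 16, 13-byte nonce */
    uint8_t b0_flags = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(1, 16, 15 - 13); /* 0x79 */
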
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 2c8e03c0..802ba95d 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -104,8 +104,8 @@ struct qat_alg_buf_list {
struct qat_crypto_op_cookie {
struct qat_alg_buf_list qat_sgl_list_src;
struct qat_alg_buf_list qat_sgl_list_dst;
- phys_addr_t qat_sgl_src_phys_addr;
- phys_addr_t qat_sgl_dst_phys_addr;
+ rte_iova_t qat_sgl_src_phys_addr;
+ rte_iova_t qat_sgl_dst_phys_addr;
};
/* Common content descriptor */
@@ -124,7 +124,7 @@ struct qat_session {
void *bpi_ctx;
struct qat_alg_cd cd;
uint8_t *cd_cur_ptr;
- phys_addr_t cd_paddr;
+ rte_iova_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 2d16c9e2..db6c9a32 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -124,6 +124,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_NULL:
return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -876,6 +879,31 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_HW_AUTH_ALGO_NULL);
state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+ state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+ ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+ if (aad_length > 0) {
+ aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ } else {
+ auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+ }
+
+ cdesc->aad_len = aad_length;
+ hash->auth_counter.counter = 0;
+
+ hash_cd_ctrl->outer_prefix_sz = digestsize;
+ auth_param->hash_state_sz = digestsize;
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 1f52cabf..a572967c 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -44,15 +44,12 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
-#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -60,7 +57,10 @@
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
-#include <rte_cryptodev_pci.h>
+#include <rte_byteorder.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -253,10 +253,21 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
/* AEAD */
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ /* AES-GCM and AES-CCM work in opposite orders:
+ * GCM first encrypts and then generates the hash, whereas
+ * AES-CCM first generates the hash and then encrypts. The
+ * reverse relation applies to decryption.
+ */
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
}
if (xform->next == NULL)
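
Restated as a table (no new behaviour beyond the branch above):

    /* algo       encrypt op           decrypt op
     * AES-GCM    CIPHER_HASH          HASH_CIPHER
     * AES-CCM    HASH_CIPHER          CIPHER_HASH
     */
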
@@ -516,7 +527,7 @@ qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
/* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ session->cd_paddr = rte_mempool_virt2iova(session) +
offsetof(struct qat_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
@@ -736,6 +747,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
struct qat_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
/*
* Store AEAD IV parameters as cipher IV,
@@ -755,21 +767,33 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
- aead_xform->algo);
- return -ENOTSUP;
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
- if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
/*
* It needs to create cipher desc content first,
* then authentication
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
if (qat_alg_aead_session_create_content_desc_cipher(session,
aead_xform->key.data,
aead_xform->key.length))
@@ -780,7 +804,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_GENERATE))
+ crypto_operation))
return -EINVAL;
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
@@ -788,12 +812,16 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
* It needs to create authentication desc content first,
* then cipher
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
if (qat_alg_aead_session_create_content_desc_auth(session,
aead_xform->key.data,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_VERIFY))
+ crypto_operation))
return -EINVAL;
if (qat_alg_aead_session_create_content_desc_cipher(session,
@@ -923,6 +951,14 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
return sym_op->cipher.data.length - last_block_len;
}
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -946,10 +982,10 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- - queue->max_inflights;
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
if (overflow > 0) {
- rte_atomic16_sub(&tmp_qp->inflights16, overflow);
+ tmp_qp->inflights16 -= overflow;
nb_ops_possible = nb_ops - overflow;
if (nb_ops_possible == 0)
return 0;
@@ -964,8 +1000,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
* This message cannot be enqueued,
* decrease number of ops that wasn't sent
*/
- rte_atomic16_sub(&tmp_qp->inflights16,
- nb_ops_possible - nb_ops_sent);
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
if (nb_ops_sent == 0)
return 0;
goto kick_tail;
@@ -976,26 +1011,59 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
cur_op++;
}
kick_tail:
- WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, tail);
queue->tail = tail;
tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
return nb_ops_sent;
}
+static inline void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct qat_queue *queue;
+ struct qat_queue *rx_queue, *tx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
+ uint32_t head;
- queue = &(tmp_qp->rx_q);
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr + queue->head);
+ ((uint8_t *)rx_queue->base_addr + head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_ops) {
@@ -1005,7 +1073,6 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
-
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -1022,23 +1089,26 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
- *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
- queue->head = adf_modulo(queue->head +
- queue->msg_size,
- ADF_RING_SIZE_MODULO(queue->queue_size));
+ head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr +
- queue->head);
+ ((uint8_t *)rx_queue->base_addr + head);
*ops = rx_op;
ops++;
msg_counter++;
}
if (msg_counter > 0) {
- WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
- queue->hw_bundle_number,
- queue->hw_queue_number, queue->head);
- rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
+ rx_queue->head = head;
tmp_qp->stats.dequeued_count += msg_counter;
+ rx_queue->nb_processed_responses += msg_counter;
+ tmp_qp->inflights16 -= msg_counter;
+
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
}
return msg_counter;
}
@@ -1049,7 +1119,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
{
int nr = 1;
- uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ uint32_t buf_len = rte_pktmbuf_iova(buf) -
buff_start + rte_pktmbuf_data_len(buf);
list->bufers[0].addr = buff_start;
@@ -1073,7 +1143,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
list->bufers[nr].len = rte_pktmbuf_data_len(buf);
list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+ list->bufers[nr].addr = rte_pktmbuf_iova(buf);
buf_len += list->bufers[nr].len;
buf = buf->next;
@@ -1112,6 +1182,29 @@ set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
}
}
+/** Setting the IV for CCM is a special case: byte 0 is set to q - 1,
+ * where q is the number of padding bytes the nonce leaves free in the
+ * 16-byte B0 block (q = 15 - nonce length)
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
@@ -1156,6 +1249,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
+
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -1164,9 +1259,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM */
+ /* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
do_aead = 1;
} else {
do_auth = 1;
@@ -1273,6 +1372,11 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_aead) {
+ /*
+ * This address may be used as the AAD physical pointer;
+ * for CCM without real AAD it is redirected below to the
+ * IV offset inside the op
+ */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
@@ -1286,6 +1390,87 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+ /* For AES-CCM this may point to user-selected memory
+ * or to the IV offset within the crypto op.
+ */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+ /* This is the true AAD length; it does not include the 18
+ * bytes of preceding data (B0 block plus AAD length field)
+ */
+ uint8_t aad_ccm_real_len = 0;
+
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ /*
+ * aad_len is not greater than 18, so there is no actual
+ * AAD data; use the IV area after the op for the B0 block
+ */
+ aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET
+ + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
}
cipher_len = op->sym->aead.data.length;
@@ -1293,10 +1478,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_len = op->sym->aead.data.length;
auth_ofs = op->sym->aead.data.offset;
- auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}
@@ -1316,26 +1499,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
* so as not to overwrite data in dest buffer
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
& QAT_64_BTYE_ALIGN_MASK;
- if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
rte_pktmbuf_headroom(op->sym->m_src))
> src_buf_start)) {
/* alignment has pushed addr ahead of start of mbuf
* so revert and take the performance hit
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ rte_pktmbuf_iova_offset(op->sym->m_src,
min_ofs);
}
dst_buf_start = src_buf_start;
@@ -1343,7 +1526,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (do_cipher || do_aead) {
cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_mtophys_offset(
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
cipher_param->cipher_length = cipher_len;
} else {
@@ -1352,7 +1535,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
} else {
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 3f35a00e..c64d7756 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -50,6 +50,13 @@
(((num) + (align) - 1) & ~((align) - 1))
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of responses to process before writing the head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
+
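
Taken together, these thresholds implement CSR write coalescing: head and
tail writes are batched while the ring is busy, but a tail write is forced
whenever few requests are in flight so none are left stranded. A minimal
sketch of the tail-side decision, mirroring the logic added to
qat_pmd_enqueue_op_burst() (the helper name is illustrative):

    /* Illustrative only: when should the tail CSR actually be written? */
    static inline int
    qat_should_write_tail(uint16_t inflights, uint16_t pending)
    {
            /* Nearly idle: flush now so requests are not stranded. */
            if (inflights < QAT_CSR_TAIL_FORCE_WRITE_THRESH)
                    return 1;
            /* Busy: amortise the MMIO write across a batch. */
            return pending > QAT_CSR_TAIL_WRITE_THRESH;
    }
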
struct qat_session;
enum qat_device_gen {
@@ -63,7 +70,7 @@ enum qat_device_gen {
struct qat_queue {
char memz_name[RTE_MEMZONE_NAMESIZE];
void *base_addr; /* Base address */
- phys_addr_t base_phys_addr; /* Queue physical address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
uint32_t head; /* Shadow copy of the head */
uint32_t tail; /* Shadow copy of the tail */
uint32_t modulo;
@@ -73,11 +80,17 @@ struct qat_queue {
uint8_t hw_bundle_number;
uint8_t hw_queue_number;
/* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
};
struct qat_qp {
void *mmap_bar_addr;
- rte_atomic16_t inflights16;
+ uint16_t inflights16;
struct qat_queue tx_q;
struct qat_queue rx_q;
struct rte_cryptodev_stats stats;
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 70120072..89ba27d4 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -48,9 +48,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 20, \
+ .min = 1, \
.max = 20, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -69,9 +69,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 28, \
+ .min = 1, \
.max = 28, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -90,9 +90,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 32, \
+ .min = 1, \
.max = 32, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -111,9 +111,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 48, \
+ .min = 1, \
.max = 48, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -132,9 +132,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -153,9 +153,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 16, \
+ .min = 1, \
.max = 16, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -183,6 +183,36 @@
}, } \
}, } \
}, \
+ { /* AES CCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_CCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 16, \
+ .increment = 2 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 224, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 7, \
+ .max = 13, \
+ .increment = 1 \
+ }, \
+ }, } \
+ }, } \
+ }, \
{ /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
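These capability edits relax the HMAC digest ranges (any length from 1 byte up to the full hash, in 1-byte increments, instead of full-length only) and add an AES-CCM AEAD entry. Applications can probe such entries through the standard capability API; a sketch, assuming dev_id identifies an initialized QAT device:

#include <rte_cryptodev.h>

/* Would AES-CCM with a 16-byte key, 8-byte digest, 16 bytes of AAD
 * and an 11-byte nonce be accepted? (8 lies on the digest range
 * 4..16 step 2 declared above.)
 */
static int
qat_supports_this_ccm(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.algo.aead = RTE_CRYPTO_AEAD_AES_CCM,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;	/* algorithm not offered at all */

	return rte_cryptodev_sym_capability_check_aead(cap,
			16 /* key */, 8 /* digest */,
			16 /* aad */, 11 /* iv */) == 0;
}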
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 5048d214..ced3aa6a 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -37,6 +37,7 @@
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
@@ -122,14 +123,9 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
break;
default:
memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
-}
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(queue_name, queue_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
+ }
return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
memzone_flags, queue_size);
-#endif
}
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
@@ -186,7 +182,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
RTE_CACHE_LINE_SIZE);
qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
- rte_atomic16_init(&qp->inflights16);
+ qp->inflights16 = 0;
if (qat_tx_queue_create(dev, &(qp->tx_q),
queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
@@ -232,14 +228,12 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
qp->op_cookies[i];
sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2phy(qp->op_cookie_pool,
- sql_cookie) +
+ rte_mempool_virt2iova(sql_cookie) +
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_src);
sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2phy(qp->op_cookie_pool,
- sql_cookie) +
+ rte_mempool_virt2iova(sql_cookie) +
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_dst);
}
@@ -269,7 +263,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
}
/* Don't free memory if there are still responses to be processed */
- if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+ if (qp->inflights16 == 0) {
qat_queue_delete(&(qp->tx_q));
qat_queue_delete(&(qp->rx_q));
} else {
@@ -377,7 +371,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
}
queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->phys_addr;
+ queue->base_phys_addr = qp_mz->iova;
if (qat_qp_check_queue_alignment(queue->base_phys_addr,
queue_size_bytes)) {
PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
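The mechanical mtophys/virt2phy/phys_addr renames above follow the 17.11-wide switch to IOVA terminology; note that rte_mempool_virt2iova() drops the mempool argument its predecessor required, as visible in the cookie hunks. The address computation pattern in isolation (struct my_cookie is a made-up type for illustration):

#include <stddef.h>
#include <rte_mempool.h>

struct my_cookie {
	uint64_t hw_desc;
	uint8_t sgl[128];
};

/* Bus address (IOVA) of a field embedded in a mempool element, as
 * done for the QAT SGL cookies above; only valid for objects that
 * actually live inside a mempool.
 */
static inline rte_iova_t
cookie_sgl_iova(struct my_cookie *c)
{
	return rte_mempool_virt2iova(c) + offsetof(struct my_cookie, sgl);
}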
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 7d56fca4..4f8e4bfe 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
+#include <rte_pci.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_pci.h>
#include "qat_crypto.h"
#include "qat_logs.h"
@@ -97,15 +98,18 @@ static const struct rte_pci_id pci_id_qat_map[] = {
};
static int
-crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
+crypto_qat_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
{
+ struct rte_cryptodev *cryptodev;
struct qat_pmd_private *internals;
PMD_INIT_FUNC_TRACE();
- PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
- RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
- RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
- RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
@@ -119,8 +123,8 @@ crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
- switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+ switch (pci_dev->id.device_id) {
case 0x0443:
internals->qat_dev_gen = QAT_GEN1;
internals->qat_dev_capabilities = qat_gen1_capabilities;
@@ -151,15 +155,43 @@ crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
}
static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
+ struct rte_pci_device *pci_dev)
{
- return rte_cryptodev_pci_generic_probe(pci_dev,
- sizeof(struct qat_pmd_private), crypto_qat_dev_init);
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct qat_pmd_private),
+ .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_qat_create(name, pci_dev, &init_params);
}
static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_cryptodev_pci_generic_remove(pci_dev, NULL);
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ /* free crypto device */
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_pci_driver rte_qat_pmd = {
@@ -169,6 +201,9 @@ static struct rte_pci_driver rte_qat_pmd = {
.remove = crypto_qat_pci_remove
};
+static struct cryptodev_driver qat_crypto_drv;
+
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_qat_pmd, cryptodev_qat_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
+ cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index b045410c..123b0f6d 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -36,6 +36,9 @@ LIB = librte_pmd_crypto_scheduler.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev -lrte_kvargs -lrte_reorder
+LDLIBS += -lrte_bus_vdev
# library version
LIBABIVER := 1
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 400fc4f1..acdf6361 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -33,8 +33,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
@@ -45,7 +44,7 @@
uint8_t cryptodev_driver_id;
struct scheduler_init_params {
- struct rte_crypto_vdev_init_params def_p;
+ struct rte_cryptodev_pmd_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
@@ -107,21 +106,18 @@ cryptodev_scheduler_create(const char *name,
uint32_t i;
int ret;
- if (init_params->def_p.name[0] == '\0')
- snprintf(init_params->def_p.name,
- sizeof(init_params->def_p.name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
- sizeof(struct scheduler_ctx),
- init_params->def_p.socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->def_p);
if (dev == NULL) {
CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
+ if (init_params->wcmask != 0)
+ RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
+ init_params->wcmask);
+
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -240,10 +236,7 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
sched_ctx->slaves[i].dev_id);
}
- RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
- "socket %u\n", name, rte_socket_id());
-
- return 0;
+ return rte_cryptodev_pmd_destroy(dev);
}
/** Parse integer from integer argument */
@@ -304,7 +297,7 @@ static int
parse_name_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
- struct rte_crypto_vdev_init_params *params = extra_args;
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
CS_LOG_ERR("Invalid name %s, should be less than "
@@ -462,10 +455,11 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
{
struct scheduler_init_params init_params = {
.def_p = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ "",
+ sizeof(struct scheduler_ctx),
rte_socket_id(),
- ""
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
},
.nb_slaves = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
@@ -481,19 +475,6 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
scheduler_parse_init_params(&init_params,
rte_vdev_device_args(vdev));
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
- name,
- init_params.def_p.socket_id);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.def_p.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.def_p.max_nb_sessions);
- if (init_params.def_p.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.def_p.name);
- if (init_params.wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params.wcmask);
return cryptodev_scheduler_create(name,
vdev,
@@ -505,6 +486,8 @@ static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
.remove = cryptodev_scheduler_remove
};
+static struct cryptodev_driver scheduler_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
@@ -512,5 +495,6 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
+ cryptodev_scheduler_pmd_drv,
cryptodev_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index d3795346..d9b52352 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -31,13 +31,11 @@
*/
#include <string.h>
-#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
#include <rte_reorder.h>
#include "scheduler_pmd_private.h"
@@ -347,7 +345,7 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
- UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
+ UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
uint32_t i;
if (!dev_info)
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index ecee80df..183c6ce9 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index dad45068..4cc9a94f 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -31,19 +31,16 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_snow3g_pmd_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_DIGEST_LENGTH 4
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
@@ -263,7 +260,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_snow3g_hash_op(struct rte_crypto_op **ops,
+process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
struct snow3g_session *session,
uint8_t num_ops)
{
@@ -289,8 +286,7 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- SNOW3G_DIGEST_LENGTH);
+ dst = qp->temp_digest;
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
iv, src,
@@ -299,10 +295,6 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
SNOW3G_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- SNOW3G_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
@@ -346,16 +338,16 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
session, num_ops);
break;
case SNOW3G_OP_ONLY_AUTH:
- processed_ops = process_snow3g_hash_op(ops, session,
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
num_ops);
break;
case SNOW3G_OP_CIPHER_AUTH:
processed_ops = process_snow3g_cipher_op(ops, session,
num_ops);
- process_snow3g_hash_op(ops, session, processed_ops);
+ process_snow3g_hash_op(qp, ops, session, processed_ops);
break;
case SNOW3G_OP_AUTH_CIPHER:
- processed_ops = process_snow3g_hash_op(ops, session,
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
num_ops);
process_snow3g_cipher_op(ops, session, processed_ops);
break;
@@ -403,15 +395,15 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
session);
break;
case SNOW3G_OP_ONLY_AUTH:
- processed_op = process_snow3g_hash_op(&op, session, 1);
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
break;
case SNOW3G_OP_CIPHER_AUTH:
processed_op = process_snow3g_cipher_op_bit(op, session);
if (processed_op == 1)
- process_snow3g_hash_op(&op, session, 1);
+ process_snow3g_hash_op(qp, &op, session, 1);
break;
case SNOW3G_OP_AUTH_CIPHER:
- processed_op = process_snow3g_hash_op(&op, session, 1);
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
if (processed_op == 1)
process_snow3g_cipher_op_bit(op, session);
break;
@@ -565,19 +557,13 @@ static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
static int
cryptodev_snow3g_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct snow3g_private *internals;
uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct snow3g_private), init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
SNOW3G_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -611,11 +597,12 @@ init_error:
static int
cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct snow3g_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -625,17 +612,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_snow3g_create(name, vdev, &init_params);
}
@@ -643,17 +620,18 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
static int
cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
@@ -661,10 +639,13 @@ static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
.remove = cryptodev_snow3g_remove
};
+static struct cryptodev_driver snow3g_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_snow3g_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index ae9569c8..157a2de3 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -184,7 +184,7 @@ snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"snow3g_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
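The comparison fix matters because snprintf() returns the number of characters that would have been written, excluding the terminating NUL, so a return value equal to the buffer size already means the name was truncated. A minimal illustration:

#include <stdio.h>

static int
set_qp_name(char *buf, size_t len)
{
	int n = snprintf(buf, len, "%s", "snow3g_pmd_0_qp_0");

	/* n == len means the last character was dropped to fit the NUL;
	 * "n > len" misses exactly that case, "n >= len" catches it
	 */
	if (n < 0 || (size_t)n >= len)
		return -1;
	return 0;
}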
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index fba3cb86..7b9729f0 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -58,6 +58,8 @@
#define SNOW3G_LOG_DBG(fmt, args...)
#endif
+#define SNOW3G_DIGEST_LENGTH 4
+
/** private data structure for each virtual SNOW 3G device */
struct snow3g_private {
unsigned max_nb_queue_pairs;
@@ -78,6 +80,11 @@ struct snow3g_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum snow3g_operation {
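Together with the process_snow3g_hash_op() changes above, this per-queue-pair scratch buffer removes the append/compare/trim round trip on the source mbuf during verification: the driver now writes the computed digest into qp->temp_digest and compares it against the application-supplied one, so m_src needs no tailroom and is never mutated. The verify path condenses to roughly this (compute_digest() is a hypothetical stand-in for the sso_snow3g_f9_1_buffer() call):

static void
verify_digest(struct snow3g_qp *qp, struct rte_crypto_op *op,
		const uint8_t *src, uint32_t len_bits, const uint8_t *iv)
{
	compute_digest(qp->temp_digest, src, len_bits, iv);

	if (memcmp(qp->temp_digest, op->sym->auth.digest.data,
			SNOW3G_DIGEST_LENGTH) != 0)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}

The same pattern is applied to the ZUC PMD further down.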
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
index f543b407..af77bc8a 100644
--- a/drivers/crypto/zuc/Makefile
+++ b/drivers/crypto/zuc/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_ZUC_PATH)
CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index b301711e..590224bf 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -31,18 +31,15 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_zuc_pmd_private.h"
-#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 8
#define BYTE_LEN 8
@@ -258,7 +255,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_zuc_hash_op(struct rte_crypto_op **ops,
+process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
struct zuc_session *session,
uint8_t num_ops)
{
@@ -285,8 +282,7 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ZUC_DIGEST_LENGTH);
+ dst = (uint32_t *)qp->temp_digest;
sso_zuc_eia3_1_buffer(session->pKey_hash,
iv, src,
@@ -295,10 +291,6 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
ZUC_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- ZUC_DIGEST_LENGTH);
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
@@ -327,16 +319,16 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
session, num_ops);
break;
case ZUC_OP_ONLY_AUTH:
- processed_ops = process_zuc_hash_op(ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, session,
num_ops);
break;
case ZUC_OP_CIPHER_AUTH:
processed_ops = process_zuc_cipher_op(ops, session,
num_ops);
- process_zuc_hash_op(ops, session, processed_ops);
+ process_zuc_hash_op(qp, ops, session, processed_ops);
break;
case ZUC_OP_AUTH_CIPHER:
- processed_ops = process_zuc_hash_op(ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, session,
num_ops);
process_zuc_cipher_op(ops, session, processed_ops);
break;
@@ -469,19 +461,14 @@ static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
static int
cryptodev_zuc_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct zuc_private *internals;
uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct zuc_private), init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
ZUC_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -515,11 +502,12 @@ init_error:
static int
cryptodev_zuc_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct zuc_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -529,17 +517,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_zuc_create(name, vdev, &init_params);
}
@@ -547,17 +525,19 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
static int
cryptodev_zuc_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing ZUC crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
@@ -565,9 +545,12 @@ static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
.remove = cryptodev_zuc_remove
};
+static struct cryptodev_driver zuc_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_zuc_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 52c6aed8..243c0991 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -184,7 +184,7 @@ zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"zuc_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index b706e0aa..a57b8cd0 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -59,6 +59,8 @@
#endif
#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+
/** private data structure for each virtual ZUC device */
struct zuc_private {
unsigned max_nb_queue_pairs;
@@ -79,6 +81,11 @@ struct zuc_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[ZUC_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum zuc_operation {
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 3f6b8988..1f9c0ba2 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -31,15 +31,9 @@
include $(RTE_SDK)/mk/rte.vars.mk
-core-libs := librte_eal librte_eventdev
-
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
-DEPDIRS-skeleton = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
-DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
-DEPDIRS-octeontx = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
-DEPDIRS-dpaa2 = $(core-libs) librte_bus_fslmc
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 3497d09d..f34eebfa 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -45,6 +45,10 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_bus_fslmc -lrte_pmd_dpaa2
+LDLIBS += -lrte_bus_vdev
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
# versioning export map
EXPORT_MAP := rte_pmd_dpaa2_event_version.map
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index cf2d2741..eeeb2312 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -50,14 +50,16 @@
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_pci.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
+#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
@@ -137,14 +139,23 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
*/
struct rte_event *ev_temp = rte_malloc(NULL,
sizeof(struct rte_event), 0);
+
+ if (!ev_temp) {
+ if (!loop)
+ return num_tx;
+ frames_to_send = loop;
+ PMD_DRV_LOG(ERR, "Unable to allocate memory");
+ goto send_partial;
+ }
rte_memcpy(ev_temp, event, sizeof(struct rte_event));
DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
DPAA2_SET_FD_LEN((&fd_arr[loop]),
sizeof(struct rte_event));
}
+send_partial:
loop = 0;
while (loop < frames_to_send) {
- loop += qbman_swp_enqueue_multiple_eqdesc(swp,
+ loop += qbman_swp_enqueue_multiple_desc(swp,
&eqdesc[loop], &fd_arr[loop],
frames_to_send - loop);
}
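With this change an allocation failure mid-burst no longer drops the whole burst: frames_to_send is truncated to the events already prepared and control falls through to send_partial, so the function returns how many events were actually queued. Callers should already cope with short returns, per the usual eventdev idiom (a sketch; dev_id, port_id, ev and nb_events assumed set up):

uint16_t sent = 0;

while (sent < nb_events) {
	uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
			&ev[sent], nb_events - sent);
	if (n == 0)
		break;	/* persistent back-pressure or failure */
	sent += n;
}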
@@ -189,10 +200,14 @@ RETRY:
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
struct rte_event *ev)
{
struct rte_event *ev_temp =
(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+
+ RTE_SET_USED(rxq);
+
rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
rte_free(ev_temp);
@@ -202,6 +217,7 @@ static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
struct rte_event *ev)
{
struct rte_event *ev_temp =
@@ -209,6 +225,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
RTE_SET_USED(swp);
+ RTE_SET_USED(rxq);
rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
rte_free(ev_temp);
@@ -265,7 +282,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
if (rxq) {
- rxq->cb(swp, fd, dq, &ev[num_pkts]);
+ rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
} else {
qbman_swp_dqrr_consume(swp, dq);
PMD_DRV_LOG(ERR, "Null Return VQ received\n");
@@ -378,8 +395,8 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
RTE_SET_USED(queue_conf);
queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
- queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
- RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
+ RTE_SCHED_TYPE_PARALLEL;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
@@ -551,6 +568,147 @@ dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
RTE_SET_USED(f);
}
+static int
+dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strcmp(ethdev_driver, "net_dpaa2"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int i, ret;
+
+ PMD_DRV_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_attach(eth_dev, i,
+ dpcon_id, queue_conf);
+ if (ret) {
+ PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
+ ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ for (i = (i - 1); i >= 0; i--)
+ dpaa2_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ int ret;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_add_all(dev,
+ eth_dev, queue_conf);
+
+ ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
+ dpcon_id, queue_conf);
+ if (ret) {
+ PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int i, ret;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_detach(eth_dev, i);
+ if (ret) {
+ PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
+
+ ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret) {
+ PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
@@ -566,7 +724,12 @@ static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
.port_link = dpaa2_eventdev_port_link,
.port_unlink = dpaa2_eventdev_port_unlink,
.timeout_ticks = dpaa2_eventdev_timeout_ticks,
- .dump = dpaa2_eventdev_dump
+ .dump = dpaa2_eventdev_dump,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};
static int
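These five new ops plug the DPAA2 PMD into the 17.11 event/ethdev Rx adapter framework. From the application side the flow is driven through the generic adapter API; a sketch, assuming evdev_id, eth_port_id and an already-filled ev_port_conf:

struct rte_event_eth_rx_adapter_queue_conf qconf = {
	.ev.queue_id = 0,
	.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
};

rte_event_eth_rx_adapter_create(0, evdev_id, &ev_port_conf);
/* rx_queue_id of -1 binds every Rx queue, which is the
 * dpaa2_eventdev_eth_queue_add_all() path above
 */
rte_event_eth_rx_adapter_queue_add(0, eth_port_id, -1, &qconf);
rte_event_eth_rx_adapter_start(0);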
@@ -621,7 +784,6 @@ dpaa2_eventdev_create(const char *name)
}
eventdev->dev_ops = &dpaa2_eventdev_ops;
- eventdev->schedule = NULL;
eventdev->enqueue = dpaa2_eventdev_enqueue;
eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index f79f78aa..ae8e07e9 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -74,6 +74,14 @@ enum {
DPAA2_EVENT_DPCI_MAX_QUEUES
};
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+/**< Ethernet Rx adapter capability bitmap returned when packets are
+ * transferred from an ethdev to an eventdev backed by DPAA2 devices.
+ */
+
struct dpaa2_dpcon_dev {
TAILQ_ENTRY(dpaa2_dpcon_dev) next;
struct fsl_mc_io dpcon;
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index d3e73f90..005e6234 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -46,7 +46,7 @@
#include <rte_dev.h>
#include <rte_ethdev.h>
-#include <fslmc_vfio.h>
+#include <rte_fslmc.h>
#include <mc/fsl_dpcon.h>
#include <portal/dpaa2_hw_pvt.h>
#include "dpaa2_eventdev.h"
@@ -56,9 +56,9 @@ static struct dpcon_dev_list dpcon_dev_list
= TAILQ_HEAD_INITIALIZER(dpcon_dev_list); /*!< DPCON device list */
static int
-rte_dpaa2_create_dpcon_device(struct fslmc_vfio_device *vdev __rte_unused,
+rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
struct vfio_device_info *obj_info __rte_unused,
- int dpcon_id)
+ int dpcon_id)
{
struct dpaa2_dpcon_dev *dpcon_node;
struct dpcon_attr attr;
@@ -100,7 +100,7 @@ rte_dpaa2_create_dpcon_device(struct fslmc_vfio_device *vdev __rte_unused,
TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
- PMD_DRV_LOG(DEBUG, "DPAA2: Added [dpcon.%d]", dpcon_id);
+ RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpcon.%d]\n", dpcon_id);
return 0;
}
@@ -132,7 +132,7 @@ void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon)
}
static struct rte_dpaa2_object rte_dpaa2_dpcon_obj = {
- .object_id = DPAA2_MC_DPCON_DEVID,
+ .dev_type = DPAA2_CON,
.create = rte_dpaa2_create_dpcon_device,
};
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index e5661ca8..fdf1b738 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -38,6 +38,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx_ssovf.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
+
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
@@ -48,8 +54,6 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_mbox.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
@@ -64,7 +68,4 @@ else
CFLAGS_ssovf_worker.o += -Ofast
endif
-# install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)-include := rte_pmd_octeontx_ssovf.h
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
index 3810a03f..5352e7e3 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
@@ -1,9 +1,3 @@
DPDK_17.05 {
- global:
-
- octeontx_ssovf_info;
- octeontx_ssovf_bar;
- octeontx_ssovf_mbox_send;
-
local: *;
};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index d829b491..117b1453 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -36,12 +36,13 @@
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include "ssovf_evdev.h"
@@ -155,7 +156,6 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
- dev->schedule = NULL;
dev->enqueue = ssows_enq;
dev->enqueue_burst = ssows_enq_burst;
dev->enqueue_new_burst = ssows_enq_new_burst;
@@ -395,6 +395,123 @@ ssows_dump(struct ssows *ws, FILE *f)
fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int ret;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+ return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret = 0;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ pki_mod_qos_t pki_qos;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return -EINVAL;
+
+ if (rx_queue_id >= 0)
+ return -EINVAL;
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+ return -ENOTSUP;
+
+ memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+ pki_qos.port_type = 0;
+ pki_qos.index = 0;
+ pki_qos.mmask.f_tag_type = 1;
+ pki_qos.mmask.f_port_add = 1;
+ pki_qos.mmask.f_grp_ok = 1;
+ pki_qos.mmask.f_grp_bad = 1;
+ pki_qos.mmask.f_grptag_ok = 1;
+ pki_qos.mmask.f_grptag_bad = 1;
+
+ pki_qos.tag_type = queue_conf->ev.sched_type;
+ pki_qos.qos_entry.port_add = 0;
+ pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+ pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+ pki_qos.qos_entry.grptag_bad = 0;
+ pki_qos.qos_entry.grptag_ok = 0;
+
+ ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+ if (ret < 0)
+ ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+ nic->port_id, queue_conf->ev.queue_id);
+
+ return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+ int ret = 0;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ pki_del_qos_t pki_qos;
+ RTE_SET_USED(dev);
+ RTE_SET_USED(rx_queue_id);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return -EINVAL;
+
+ memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+ pki_qos.port_type = 0;
+ pki_qos.index = 0;
+ ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+ if (ret < 0)
+ ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+ nic->port_id, rx_queue_id);
+ return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return 0;
+ octeontx_pki_port_start(nic->port_id);
+ return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ RTE_SET_USED(dev);
+
+ ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+ if (ret)
+ return 0;
+ octeontx_pki_port_stop(nic->port_id);
+ return 0;
+}
+
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
@@ -488,6 +605,13 @@ static const struct rte_eventdev_ops ssovf_ops = {
.port_link = ssovf_port_link,
.port_unlink = ssovf_port_unlink,
.timeout_ticks = ssovf_timeout_ticks,
+
+ .eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+ .eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+ .eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
.dump = ssovf_dump,
.dev_start = ssovf_start,
.dev_stop = ssovf_stop,
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 1cdc8104..b093a3e7 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -33,11 +33,11 @@
#ifndef __SSOVF_EVDEV_H__
#define __SSOVF_EVDEV_H__
-#include <rte_config.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>
-#include "rte_pmd_octeontx_ssovf.h"
+#include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>
#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
@@ -58,10 +58,6 @@
RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
-#define PCI_VENDOR_ID_CAVIUM 0x177D
-#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
-#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
-
#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)
@@ -76,7 +72,6 @@
#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
#define SSO_VHGRP_AQ_THR (0x1E0ULL)
-#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
/* BAR2 */
#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
@@ -107,8 +102,6 @@
#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
-#define SSOW_BAR4_LEN (64 * 1024)
-
/* Mailbox message constants */
#define SSO_COPROC 0x2
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 55f72555..bf76ac88 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -34,7 +34,10 @@
#include <rte_common.h>
#include <rte_branch_prediction.h>
+#include <octeontx_mbox.h>
+
#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
enum {
SSO_SYNC_ORDERED,
@@ -49,6 +52,28 @@ enum {
/* SSO Operations */
+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+{
+ struct rte_mbuf *mbuf;
+ octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+ rte_prefetch_non_temporal(wqe);
+
+ /* Get mbuf from wqe */
+ mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+ OCTTX_PACKET_WQE_SKIP);
+ mbuf->packet_type =
+ ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+ mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+ mbuf->pkt_len = wqe->s.w1.len;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->port = port_id;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ return mbuf;
+}
+
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
@@ -61,9 +86,14 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
ws->cur_tt = sched_type_queue & 0x3;
ws->cur_grp = sched_type_queue >> 2;
sched_type_queue = sched_type_queue << 38;
-
ev->event = sched_type_queue | (get_work0 & 0xffffffff);
- ev->u64 = get_work1;
+ if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
+ ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev->event >> 20) & 0xF);
+ } else {
+ ev->u64 = get_work1;
+ }
+
return !!get_work1;
}
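With ssovf_octeontx_wqe_to_pkt() wired into ssows_get_work(), ethdev-sourced events arrive at the worker as fully formed mbufs, converted inline from the hardware work-queue entry. On the application side nothing special is needed beyond dispatching on the event type (process_packet()/process_user_event() are application-defined placeholders; dev_id, port_id and timeout assumed set up):

struct rte_event ev;

while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout)) {
	if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
		process_packet(ev.mbuf);	/* rebuilt from the WQE */
	else
		process_user_event(ev.u64);
}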
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index e6d58711..a24738b1 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -38,6 +38,9 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_skeleton_event.a
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_skeleton_event_version.map
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index bcd20556..bb554c36 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -45,9 +45,8 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_lcore.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include "skeleton_eventdev.h"
@@ -375,7 +374,6 @@ skeleton_eventdev_init(struct rte_eventdev *eventdev)
PMD_DRV_FUNC_TRACE();
eventdev->dev_ops = &skeleton_eventdev_ops;
- eventdev->schedule = NULL;
eventdev->enqueue = skeleton_eventdev_enqueue;
eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
eventdev->dequeue = skeleton_eventdev_dequeue;
@@ -466,7 +464,6 @@ skeleton_eventdev_create(const char *name, int socket_id)
}
eventdev->dev_ops = &skeleton_eventdev_ops;
- eventdev->schedule = NULL;
eventdev->enqueue = skeleton_eventdev_enqueue;
eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
eventdev->dequeue = skeleton_eventdev_dequeue;
diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile
index 857a87cc..2f2b67ba 100644
--- a/drivers/event/sw/Makefile
+++ b/drivers/event/sw/Makefile
@@ -43,12 +43,14 @@ ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
CFLAGS += -Wno-missing-field-initializers
endif
endif
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs -lrte_ring
+LDLIBS += -lrte_bus_vdev
# library version
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_pmd_evdev_sw_version.map
+EXPORT_MAP := rte_pmd_sw_event_version.map
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
diff --git a/drivers/event/sw/rte_pmd_evdev_sw_version.map b/drivers/event/sw/rte_pmd_sw_event_version.map
index 5352e7e3..5352e7e3 100644
--- a/drivers/event/sw/rte_pmd_evdev_sw_version.map
+++ b/drivers/event/sw/rte_pmd_sw_event_version.map
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 9c534b7f..fd110797 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -33,8 +33,7 @@
#include <inttypes.h>
#include <string.h>
-#include <rte_vdev.h>
-#include <rte_memzone.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
@@ -345,28 +344,14 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
{
int type;
- /* SINGLE_LINK can be OR-ed with other types, so handle first */
+ type = conf->schedule_type;
+
if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
type = SW_SCHED_TYPE_DIRECT;
- } else {
- switch (conf->event_queue_cfg) {
- case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
- type = RTE_SCHED_TYPE_ATOMIC;
- break;
- case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
- type = RTE_SCHED_TYPE_ORDERED;
- break;
- case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
- type = RTE_SCHED_TYPE_PARALLEL;
- break;
- case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
- SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
- return -ENOTSUP;
- default:
- SW_LOG_ERR("Unknown queue type %d requested\n",
- conf->event_queue_cfg);
- return -EINVAL;
- }
+ } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ & conf->event_queue_cfg) {
+ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+ return -ENOTSUP;
}
struct sw_evdev *sw = sw_pmd_priv(dev);
@@ -400,7 +385,7 @@ sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
static const struct rte_event_queue_conf default_conf = {
.nb_atomic_flows = 4096,
.nb_atomic_order_sequences = 1,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
};
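The sw PMD now keys scheduling off the new schedule_type field rather than the overloaded event_queue_cfg flags; only SINGLE_LINK remains a cfg flag, and ALL_TYPES stays unsupported. Queue setup against the 17.11 API accordingly looks like this (dev_id and queue_id assumed valid):

struct rte_event_queue_conf qconf = {
	.nb_atomic_flows = 1024,
	.nb_atomic_order_sequences = 1024,
	.schedule_type = RTE_SCHED_TYPE_ORDERED,
	.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
};

rte_event_queue_setup(dev_id, queue_id, &qconf);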
@@ -437,6 +422,19 @@ sw_dev_configure(const struct rte_eventdev *dev)
return 0;
}
+struct rte_eth_dev;
+
+static int
+sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ return 0;
+}
+
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
@@ -616,11 +614,14 @@ sw_start(struct rte_eventdev *dev)
unsigned int i, j;
struct sw_evdev *sw = sw_pmd_priv(dev);
+ rte_service_component_runstate_set(sw->service_id, 1);
+
/* check a service core is mapped to this service */
- struct rte_service_spec *s = rte_service_get_by_name(sw->service_name);
- if (!rte_service_is_running(s))
+ if (!rte_service_runstate_get(sw->service_id)) {
SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
- s->name);
+ sw->service_name);
+ return -ENOENT;
+ }
/* check all ports are set up */
for (i = 0; i < sw->port_count; i++)
@@ -752,6 +753,8 @@ sw_probe(struct rte_vdev_device *vdev)
.port_link = sw_port_link,
.port_unlink = sw_port_unlink,
+ .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+
.xstats_get = sw_xstats_get,
.xstats_get_names = sw_xstats_get_names,
.xstats_get_by_name = sw_xstats_get_by_name,
@@ -833,7 +836,6 @@ sw_probe(struct rte_vdev_device *vdev)
dev->enqueue_forward_burst = sw_event_enqueue_burst;
dev->dequeue = sw_event_dequeue;
dev->dequeue_burst = sw_event_dequeue_burst;
- dev->schedule = sw_event_schedule;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@@ -855,12 +857,15 @@ sw_probe(struct rte_vdev_device *vdev)
service.callback = sw_sched_service_func;
service.callback_userdata = (void *)dev;
- int32_t ret = rte_service_register(&service);
+ int32_t ret = rte_service_component_register(&service, &sw->service_id);
if (ret) {
SW_LOG_ERR("service register() failed");
return -ENOEXEC;
}
+ dev->data->service_inited = 1;
+ dev->data->service_id = sw->service_id;
+
return 0;
}
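Registering as a service component and exporting the id through dev->data->service_id replaces the old lookup-by-name flow. An application now fetches the id from the eventdev and maps it to a service lcore before starting the device, roughly as follows (lcore 2 chosen arbitrarily as the service core):

uint32_t service_id;

rte_event_dev_service_id_get(dev_id, &service_id);
rte_service_lcore_add(2);
rte_service_map_lcore_set(service_id, 2, 1);
rte_service_runstate_set(service_id, 1);
rte_service_lcore_start(2);

Without such a mapping, sw_start() above now fails with -ENOENT instead of merely logging a warning.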
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index 71de3c14..e0dec910 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -278,6 +278,7 @@ struct sw_evdev {
uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint32_t service_id;
char service_name[SW_PMD_NAME_MAX];
};
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index d76d3d5c..b3b3b17e 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -85,14 +85,18 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
struct sw_port *p = port;
struct sw_evdev *sw = (void *)p->sw;
uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
-
- if (unlikely(p->inflight_max < sw_inflights))
- return 0;
+ int new = 0;
if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
num = PORT_ENQUEUE_MAX_BURST_SIZE;
- if (p->inflight_credits < num) {
+ for (i = 0; i < num; i++)
+ new += (ev[i].op == RTE_EVENT_OP_NEW);
+
+ if (unlikely(new > 0 && p->inflight_max < sw_inflights))
+ return 0;
+
+ if (p->inflight_credits < new) {
/* check if event enqueue brings port over max threshold */
uint32_t credit_update_quanta = sw->credit_update_quanta;
if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
@@ -101,7 +105,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
rte_atomic32_add(&sw->inflights, credit_update_quanta);
p->inflight_credits += (credit_update_quanta);
- if (p->inflight_credits < num)
+ if (p->inflight_credits < new)
return 0;
}
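The credit rework makes admission control apply only to RTE_EVENT_OP_NEW events: the burst is scanned once to count them, and only that count is checked against the inflight limit and the port's credits. Forwards and releases recirculate credits the port already holds, so a congested device can still drain in-progress work. A worker forwarding between stages therefore never stalls on credits (BURST, evs and next_stage assumed defined):

uint16_t i, n = rte_event_dequeue_burst(dev_id, port_id, evs, BURST, 0);

for (i = 0; i < n; i++) {
	evs[i].op = RTE_EVENT_OP_FORWARD;	/* consumes no new credits */
	evs[i].queue_id = next_stage;
}
rte_event_enqueue_burst(dev_id, port_id, evs, n);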
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 8cb6d88d..61a5c33b 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -538,7 +538,7 @@ sw_xstats_get(const struct rte_eventdev *dev,
{
struct sw_evdev *sw = sw_pmd_priv(dev);
const uint32_t reset = 0;
- const uint32_t ret_n_lt_stats = 1;
+ const uint32_t ret_n_lt_stats = 0;
return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
reset, ret_n_lt_stats);
}
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index efd55f23..f656c566 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -30,13 +30,10 @@
include $(RTE_SDK)/mk/rte.vars.mk
-core-libs := librte_eal librte_mempool librte_ring
-
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
-DEPDIRS-dpaa2 = $(core-libs)
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
-DEPDIRS-ring = $(core-libs)
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
-DEPDIRS-stack = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
new file mode 100644
index 00000000..c49b0ee3
--- /dev/null
+++ b/drivers/mempool/dpaa/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright 2016 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -D _GNU_SOURCE
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa_version.map
+
+# Library version
+LIBABIVER := 1
+
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
new file mode 100644
index 00000000..f5ee80f2
--- /dev/null
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -0,0 +1,284 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <dpaa_mempool.h>
+
+struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+
+static int
+dpaa_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct bman_pool *bp;
+ struct bm_buffer bufs[8];
+ struct dpaa_bp_info *bp_info;
+ uint8_t bpid;
+ int num_bufs = 0, ret = 0;
+ struct bman_pool_params params = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID
+ };
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ bp = bman_new_pool(&params);
+ if (!bp) {
+ DPAA_MEMPOOL_ERR("bman_new_pool() failed");
+ return -ENODEV;
+ }
+ bpid = bman_get_params(bp)->bpid;
+
+ /* Drain the pool of anything already in it. */
+ do {
+		/* Acquire is all-or-nothing, so we drain in batches of 8,
+		 * then one at a time for the remainder.
+		 */
+ if (ret != 1)
+ ret = bman_acquire(bp, bufs, 8, 0);
+ if (ret < 8)
+ ret = bman_acquire(bp, bufs, 1, 0);
+ if (ret > 0)
+ num_bufs += ret;
+ } while (ret > 0);
+ if (num_bufs)
+ DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
+ num_bufs, bpid);
+
+ rte_dpaa_bpid_info[bpid].mp = mp;
+ rte_dpaa_bpid_info[bpid].bpid = bpid;
+ rte_dpaa_bpid_info[bpid].size = mp->elt_size;
+ rte_dpaa_bpid_info[bpid].bp = bp;
+ rte_dpaa_bpid_info[bpid].meta_data_size =
+ sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
+ rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
+ bman_free_pool(bp);
+ return -ENOMEM;
+ }
+
+ rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
+ sizeof(struct dpaa_bp_info));
+ mp->pool_data = (void *)bp_info;
+
+ DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid);
+ return 0;
+}
+
+static void
+dpaa_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (bp_info) {
+ bman_free_pool(bp_info->bp);
+ DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
+ bp_info->bpid);
+ rte_free(mp->pool_data);
+ mp->pool_data = NULL;
+ }
+}
+
+static void
+dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ struct bm_buffer buf;
+ int ret;
+
+ DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+
+ bm_buffer_set64(&buf, addr);
+retry:
+ ret = bman_release(bp_info->bp, &buf, 1, 0);
+ if (ret) {
+ DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
+ cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
+ goto retry;
+ }
+}
+
+static int
+dpaa_mbuf_free_bulk(struct rte_mempool *pool,
+ void *const *obj_table,
+ unsigned int n)
+{
+ struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+ int ret;
+ unsigned int i = 0;
+
+ DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
+ n, bp_info->bpid);
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return 0;
+ }
+
+ while (i < n) {
+ dpaa_buf_free(bp_info,
+ (uint64_t)rte_mempool_virt2iova(obj_table[i]) +
+ bp_info->meta_data_size);
+ i = i + 1;
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
+ n, bp_info->bpid);
+
+ return 0;
+}
+
+static int
+dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table,
+ unsigned int count)
+{
+ struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
+ struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
+ struct dpaa_bp_info *bp_info;
+ void *bufaddr;
+ int i, ret;
+ unsigned int n = 0;
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
+
+ DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
+ count, bp_info->bpid);
+
+ if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
+ DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
+ count);
+ return -1;
+ }
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return -1;
+ }
+
+ while (n < count) {
+		/* Acquire is all-or-nothing, so we acquire in bursts of
+		 * DPAA_MBUF_MAX_ACQ_REL (8), then the remainder.
+		 */
+ if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
+ ret = bman_acquire(bp_info->bp, bufs,
+ DPAA_MBUF_MAX_ACQ_REL, 0);
+ } else {
+ ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
+ }
+		/* If fewer buffers than requested are available in the
+		 * pool, bman_acquire() returns 0.
+		 */
+ if (ret <= 0) {
+ DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)",
+ ret);
+			/* The API expects the exact number of requested
+			 * buffers; release everything allocated so far.
+			 */
+ dpaa_mbuf_free_bulk(pool, obj_table, n);
+ return -ENOBUFS;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i].addr; i++) {
+			/* TODO-errata: observed that bufs may contain NULL
+			 * entries, i.e. the first buffer is valid while the
+			 * remaining ones may be NULL.
+			 */
+ bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
+ m[n] = (struct rte_mbuf *)((char *)bufaddr
+ - bp_info->meta_data_size);
+ DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
+ (void *)bufaddr, (void *)m[n]);
+ n++;
+ }
+ }
+
+ DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
+ n, bp_info->bpid);
+ return 0;
+}
+
+static unsigned int
+dpaa_mbuf_get_count(const struct rte_mempool *mp)
+{
+ struct dpaa_bp_info *bp_info;
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (!mp || !mp->pool_data) {
+ DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+ return 0;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ return bman_query_free_buffers(bp_info->bp);
+}
+
+struct rte_mempool_ops dpaa_mpool_ops = {
+ .name = "dpaa",
+ .alloc = dpaa_mbuf_create_pool,
+ .free = dpaa_mbuf_free_pool,
+ .enqueue = dpaa_mbuf_free_bulk,
+ .dequeue = dpaa_mbuf_alloc_bulk,
+ .get_count = dpaa_mbuf_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
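
The ops are registered under the name "dpaa", so an application can attach them to an empty mempool before populating it. A minimal usage sketch (pool name and sizes are illustrative):

#include <rte_lcore.h>
#include <rte_mempool.h>

static struct rte_mempool *
make_dpaa_pool(void)
{
	struct rte_mempool *mp;

	/* 4096 elements of 2048 B with a 256-element per-lcore cache */
	mp = rte_mempool_create_empty("pkt_pool", 4096, 2048, 256, 0,
				      rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;
	if (rte_mempool_set_ops_byname(mp, "dpaa", NULL) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	/* next step: rte_mempool_populate_default(mp) */
	return mp;
}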
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
new file mode 100644
index 00000000..67958594
--- /dev/null
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DPAA_MEMPOOL_H__
+#define __DPAA_MEMPOOL_H__
+
+/* System headers */
+#include <stdio.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <unistd.h>
+
+#include <rte_mempool.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+
+#include <fsl_usd.h>
+#include <fsl_bman.h>
+
+#define CPU_SPIN_BACKOFF_CYCLES 512
+
+/* total number of bpools on SoC */
+#define DPAA_MAX_BPOOLS 256
+
+/* Maximum release/acquire from BMAN */
+#define DPAA_MBUF_MAX_ACQ_REL 8
+
+struct dpaa_bp_info {
+ struct rte_mempool *mp;
+ struct bman_pool *bp;
+ uint32_t bpid;
+ uint32_t size;
+ uint32_t meta_data_size;
+ int32_t dpaa_ops_index;
+};
+
+#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
+ ((struct dpaa_bp_info *)__mp->pool_data)
+
+#define DPAA_MEMPOOL_TO_BPID(__mp) \
+ (((struct dpaa_bp_info *)__mp->pool_data)->bpid)
+
+extern struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
+
+#define DPAA_BPID_TO_POOL_INFO(__bpid) (&rte_dpaa_bpid_info[__bpid])
+
+#endif
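
Illustrative only: the two lookup directions the header provides, from a mempool to its hardware buffer-pool id and from a bpid back to the cached pool info.

#include <rte_mempool.h>
#include "dpaa_mempool.h"

static inline uint32_t
pool_to_bpid(struct rte_mempool *mp)
{
	return DPAA_MEMPOOL_TO_BPID(mp);	/* mempool -> hardware bpid */
}

static inline struct dpaa_bp_info *
bpid_to_info(uint32_t bpid)
{
	return DPAA_BPID_TO_POOL_INFO(bpid);	/* bpid -> cached pool info */
}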
diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
new file mode 100644
index 00000000..cc635c73
--- /dev/null
+++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -0,0 +1,8 @@
+DPDK_17.11 {
+ global:
+
+ rte_dpaa_bpid_info;
+ rte_dpaa_pool_table;
+
+ local: *;
+};
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index 1a174968..dd19e100 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -58,5 +58,6 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 6df203fc..8bcbaa89 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -65,7 +65,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
struct dpaa2_bp_info *bp_info;
struct dpbp_attr dpbp_attr;
uint32_t bpid;
- int ret, p_ret;
+ int ret;
avail_dpbp = dpaa2_alloc_dpbp_dev();
@@ -78,7 +78,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
ret = dpaa2_affine_qbman_swp();
if (ret) {
RTE_LOG(ERR, PMD, "Failure in affining portal\n");
- return ret;
+ goto err1;
}
}
@@ -86,7 +86,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
if (ret != 0) {
PMD_INIT_LOG(ERR, "Resource enable failure with"
" err code: %d\n", ret);
- return ret;
+ goto err1;
}
ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
@@ -94,10 +94,16 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
if (ret != 0) {
PMD_INIT_LOG(ERR, "Resource read failure with"
" err code: %d\n", ret);
- p_ret = ret;
- ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
- avail_dpbp->token);
- return p_ret;
+ goto err2;
+ }
+
+ bp_info = rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_info) {
+ PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
+ ret = -ENOMEM;
+ goto err2;
}
/* Allocate the bp_list which will be added into global_bp_list */
@@ -105,7 +111,8 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
RTE_CACHE_LINE_SIZE);
if (!bp_list) {
PMD_INIT_LOG(ERR, "No heap memory available");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err3;
}
/* Set parameters of buffer pool list */
@@ -127,9 +134,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
rte_dpaa2_bpid_info[bpid].bpid = bpid;
- bp_info = rte_malloc(NULL,
- sizeof(struct dpaa2_bp_info),
- RTE_CACHE_LINE_SIZE);
rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid],
sizeof(struct dpaa2_bp_info));
mp->pool_data = (void *)bp_info;
@@ -138,6 +142,14 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
h_bp_list = bp_list;
return 0;
+err3:
+ rte_free(bp_info);
+err2:
+ dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+err1:
+ dpaa2_free_dpbp_dev(avail_dpbp);
+
+ return ret;
}
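
The rework above replaces scattered early returns with a single goto-based unwind ladder (err1..err3), so each failure path releases exactly the resources acquired before it. The idiom in isolation, with hypothetical acquire/release helpers:

#include <errno.h>

/* Hypothetical stand-ins for three resources acquired in order. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -ENOMEM; }
static void release_a(void) { }
static void release_b(void) { }

static int
setup_example(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;
	ret = acquire_b();
	if (ret)
		goto err_a;
	ret = acquire_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return ret;
}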
static void
@@ -213,7 +225,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
/* convert mbuf to buffers for the remainder */
for (i = 0; i < n ; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+ bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])
+ meta_data_size;
#else
bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
@@ -232,7 +244,7 @@ aligned:
for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
bufs[i] = (uint64_t)
- rte_mempool_virt2phy(pool, obj_table[n + i])
+ rte_mempool_virt2iova(obj_table[n + i])
+ meta_data_size;
#else
bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index 56b71bed..0971929e 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -39,7 +39,7 @@
struct buf_pool_cfg {
void *addr;
/**< The address from where DPAA2 will carve out the buffers */
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of the memory provided in addr */
uint32_t num;
/**< Number of buffers */
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
new file mode 100644
index 00000000..a2e2863c
--- /dev/null
+++ b/drivers/mempool/octeontx/Makefile
@@ -0,0 +1,68 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_mempool_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_rte_mempool_octeontx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+else
+CFLAGS_rte_mempool_octeontx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
new file mode 100644
index 00000000..3bc50f35
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -0,0 +1,830 @@
+/*
+ * BSD LICENSE
+ *
+ *   Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_fpavf.h"
+
+/* FPA Mbox Message */
+#define IDENTIFY 0x0
+
+#define FPA_CONFIGSET 0x1
+#define FPA_CONFIGGET 0x2
+#define FPA_START_COUNT 0x3
+#define FPA_STOP_COUNT 0x4
+#define FPA_ATTACHAURA 0x5
+#define FPA_DETACHAURA 0x6
+#define FPA_SETAURALVL 0x7
+#define FPA_GETAURALVL 0x8
+
+#define FPA_COPROC 0x1
+
+/* fpa mbox struct */
+struct octeontx_mbox_fpa_cfg {
+ int aid;
+ uint64_t pool_cfg;
+ uint64_t pool_stack_base;
+ uint64_t pool_stack_end;
+ uint64_t aura_cfg;
+};
+
+struct __attribute__((__packed__)) gen_req {
+ uint32_t value;
+};
+
+struct __attribute__((__packed__)) idn_req {
+ uint8_t domain_id;
+};
+
+struct __attribute__((__packed__)) gen_resp {
+ uint16_t domain_id;
+ uint16_t vfid;
+};
+
+struct __attribute__((__packed__)) dcfg_resp {
+ uint8_t sso_count;
+ uint8_t ssow_count;
+ uint8_t fpa_count;
+ uint8_t pko_count;
+ uint8_t tim_count;
+ uint8_t net_port_count;
+ uint8_t virt_port_count;
+};
+
+#define FPA_MAX_POOL 32
+#define FPA_PF_PAGE_SZ 4096
+
+#define FPA_LN_SIZE 128
+#define FPA_ROUND_UP(x, size) \
+ ((((unsigned long)(x)) + size-1) & (~(size-1)))
+#define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7)
+#define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7)
+
+#define POOL_ENA (0x1 << 0)
+#define POOL_DIS (0x0 << 0)
+#define POOL_SET_NAT_ALIGN (0x1 << 1)
+#define POOL_DIS_NAT_ALIGN (0x0 << 1)
+#define POOL_STYPE(x) (((x) & 0x1) << 2)
+#define POOL_LTYPE(x) (((x) & 0x3) << 3)
+#define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16)
+#define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32)
+
+struct fpavf_res {
+ void *pool_stack_base;
+ void *bar0;
+ uint64_t stack_ln_ptr;
+ uint16_t domain_id;
+ uint16_t vf_id; /* gpool_id */
+ uint16_t sz128; /* Block size in cache lines */
+ bool is_inuse;
+};
+
+struct octeontx_fpadev {
+ rte_spinlock_t lock;
+ uint8_t total_gpool_cnt;
+ struct fpavf_res pool[FPA_VF_MAX];
+};
+
+static struct octeontx_fpadev fpadev;
+
+/* lock is taken by caller */
+static int
+octeontx_fpa_gpool_alloc(unsigned int object_size)
+{
+ struct fpavf_res *res = NULL;
+ uint16_t gpool;
+ unsigned int sz128;
+
+ sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
+
+ for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
+
+		/* Skip a VF that is not mapped or is already in use */
+ if ((fpadev.pool[gpool].bar0 == NULL) ||
+ (fpadev.pool[gpool].is_inuse == true))
+ continue;
+
+ res = &fpadev.pool[gpool];
+
+ RTE_ASSERT(res->domain_id != (uint16_t)~0);
+ RTE_ASSERT(res->vf_id != (uint16_t)~0);
+ RTE_ASSERT(res->stack_ln_ptr != 0);
+
+ if (res->sz128 == 0) {
+ res->sz128 = sz128;
+
+ fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
+ return gpool;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* lock is taken by caller */
+static __rte_always_inline uintptr_t
+octeontx_fpa_gpool2handle(uint16_t gpool)
+{
+ struct fpavf_res *res = NULL;
+
+ RTE_ASSERT(gpool < FPA_VF_MAX);
+
+ res = &fpadev.pool[gpool];
+ return (uintptr_t)res->bar0 | gpool;
+}
+
+static __rte_always_inline bool
+octeontx_fpa_handle_valid(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+ int i;
+ bool ret = false;
+
+ if (unlikely(!handle))
+ return ret;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* get the bar address */
+ handle &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if ((uintptr_t)fpadev.pool[i].bar0 != handle)
+ continue;
+
+ /* validate gpool */
+ if (gpool != i)
+ return false;
+
+ res = &fpadev.pool[i];
+
+ if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
+ res->stack_ln_ptr == 0)
+ ret = false;
+ else
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
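
A pool handle is the VF's BAR0 address with the gpool index OR'ed into the low bits, which works because BAR0 is aligned well beyond FPA_GPOOL_MASK. A self-contained sketch of the packing arithmetic (mask value copied from FPA_VF_MAX - 1):

#include <stdint.h>

#define EX_GPOOL_MASK 31	/* FPA_VF_MAX - 1 */

static inline uintptr_t
ex_pack(uintptr_t bar0, uint8_t gpool)
{
	return bar0 | gpool;	/* bar0 assumed aligned > EX_GPOOL_MASK */
}

static inline uint8_t
ex_gpool(uintptr_t handle)
{
	return (uint8_t)(handle & EX_GPOOL_MASK);
}

static inline uintptr_t
ex_bar0(uintptr_t handle)
{
	return handle & ~(uintptr_t)EX_GPOOL_MASK;
}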
+
+static int
+octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
+ signed short buf_offset, unsigned int max_buf_count)
+{
+ void *memptr = NULL;
+ rte_iova_t phys_addr;
+ unsigned int memsz;
+ struct fpavf_res *fpa = NULL;
+ uint64_t reg;
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool];
+ memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
+ FPA_LN_SIZE;
+
+ /* Round-up to page size */
+ memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
+ memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
+ if (memptr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Configure stack */
+ fpa->pool_stack_base = memptr;
+ phys_addr = rte_malloc_virt2iova(memptr);
+
+ buf_size /= FPA_LN_SIZE;
+
+ /* POOL setup */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ buf_offset /= FPA_LN_SIZE;
+ reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
+ POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
+ POOL_ENA;
+
+ cfg.aid = 0;
+ cfg.pool_cfg = reg;
+ cfg.pool_stack_base = phys_addr;
+ cfg.pool_stack_end = phys_addr + memsz;
+ cfg.aura_cfg = (1 << 9);
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
+ fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
+ cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);
+
+ /* Now pool is in_use */
+ fpa->is_inuse = true;
+
+err:
+ if (ret < 0)
+ rte_free(memptr);
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_destroy(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ struct fpavf_res *fpa = NULL;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool_index];
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ /* reset and free the pool */
+ cfg.aid = 0;
+ cfg.pool_cfg = 0;
+ cfg.pool_stack_base = 0;
+ cfg.pool_stack_end = 0;
+ cfg.aura_cfg = 0;
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ ret = 0;
+err:
+	/* free the pool stack memory in any case */
+ rte_free(fpa->pool_stack_base);
+ fpa->pool_stack_base = NULL;
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_attach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_ATTACHAURA;
+ hdr.vfid = gpool_index;
+ hdr.res_code = 0;
+ memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
+	cfg.aid = gpool_index; /* gpool is gaura */
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ fpavf_log_err("Could not attach fpa ");
+ fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, gpool_index, ret, hdr.res_code);
+ ret = -EACCES;
+ goto err;
+ }
+err:
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_detach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_fpa_cfg cfg = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cfg.aid = gpool_index; /* gpool is gaura */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_DETACHAURA;
+ hdr.vfid = gpool_index;
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ }
+
+err:
+ return ret;
+}
+
+static int
+octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool)
+{
+ uint64_t va_end;
+
+ if (unlikely(!handle))
+ return -ENODEV;
+
+ va_end = (uintptr_t)memva + memsz;
+ va_end &= ~RTE_CACHE_LINE_MASK;
+
+ /* VHPOOL setup */
+ fpavf_write64((uintptr_t)memva,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(va_end,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+ return 0;
+}
+
+static int
+octeontx_fpapf_start_count(uint16_t gpool_index)
+{
+ int ret = 0;
+ struct octeontx_mbox_hdr hdr = {0};
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_START_COUNT;
+ hdr.vfid = gpool_index;
+ ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Could not start buffer counting for ");
+ fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_fpavf_free(unsigned int gpool)
+{
+ int ret = 0;
+
+ if (gpool >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Pool is free */
+ fpadev.pool[gpool].is_inuse = false;
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_gpool_free(uint16_t gpool)
+{
+ if (fpadev.pool[gpool].sz128 != 0) {
+ fpadev.pool[gpool].sz128 = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Return buffer size for a given pool
+ */
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ res = &fpadev.pool[gpool];
+ return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
+}
+
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle)
+{
+ uint64_t cnt, limit, avail;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ limit = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ return RTE_MIN(avail, (limit - cnt));
+}
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, char **va_start,
+ int node_id)
+{
+ unsigned int gpool;
+ void *memva;
+ unsigned long memsz;
+ uintptr_t gpool_handle;
+ uintptr_t pool_bar;
+ int res;
+
+ RTE_SET_USED(node_id);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+
+ if (unlikely(*va_start == NULL))
+ goto error_end;
+
+ object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
+ if (object_size > FPA_MAX_OBJ_SIZE) {
+ errno = EINVAL;
+ goto error_end;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+ res = octeontx_fpa_gpool_alloc(object_size);
+
+ /* Bail if failed */
+ if (unlikely(res < 0)) {
+ errno = res;
+ goto error_unlock;
+ }
+
+ /* get fpavf */
+ gpool = res;
+
+ /* get pool handle */
+ gpool_handle = octeontx_fpa_gpool2handle(gpool);
+ if (!octeontx_fpa_handle_valid(gpool_handle)) {
+ errno = ENOSPC;
+ goto error_gpool_free;
+ }
+
+ /* Get pool bar address from handle */
+ pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
+ object_count);
+ if (res < 0) {
+ errno = res;
+ goto error_gpool_free;
+ }
+
+ /* populate AURA fields */
+ res = octeontx_fpapf_aura_attach(gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_pool_destroy;
+ }
+
+ /* vf pool setup */
+ memsz = object_size * object_count;
+ memva = *va_start;
+ res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_gaura_detach;
+ }
+
+ /* Release lock */
+ rte_spinlock_unlock(&fpadev.lock);
+
+ /* populate AURA registers */
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ octeontx_fpapf_start_count(gpool);
+
+ return gpool_handle;
+
+error_gaura_detach:
+ (void) octeontx_fpapf_aura_detach(gpool);
+error_pool_destroy:
+ octeontx_fpavf_free(gpool);
+ octeontx_fpapf_pool_destroy(gpool);
+error_gpool_free:
+ octeontx_gpool_free(gpool);
+error_unlock:
+ rte_spinlock_unlock(&fpadev.lock);
+error_end:
+ return (uintptr_t)NULL;
+}
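
A hedged usage sketch of the create API: the caller supplies a virtually contiguous region (hypothetical here; in this driver it comes from the mempool's memzone) and gets back a handle, with errno describing any failure.

#include <rte_memory.h>
#include "octeontx_fpavf.h"

static uintptr_t
ex_create_pool(char *region, unsigned int obj_sz, unsigned int nb_objs)
{
	/* Returns 0 and sets errno on failure. */
	return octeontx_fpa_bufpool_create(obj_sz, nb_objs,
					   OCTEONTX_FPAVF_BUF_OFFSET,
					   &region, SOCKET_ID_ANY);
}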
+
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+ void **node, **curr, *head = NULL;
+ uint64_t sz;
+ uint64_t cnt, avail;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+ int ret;
+
+ RTE_SET_USED(node_id);
+
+ /* Wait for all outstanding writes to be committed */
+ rte_smp_wmb();
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the pool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ /* Check for no outstanding buffers */
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ if (cnt) {
+		fpavf_log_dbg("buffers exist in pool, cnt %" PRId64 "\n", cnt);
+ return -EBUSY;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ /* Prepare to empty the entire POOL */
+ fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ /* Empty the pool */
+ /* Invalidate the POOL */
+ octeontx_gpool_free(gpool);
+
+ /* Process all buffers in the pool */
+ while (avail--) {
+
+ /* Yank a buffer from the pool */
+ node = (void *)(uintptr_t)
+ fpavf_read64((void *)
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+
+ if (node == NULL) {
+ fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+ gpool, avail);
+ break;
+ }
+
+		/* Insert it into an ordered linked list */
+ for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+ if ((uintptr_t)node <= (uintptr_t)curr[0])
+ break;
+ }
+ node[0] = curr[0];
+ curr[0] = node;
+ }
+
+ /* Verify the linked list to be a perfect series */
+ sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+ for (curr = head; curr != NULL && curr[0] != NULL;
+ curr = curr[0]) {
+ if (curr == curr[0] ||
+ ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+ fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+ gpool, curr, curr[0]);
+ }
+ }
+
+ /* Disable pool operation */
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+ (void)octeontx_fpapf_pool_destroy(gpool);
+
+ /* Deactivate the AURA */
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ ret = octeontx_fpapf_aura_detach(gpool);
+ if (ret) {
+		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
+ gpool, ret);
+ }
+
+ /* Free VF */
+ (void)octeontx_fpavf_free(gpool);
+
+ rte_spinlock_unlock(&fpadev.lock);
+ return 0;
+}
+
+static void
+octeontx_fpavf_setup(void)
+{
+ uint8_t i;
+ static bool init_once;
+
+ if (!init_once) {
+ rte_spinlock_init(&fpadev.lock);
+ fpadev.total_gpool_cnt = 0;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+ fpadev.pool[i].domain_id = ~0;
+ fpadev.pool[i].stack_ln_ptr = 0;
+ fpadev.pool[i].sz128 = 0;
+ fpadev.pool[i].bar0 = NULL;
+ fpadev.pool[i].pool_stack_base = NULL;
+ fpadev.pool[i].is_inuse = false;
+ }
+ init_once = 1;
+ }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+ uint64_t val;
+ uint16_t domain_id;
+ uint16_t vf_id;
+ uint64_t stack_ln_ptr;
+
+ val = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
+ domain_id = (val >> 8) & 0xffff;
+ vf_id = (val >> 24) & 0xffff;
+
+ stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHPOOL_THRESHOLD(0)));
+ if (vf_id >= FPA_VF_MAX) {
+ fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
+ return -1;
+ }
+
+ if (fpadev.pool[vf_id].is_inuse) {
+ fpavf_log_err("vf_id %d is_inuse\n", vf_id);
+ return -1;
+ }
+
+ fpadev.pool[vf_id].domain_id = domain_id;
+ fpadev.pool[vf_id].vf_id = vf_id;
+ fpadev.pool[vf_id].bar0 = bar0;
+ fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+
+ /* SUCCESS */
+ return vf_id;
+}
+
+/* FPAVF pcie device aka mempool probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint8_t *idreg;
+ int res;
+ struct fpavf_res *fpa = NULL;
+
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(fpa);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+
+ octeontx_fpavf_setup();
+
+ res = octeontx_fpavf_identify(idreg);
+ if (res < 0)
+ return -1;
+
+ fpa = &fpadev.pool[res];
+ fpadev.total_gpool_cnt++;
+ rte_wmb();
+
+ fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
+ fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+ fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+ .id_table = pci_fpavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
new file mode 100644
index 00000000..1d09f007
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -0,0 +1,131 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_FPAVF_H__
+#define __OCTEONTX_FPAVF_H__
+
+#include <rte_io.h>
+#include "octeontx_pool_logs.h"
+
+/* fpa pool Vendor ID and Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_FPA_VF 0xA053
+
+#define FPA_VF_MAX 32
+#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+
+/* FPA VF register offsets */
+#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
+#define FPA_VF_INT_W1S(x) (0x210ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1S(x) (0x220ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1C(x) (0x230ULL | ((x) << 22))
+
+#define FPA_VF_VHPOOL_AVAILABLE(vhpool) (0x04150 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_THRESHOLD(vhpool) (0x04160 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_START_ADDR(vhpool) (0x04200 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_END_ADDR(vhpool) (0x04210 | ((vhpool)&0x0))
+
+#define FPA_VF_VHAURA_CNT(vaura) (0x20120 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_ADD(vaura) (0x20128 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_LIMIT(vaura) (0x20130 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura) (0x20140 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_ALLOC(vaura) (0x30000 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_FREE(vaura) (0x38000 | ((vaura)&0xf)<<18)
+
+#define FPA_VF_FREE_ADDRS_S(x, y, z) \
+ ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+
+/* FPA VF register offsets from VF_BAR4, size 2 MByte */
+#define FPA_VF_MSIX_VEC_ADDR 0x00000
+#define FPA_VF_MSIX_VEC_CTL 0x00008
+#define FPA_VF_MSIX_PBA 0xF0000
+
+#define FPA_VF0_APERTURE_SHIFT 22
+#define FPA_AURA_SET_SIZE 16
+
+#define FPA_MAX_OBJ_SIZE (128 * 1024)
+#define OCTEONTX_FPAVF_BUF_OFFSET 128
+
+/*
+ * In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO
+ * operations are safe to use without any IO memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized fallbacks for building on non-arm64 architectures */
+
+#define fpavf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, char **va_start,
+ int node);
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle);
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle);
+
+static __rte_always_inline uint8_t
+octeontx_fpa_bufpool_gpool(uintptr_t handle)
+{
+ return (uint8_t)handle & FPA_GPOOL_MASK;
+}
+#endif /* __OCTEONTX_FPAVF_H__ */
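
The per-aura registers place the aura index in bits 21:18, giving each aura a 256 KB register stride. A worked check of the address arithmetic (illustrative only):

#include <assert.h>
#include "octeontx_fpavf.h"

static void
ex_check_offsets(void)
{
	/* 0x20120 | (3 << 18) == 0x20120 + 0xC0000 */
	assert(FPA_VF_VHAURA_CNT(3) == 0xE0120);
	assert(FPA_VF_VHAURA_OP_FREE(0) == 0x38000);
}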
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c
index 764414b5..9525da1a 100644
--- a/drivers/event/octeontx/ssovf_mbox.c
+++ b/drivers/mempool/octeontx/octeontx_mbox.c
@@ -38,10 +38,11 @@
#include <rte_io.h>
#include <rte_spinlock.h>
-#include "ssovf_evdev.h"
+#include "octeontx_mbox.h"
+#include "octeontx_pool_logs.h"
/* Mbox operation timeout in seconds */
-#define MBOX_WAIT_TIME_SEC 3
+#define MBOX_WAIT_TIME_SEC 3
#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
/* Mbox channel state */
@@ -88,6 +89,15 @@ struct mbox_ram_hdr {
};
static inline void
+mbox_msgcpy(uint8_t *d, const uint8_t *s, uint16_t size)
+{
+ uint16_t i;
+
+ for (i = 0; i < size; i++)
+ d[i] = s[i];
+}
+
+static inline void
mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
const void *txmsg, uint16_t txsize)
{
@@ -106,7 +116,7 @@ mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
/* Copy msg body */
if (txmsg)
- memcpy(ram_mbox_msg, txmsg, txsize);
+ mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
/* Prepare new hdr */
new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
@@ -166,12 +176,12 @@ mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
len = RTE_MIN(rx_hdr.len, rxsize);
if (rxmsg)
- memcpy(rxmsg, ram_mbox_msg, len);
+ mbox_msgcpy(rxmsg, ram_mbox_msg, len);
return len;
error:
- ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+ mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
hdr->res_code);
return res;
@@ -185,7 +195,7 @@ mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
if (m->init_once == 0 || hdr == NULL ||
txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
- ssovf_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
m->init_once, hdr, txsize, rxsize);
return res;
}
@@ -209,7 +219,7 @@ mbox_setup(struct mbox *m)
m->reg += SSO_VHGRP_PF_MBOX(1);
if (m->ram_mbox_base == NULL || m->reg == NULL) {
- ssovf_log_err("Invalid ram_mbox_base=%p or reg=%p",
+ mbox_log_err("Invalid ram_mbox_base=%p or reg=%p",
m->ram_mbox_base, m->reg);
return -EINVAL;
}
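
The switch from memcpy() to mbox_msgcpy() appears motivated by the mailbox living in device RAM: a plain byte loop keeps every access narrow and in program order, while memcpy() is free to use wider or overlapping accesses. A standalone rendering of the same idea (this rationale is inferred, not stated in the patch):

#include <stdint.h>

static inline void
ex_msgcpy(volatile uint8_t *d, const uint8_t *s, uint16_t size)
{
	uint16_t i;

	/* One byte per store: narrow, sequential device-memory writes. */
	for (i = 0; i < size; i++)
		d[i] = s[i];
}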
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/mempool/octeontx/octeontx_mbox.h
index ba6d5142..49f38257 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
+++ b/drivers/mempool/octeontx/octeontx_mbox.h
@@ -30,11 +30,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef __RTE_PMD_OCTEONTX_SSOVF_H__
-#define __RTE_PMD_OCTEONTX_SSOVF_H__
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
#include <rte_common.h>
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
struct octeontx_ssovf_info {
uint16_t domain; /* Domain id */
uint8_t total_ssovfs; /* Total sso groups available in domain */
@@ -58,4 +61,4 @@ void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
-#endif /* __RTE_PMD_OCTEONTX_SSOVF_H__ */
+#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
new file mode 100644
index 00000000..58ccb0f0
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -0,0 +1,68 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_POOL_LOGS_H__
+#define __OCTEONTX_POOL_LOGS_H__
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
+#define fpavf_log_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define fpavf_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define mbox_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#else
+#define fpavf_log_info(fmt, args...)
+#define fpavf_log_dbg(fmt, args...)
+#define mbox_log_info(fmt, args...)
+#define mbox_log_dbg(fmt, args...)
+#endif
+
+#define fpavf_func_trace fpavf_log_dbg
+#define fpavf_log_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define mbox_func_trace mbox_log_dbg
+#define mbox_log_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#endif /* __OCTEONTX_POOL_LOGS_H__ */
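
Usage sketch: each macro prefixes the message with the function name and line number, and the info/debug variants compile away unless RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG is enabled (illustrative only):

#include "octeontx_pool_logs.h"

static void
ex_log(int gpool, int rc)
{
	fpavf_log_dbg("gpool %d ready", gpool);
	if (rc < 0)
		fpavf_log_err("setup failed rc=%d", rc);
}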
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/mempool/octeontx/octeontx_ssovf.c
index e1c0c6d5..012c887d 100644
--- a/drivers/event/octeontx/ssovf_probe.c
+++ b/drivers/mempool/octeontx/octeontx_ssovf.c
@@ -35,8 +35,19 @@
#include <rte_eal.h>
#include <rte_io.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
-#include "ssovf_evdev.h"
+#include "octeontx_mbox.h"
+#include "octeontx_pool_logs.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
struct ssovf_res {
uint16_t domain;
@@ -86,7 +97,7 @@ octeontx_ssovf_info(struct octeontx_ssovf_info *info)
if (sdev.grp[i].vfid != i ||
sdev.grp[i].bar0 == NULL ||
sdev.grp[i].domain != domain) {
- ssovf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
i, sdev.grp[i].vfid,
domain, sdev.grp[i].domain,
sdev.grp[i].bar0);
@@ -99,7 +110,7 @@ octeontx_ssovf_info(struct octeontx_ssovf_info *info)
if (sdev.hws[i].vfid != i ||
sdev.hws[i].bar0 == NULL ||
sdev.hws[i].domain != domain) {
- ssovf_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
i, sdev.hws[i].vfid,
domain, sdev.hws[i].domain,
sdev.hws[i].bar0);
@@ -169,7 +180,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (pci_dev->mem_resource[0].addr == NULL ||
pci_dev->mem_resource[2].addr == NULL ||
pci_dev->mem_resource[4].addr == NULL) {
- ssovf_log_err("Empty bars %p %p %p",
+ mbox_log_err("Empty bars %p %p %p",
pci_dev->mem_resource[0].addr,
pci_dev->mem_resource[2].addr,
pci_dev->mem_resource[4].addr);
@@ -177,7 +188,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
- ssovf_log_err("Bar4 len mismatch %d != %d",
+ mbox_log_err("Bar4 len mismatch %d != %d",
SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
return -EINVAL;
}
@@ -185,7 +196,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
id = pci_dev->mem_resource[4].addr;
vfid = id->vfid;
if (vfid >= SSO_MAX_VHWS) {
- ssovf_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
return -EINVAL;
}
@@ -198,7 +209,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sdev.total_ssowvfs++;
rte_wmb();
- ssovf_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
res->vfid, sdev.total_ssowvfs);
return 0;
}
@@ -239,7 +250,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (pci_dev->mem_resource[0].addr == NULL ||
pci_dev->mem_resource[2].addr == NULL) {
- ssovf_log_err("Empty bars %p %p",
+ mbox_log_err("Empty bars %p %p",
pci_dev->mem_resource[0].addr,
pci_dev->mem_resource[2].addr);
return -ENODEV;
@@ -252,7 +263,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
rte_write64((1ULL << 33) - 1, idreg);
vfid = (val >> 16) & 0xffff;
if (vfid >= SSO_MAX_VHGRP) {
- ssovf_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
return -EINVAL;
}
@@ -264,7 +275,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sdev.total_ssovfs++;
rte_wmb();
- ssovf_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
res->vfid, sdev.total_ssovfs);
return 0;
}
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
new file mode 100644
index 00000000..e89355cd
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -0,0 +1,253 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *   * Neither the name of Cavium Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_fpavf.h"
+
+/*
+ * Per-pool descriptor.
+ * Links a mempool with the corresponding memzone
+ * that provides the memory backing the pool's elements.
+ */
+struct octeontx_pool_info {
+ const struct rte_mempool *mp;
+ uintptr_t mz_addr;
+
+ SLIST_ENTRY(octeontx_pool_info) link;
+};
+
+SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
+
+/* List of the allocated pools */
+static struct octeontx_pool_list octeontx_pool_head =
+ SLIST_HEAD_INITIALIZER(octeontx_pool_head);
+/* Spinlock to protect pool list */
+static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int
+octeontx_fpavf_alloc(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ struct octeontx_pool_info *pool_info;
+ uint32_t memseg_count = mp->size;
+ uint32_t object_size;
+ uintptr_t va_start;
+ int rc = 0;
+
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+ if (pool_info->mp == mp)
+ break;
+ }
+ if (pool_info == NULL) {
+ rte_spinlock_unlock(&pool_list_lock);
+ return -ENXIO;
+ }
+
+ /* virtual hugepage mapped addr */
+ va_start = pool_info->mz_addr;
+ rte_spinlock_unlock(&pool_list_lock);
+
+ object_size = mp->elt_size + mp->header_size + mp->trailer_size;
+
+ pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
+ OCTEONTX_FPAVF_BUF_OFFSET,
+ (char **)&va_start,
+ mp->socket_id);
+ rc = octeontx_fpa_bufpool_block_size(pool);
+ if (rc < 0)
+ goto _end;
+
+ if ((uint32_t)rc != object_size)
+ fpavf_log_err("buffer size mismatch: %d instead of %u\n",
+ rc, object_size);
+
+ fpavf_log_info("Pool created %p with .. ", (void *)pool);
+ fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
+
+ /* assign pool handle to mempool */
+ mp->pool_id = (uint64_t)pool;
+
+ return 0;
+
+_end:
+ return rc;
+}
+
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+ struct octeontx_pool_info *pool_info;
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+ if (pool_info->mp == mp)
+ break;
+ }
+
+ if (pool_info == NULL) {
+ rte_spinlock_unlock(&pool_list_lock);
+ rte_panic("%s: trying to free pool with no valid metadata",
+ __func__);
+ }
+
+ SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
+ rte_spinlock_unlock(&pool_list_lock);
+
+ rte_free(pool_info);
+ octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
+static __rte_always_inline void *
+octeontx_fpa_bufpool_alloc(uintptr_t handle)
+{
+ return (void *)(uintptr_t)fpavf_read64((void *)(handle +
+ FPA_VF_VHAURA_OP_ALLOC(0)));
+}
+
+static __rte_always_inline void
+octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
+{
+ uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
+ 0 /* DWB */, 1 /* FABS */);
+
+ fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
+}
+
+static int
+octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ uintptr_t pool;
+ unsigned int index;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++)
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+
+ return 0;
+}
+
+static int
+octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned int n)
+{
+ unsigned int index;
+ uintptr_t pool;
+ void *obj;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++) {
+ obj = octeontx_fpa_bufpool_alloc(pool);
+ if (obj == NULL) {
+ /*
+ * Failed to allocate the requested number of objects
+ * from the pool. The current pool implementation
+ * requires either completing the entire request or
+ * returning an error, so return the buffers already
+ * allocated back to the pool.
+ */
+ for (; index > 0; index--) {
+ obj_table--;
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+ }
+ return -ENOMEM;
+ }
+ *obj_table = obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+octeontx_fpavf_get_count(const struct rte_mempool *mp)
+{
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ return octeontx_fpa_bufpool_free_count(pool);
+}
+
+static int
+octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
+ unsigned int *flags)
+{
+ RTE_SET_USED(mp);
+ *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
+ MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
+ return 0;
+}
+
+static int
+octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
+ char *vaddr, rte_iova_t paddr, size_t len)
+{
+ struct octeontx_pool_info *pool_info;
+
+ RTE_SET_USED(paddr);
+ RTE_SET_USED(len);
+
+ pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
+ if (pool_info == NULL)
+ return -ENOMEM;
+
+ pool_info->mp = mp;
+ pool_info->mz_addr = (uintptr_t)vaddr;
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
+ rte_spinlock_unlock(&pool_list_lock);
+ return 0;
+}
+
+static struct rte_mempool_ops octeontx_fpavf_ops = {
+ .name = "octeontx_fpavf",
+ .alloc = octeontx_fpavf_alloc,
+ .free = octeontx_fpavf_free,
+ .enqueue = octeontx_fpavf_enqueue,
+ .dequeue = octeontx_fpavf_dequeue,
+ .get_count = octeontx_fpavf_get_count,
+ .get_capabilities = octeontx_fpavf_get_capabilities,
+ .register_memory_area = octeontx_fpavf_register_memory_area,
+};
+
+MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
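
For reference, a minimal usage sketch (not part of this patch) of how a mempool is bound to the "octeontx_fpavf" handler registered above; the pool name and sizes are illustrative. In 17.11 populate notifies register_memory_area() before the first alloc(), which is why alloc() can expect the pool_info entry to already exist.

#include <rte_mempool.h>

static struct rte_mempool *
fpa_pool_create(void)
{
	struct rte_mempool *mp;

	/* create an empty pool: ops must be chosen before populating */
	mp = rte_mempool_create_empty("fpa_pool", 4096, 2048,
				      0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* select the HW-backed handler by its registered name */
	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}
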
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
new file mode 100644
index 00000000..fe8cdeca
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -0,0 +1,9 @@
+DPDK_17.11 {
+ global:
+
+ octeontx_ssovf_info;
+ octeontx_ssovf_bar;
+ octeontx_ssovf_mbox_send;
+
+ local: *;
+};
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
index b339d907..a7889b96 100644
--- a/drivers/mempool/ring/Makefile
+++ b/drivers/mempool/ring/Makefile
@@ -37,6 +37,7 @@ LIB = librte_mempool_ring.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
EXPORT_MAP := rte_mempool_ring_version.map
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index 7577b23c..f8d6c574 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -40,6 +40,7 @@ CFLAGS += $(WERROR_FLAGS)
# Headers
CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
EXPORT_MAP := rte_mempool_stack_version.map
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d33c9590..ef09b4e1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -36,78 +36,55 @@ ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
$(warning thunderx pmd is not supported by old compilers)
endif
-core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
-core-libs += librte_net librte_kvargs
-
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
-DEPDIRS-af_packet = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
-DEPDIRS-ark = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
-DEPDIRS-avp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
-DEPDIRS-bnx2x = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
-DEPDIRS-bonding = $(core-libs) librte_cmdline
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
-DEPDIRS-cxgbe = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
-DEPDIRS-dpaa2 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
-DEPDIRS-e1000 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
-DEPDIRS-ena = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
-DEPDIRS-enic = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe
-DEPDIRS-failsafe = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
-DEPDIRS-fm10k = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
-DEPDIRS-i40e = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
-DEPDIRS-ixgbe = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
-DEPDIRS-liquidio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
-DEPDIRS-mlx4 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DEPDIRS-mlx5 = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
-DEPDIRS-nfp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
-DEPDIRS-bnxt = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
-DEPDIRS-null = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx
DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
-DEPDIRS-pcap = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
-DEPDIRS-qede = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
-DEPDIRS-ring = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
-DEPDIRS-sfc = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
-DEPDIRS-szedata2 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
-DEPDIRS-tap = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
-DEPDIRS-thunderx = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
-DEPDIRS-virtio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
-DEPDIRS-vmxnet3 = $(core-libs)
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
-DEPDIRS-xenvirt = $(core-libs) librte_cmdline
ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += kni
endif
-DEPDIRS-kni = $(core-libs) librte_kni
+
+ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += softnic
+endif # $(CONFIG_RTE_LIBRTE_SCHED)
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
endif # $(CONFIG_RTE_LIBRTE_VHOST)
-DEPDIRS-vhost = $(core-libs) librte_vhost
+
+ifeq ($(CONFIG_RTE_LIBRTE_MRVL_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
+$(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
+endif
+endif
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index 70d517c1..bb37d67a 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -44,6 +44,9 @@ LIBABIVER := 1
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
#
# all source are stored in SRCS-y
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 9a47852c..fa84eb9c 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -41,7 +41,7 @@
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
@@ -59,6 +59,7 @@
#define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG "framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt"
+#define ETH_AF_PACKET_QDISC_BYPASS_ARG "qdisc_bypass"
#define DFLT_BLOCK_SIZE (1 << 12)
#define DFLT_FRAME_SIZE (1 << 11)
@@ -75,7 +76,7 @@ struct pkt_rx_queue {
unsigned int framenum;
struct rte_mempool *mb_pool;
- uint8_t in_port;
+ uint16_t in_port;
volatile unsigned long rx_pkts;
volatile unsigned long err_pkts;
@@ -115,6 +116,7 @@ static const char *valid_arguments[] = {
ETH_AF_PACKET_BLOCKSIZE_ARG,
ETH_AF_PACKET_FRAMESIZE_ARG,
ETH_AF_PACKET_FRAMECOUNT_ARG,
+ ETH_AF_PACKET_QDISC_BYPASS_ARG,
NULL
};
@@ -165,7 +167,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* check for vlan info */
if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
mbuf->vlan_tci = ppd->tp_vlan_tci;
- mbuf->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
}
/* release incoming frame and advance ring buffer */
@@ -216,7 +218,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
mbuf = *bufs++;
/* drop oversized packets */
- if (rte_pktmbuf_data_len(mbuf) > pkt_q->frame_data_size) {
+ if (mbuf->pkt_len > pkt_q->frame_data_size) {
rte_pktmbuf_free(mbuf);
continue;
}
@@ -237,8 +239,17 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* copy the tx frame data */
pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
sizeof(struct sockaddr_ll);
- memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
- ppd->tp_len = ppd->tp_snaplen = rte_pktmbuf_data_len(mbuf);
+
+ struct rte_mbuf *tmp_mbuf = mbuf;
+ while (tmp_mbuf) {
+ uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
+ memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
+ pbuf += data_len;
+ tmp_mbuf = tmp_mbuf->next;
+ }
+
+ ppd->tp_len = mbuf->pkt_len;
+ ppd->tp_snaplen = mbuf->pkt_len;
/* release incoming frame and advance ring buffer */
ppd->tp_status = TP_STATUS_SEND_REQUEST;
@@ -320,7 +331,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
unsigned i, imax;
@@ -353,6 +364,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
igb_stats->opackets = tx_total;
igb_stats->oerrors = tx_err_total;
igb_stats->obytes = tx_bytes_total;
+ return 0;
}
static void
@@ -550,6 +562,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
unsigned int blockcnt,
unsigned int framesize,
unsigned int framecnt,
+ unsigned int qdisc_bypass,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev,
struct rte_kvargs *kvlist)
@@ -571,9 +584,6 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
#if defined(PACKET_FANOUT)
int fanout_arg;
#endif
-#if defined(PACKET_QDISC_BYPASS)
- int bypass;
-#endif
for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
pair = &kvlist->pairs[k_idx];
@@ -689,9 +699,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
}
#if defined(PACKET_QDISC_BYPASS)
- bypass = 1;
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
- &bypass, sizeof(bypass));
+ &qdisc_bypass, sizeof(qdisc_bypass));
if (rc == -1) {
RTE_LOG(ERR, PMD,
"%s: could not set PACKET_QDISC_BYPASS "
@@ -699,6 +708,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
pair->value);
goto error;
}
+#else
+ RTE_SET_USED(qdisc_bypass);
#endif
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
@@ -803,7 +814,6 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
return 0;
@@ -842,6 +852,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
unsigned int framesize = DFLT_FRAME_SIZE;
unsigned int framecount = DFLT_FRAME_COUNT;
unsigned int qpairs = 1;
+ unsigned int qdisc_bypass = 1;
/* do some parameter checking */
if (*sockfd < 0)
@@ -893,6 +904,16 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
}
continue;
}
+ if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
+ qdisc_bypass = atoi(pair->value);
+ if (qdisc_bypass > 1) {
+ RTE_LOG(ERR, PMD,
+ "%s: invalid bypass value\n",
+ name);
+ return -1;
+ }
+ continue;
+ }
}
if (framesize > blocksize) {
@@ -918,6 +939,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (rte_pmd_init_internals(dev, *sockfd, qpairs,
blocksize, blockcount,
framesize, framecount,
+ qdisc_bypass,
&internals, &eth_dev,
kvlist) < 0)
return -1;
@@ -1012,4 +1034,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
"qpairs=<int> "
"blocksz=<int> "
"framesz=<int> "
- "framecnt=<int>");
+ "framecnt=<int> "
+ "qdisc_bypass=<0|1>");
diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile
index ca64b195..f1433bd2 100644
--- a/drivers/net/ark/Makefile
+++ b/drivers/net/ark/Makefile
@@ -62,5 +62,8 @@ LDLIBS += -lpthread
ifdef CONFIG_RTE_EXEC_ENV_LINUXAPP
LDLIBS += -ldl
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index 221460c7..929dc7d1 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -93,7 +93,7 @@ ark_ddm_reset(struct ark_ddm_t *ddm)
}
void
-ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr, uint32_t interval)
+ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr, uint32_t interval)
{
ddm->setup.cons_write_index_addr = cons_addr;
ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index de61926c..f67ad012 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -127,7 +127,7 @@ struct ark_ddm_cpld_ps_t {
#define ARK_DDM_SETUP 0x00e0
struct ark_ddm_setup_t {
- phys_addr_t cons_write_index_addr;
+ rte_iova_t cons_write_index_addr;
uint32_t write_index_interval; /* 4ns each */
volatile uint32_t cons_index;
};
@@ -165,7 +165,7 @@ void ark_ddm_start(struct ark_ddm_t *ddm);
int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
void ark_ddm_reset(struct ark_ddm_t *ddm);
void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
-void ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr,
+void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
uint32_t interval);
void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 6db362b0..ff87c20e 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -35,12 +35,12 @@
#include <sys/stat.h>
#include <dlfcn.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include "ark_global.h"
#include "ark_logs.h"
-#include "ark_ethdev.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
@@ -66,7 +66,7 @@ static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
-static void eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
@@ -242,7 +242,7 @@ check_for_ext(struct ark_adapter *ark)
(int (*)(struct rte_eth_dev *, void *))
dlsym(ark->d_handle, "dev_set_link_down");
ark->user_ext.stats_get =
- (void (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+ (int (*)(struct rte_eth_dev *, struct rte_eth_stats *,
void *))
dlsym(ark->d_handle, "stats_get");
ark->user_ext.stats_reset =
@@ -343,7 +343,6 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
/* We are a single function multi-port device. */
ret = ark_config_device(dev);
dev->dev_ops = &ark_eth_dev_ops;
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
@@ -452,10 +451,16 @@ ark_config_device(struct rte_eth_dev *dev)
*/
ark->start_pg = 0;
ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
+ if (ark->pg == NULL)
+ return -1;
ark_pktgen_reset(ark->pg);
ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
+ if (ark->pc == NULL)
+ return -1;
ark_pktchkr_stop(ark->pc);
ark->pd = ark_pktdir_init(ark->pktdir.v);
+ if (ark->pd == NULL)
+ return -1;
/* Verify HW */
if (ark_udm_verify(ark->udm.v))
@@ -641,7 +646,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
status = eth_ark_tx_queue_stop(dev, i);
if (status != 0) {
- uint8_t port = dev->data->port_id;
+ uint16_t port = dev->data->port_id;
PMD_DRV_LOG(ERR,
"tx_queue stop anomaly"
" port %u, queue %u\n",
@@ -693,7 +698,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
ark_udm_dump_stats(ark->udm.v, "Post stop");
ark_udm_dump_perf(ark->udm.v, "Post stop");
- for (i = 0; i < dev->data->nb_tx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_ark_rx_dump_queue(dev, i, __func__);
/* Stop the packet checker if it is running */
@@ -811,7 +816,7 @@ eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
return 0;
}
-static void
+static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint16_t i;
@@ -830,8 +835,9 @@ eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
if (ark->user_ext.stats_get)
- ark->user_ext.stats_get(dev, stats,
+ return ark->user_ext.stats_get(dev, stats,
ark->user_data[dev->data->port_id]);
+ return 0;
}
static void
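
The void to int conversion of stats_get seen here follows the 17.11 ethdev API change; a caller-side sketch of what it enables (the port id is a placeholder), since rte_eth_stats_get() can now report failure instead of silently returning zeroed counters:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0) {
		printf("port %" PRIu16 ": stats unavailable\n", port_id);
		return;
	}
	printf("port %" PRIu16 ": rx %" PRIu64 " tx %" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets);
}
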
diff --git a/drivers/net/ark/ark_ethdev.h b/drivers/net/ark/ark_ethdev.h
deleted file mode 100644
index df5547bf..00000000
--- a/drivers/net/ark/ark_ethdev.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _ARK_ETHDEV_H_
-#define _ARK_ETHDEV_H_
-
-#endif
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index f39e6f68..a3c0377c 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -36,7 +36,6 @@
#include "ark_ethdev_rx.h"
#include "ark_global.h"
#include "ark_logs.h"
-#include "ark_ethdev.h"
#include "ark_mpu.h"
#include "ark_udm.h"
@@ -62,7 +61,7 @@ struct ark_rx_queue {
struct rte_mbuf **reserve_q;
/* array of physical addresses of the mbuf data pointer */
/* This point is a virtual address */
- phys_addr_t *paddress_q;
+ rte_iova_t *paddress_q;
struct rte_mempool *mb_pool;
struct ark_udm_t *udm;
@@ -96,18 +95,18 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
struct ark_rx_queue *queue,
uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
{
- phys_addr_t queue_base;
- phys_addr_t phys_addr_q_base;
- phys_addr_t phys_addr_prod_index;
+ rte_iova_t queue_base;
+ rte_iova_t phys_addr_q_base;
+ rte_iova_t phys_addr_prod_index;
- queue_base = rte_malloc_virt2phy(queue);
+ queue_base = rte_malloc_virt2iova(queue);
phys_addr_prod_index = queue_base +
offsetof(struct ark_rx_queue, prod_index);
- phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+ phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
/* Verify HW */
- if (ark_mpu_verify(queue->mpu, sizeof(phys_addr_t))) {
+ if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
return -1;
}
@@ -205,7 +204,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
queue->paddress_q =
rte_zmalloc_socket("Ark_rx_queue paddr",
- nb_desc * sizeof(phys_addr_t),
+ nb_desc * sizeof(rte_iova_t),
64,
socket_id);
@@ -500,22 +499,22 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
case 0:
while (count != nb) {
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 3:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 2:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
case 1:
queue->paddress_q[seed_m++] =
- (*mbufs++)->buf_physaddr;
+ (*mbufs++)->buf_iova;
count++;
/* FALLTHROUGH */
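
The renames above (rte_malloc_virt2phy to rte_malloc_virt2iova, buf_physaddr to buf_iova) are part of the 17.11 IOVA rework. A generic sketch of the new pattern, with an illustrative ring name and alignment:

#include <rte_malloc.h>
#include <rte_memory.h>

static rte_iova_t
desc_ring_alloc(void **vaddr, size_t len)
{
	*vaddr = rte_zmalloc("desc_ring", len, 64);
	if (*vaddr == NULL)
		return RTE_BAD_IOVA;
	/* bus address the device should be programmed with */
	return rte_malloc_virt2iova(*vaddr);
}
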
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 9ae7ae0e..4ef55d10 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -37,7 +37,6 @@
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
-#include "ark_ethdev.h"
#include "ark_logs.h"
#define ARK_TX_META_SIZE 32
@@ -94,7 +93,7 @@ eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
const struct rte_mbuf *mbuf,
uint8_t flags)
{
- meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
meta->delta_ns = 0;
meta->data_len = rte_pktmbuf_data_len(mbuf);
meta->flags = flags;
@@ -311,15 +310,15 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
static int
eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
{
- phys_addr_t queue_base, ring_base, cons_index_addr;
+ rte_iova_t queue_base, ring_base, cons_index_addr;
uint32_t write_interval_ns;
/* Verify HW -- MPU */
if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
return -1;
- queue_base = rte_malloc_virt2phy(queue);
- ring_base = rte_malloc_virt2phy(queue->meta_q);
+ queue_base = rte_malloc_virt2iova(queue);
+ ring_base = rte_malloc_virt2iova(queue->meta_q);
cons_index_addr =
queue_base + offsetof(struct ark_tx_queue, cons_index);
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index 63b7a261..d26c8198 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -91,7 +91,7 @@ int dev_set_link_up(struct rte_eth_dev *dev,
int dev_set_link_down(struct rte_eth_dev *dev,
void *user_data);
-void stats_get(struct rte_eth_dev *dev,
+int stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats,
void *user_data);
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index 2a6375fe..aef2cf73 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -97,7 +97,7 @@ struct ark_user_ext {
int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *);
int (*dev_set_link_up)(struct rte_eth_dev *, void *);
int (*dev_set_link_down)(struct rte_eth_dev *, void *);
- void (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ int (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
void (*stats_reset)(struct rte_eth_dev *, void *);
void (*mac_addr_add)(struct rte_eth_dev *,
struct ether_addr *,
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c
index cd2c0788..d4ba6dc7 100644
--- a/drivers/net/ark/ark_mpu.c
+++ b/drivers/net/ark/ark_mpu.c
@@ -118,7 +118,7 @@ ark_mpu_reset_stats(struct ark_mpu_t *mpu)
}
int
-ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring, uint32_t ring_size,
+ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring, uint32_t ring_size,
int is_tx)
{
ark_mpu_reset(mpu);
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h
index a0171dbd..f6f6c808 100644
--- a/drivers/net/ark/ark_mpu.h
+++ b/drivers/net/ark/ark_mpu.h
@@ -75,7 +75,7 @@ struct ark_mpu_hw_t {
#define ARK_MPU_CFG 0x040
struct ark_mpu_cfg_t {
- phys_addr_t ring_base; /* phys_addr_t is a uint64_t */
+ rte_iova_t ring_base; /* rte_iova_t is a uint64_t */
uint32_t ring_size;
uint32_t ring_mask;
uint32_t min_host_move;
@@ -137,7 +137,7 @@ int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
void ark_mpu_stop(struct ark_mpu_t *mpu);
void ark_mpu_start(struct ark_mpu_t *mpu);
int ark_mpu_reset(struct ark_mpu_t *mpu);
-int ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring,
+int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring,
uint32_t ring_size, int is_tx);
void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index c3040af3..202a1d9b 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -112,6 +112,10 @@ ark_pktchkr_init(void *addr, int ord, int l2_mode)
struct ark_pkt_chkr_inst *inst =
rte_malloc("ark_pkt_chkr_inst",
sizeof(struct ark_pkt_chkr_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_chkr_inst.\n");
+ return inst;
+ }
inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr;
inst->cregs =
(struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100);
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
index 66e5ce24..eb47dedb 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/net/ark/ark_pktdir.c
@@ -36,6 +36,7 @@
#include "ark_pktdir.h"
#include "ark_global.h"
+#include "ark_logs.h"
ark_pkt_dir_t
@@ -45,6 +46,10 @@ ark_pktdir_init(void *base)
rte_malloc("ark_pkt_dir_inst",
sizeof(struct ark_pkt_dir_inst),
0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_dir_inst.\n");
+ return inst;
+ }
inst->regs = (struct ark_pkt_dir_regs *)base;
inst->regs->ctrl = 0x00110110; /* POR state */
return inst;
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index 8c7a8a2d..018f37b6 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -110,6 +110,10 @@ ark_pktgen_init(void *adr, int ord, int l2_mode)
struct ark_pkt_gen_inst *inst =
rte_malloc("ark_pkt_gen_inst_pmd",
sizeof(struct ark_pkt_gen_inst), 0);
+ if (inst == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc ark_pkt_gen_inst.\n");
+ return inst;
+ }
inst->regs = (struct ark_pkt_gen_regs *)adr;
inst->ordinal = ord;
inst->l2_mode = l2_mode;
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
index 1ba7d26d..7a429ac7 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/net/ark/ark_udm.c
@@ -122,7 +122,7 @@ ark_udm_configure(struct ark_udm_t *udm,
}
void
-ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr)
+ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr)
{
udm->rt_cfg.hw_prod_addr = addr;
}
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 29bf1e8f..915343fe 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -137,7 +137,7 @@ struct ark_udm_tlp_ps_t {
#define ARK_UDM_RT_CFG 0x00e0
struct ark_udm_rt_cfg_t {
- phys_addr_t hw_prod_addr;
+ rte_iova_t hw_prod_addr;
uint32_t write_interval; /* 4ns cycles */
volatile uint32_t prod_idx; /* RO */
};
@@ -171,7 +171,7 @@ void ark_udm_configure(struct ark_udm_t *udm,
uint32_t headroom,
uint32_t dataroom,
uint32_t write_interval_ns);
-void ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr);
+void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
void ark_udm_stats_reset(struct ark_udm_t *udm);
void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile
index cd465aac..c29ecf45 100644
--- a/drivers/net/avp/Makefile
+++ b/drivers/net/avp/Makefile
@@ -39,6 +39,9 @@ LIB = librte_pmd_avp.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_avp_version.map
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index c746a0e2..9b342bfa 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -40,11 +40,11 @@
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
-#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
@@ -70,7 +70,7 @@ static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -107,7 +107,7 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);
-static void avp_dev_stats_get(struct rte_eth_dev *dev,
+static int avp_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);
@@ -190,7 +190,7 @@ struct avp_dev {
struct rte_eth_dev_data *dev_data;
/**< Back pointer to ethernet device data */
volatile uint32_t flags; /**< Device operational flags */
- uint8_t port_id; /**< Ethernet port identifier */
+ uint16_t port_id; /**< Ethernet port identifier */
struct rte_mempool *pool; /**< pkt mbuf mempool */
unsigned int guest_mbuf_size; /**< local pool mbuf size */
unsigned int host_mbuf_size; /**< host mbuf size */
@@ -387,7 +387,7 @@ avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
- phys_addr_t host_phys_addr)
+ rte_iova_t host_phys_addr)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_mem_resource *resource;
@@ -1004,8 +1004,6 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
-
/* Check current migration status */
if (avp_dev_migration_pending(eth_dev)) {
PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
@@ -1361,7 +1359,7 @@ avp_dev_copy_from_buffers(struct avp_dev *avp,
src_offset = 0;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- ol_flags = PKT_RX_VLAN_PKT;
+ ol_flags = PKT_RX_VLAN;
vlan_tci = pkt_buf->vlan_tci;
} else {
ol_flags = 0;
@@ -1619,7 +1617,7 @@ avp_recv_pkts(void *rx_queue,
m->port = avp->port_id;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
- m->ol_flags = PKT_RX_VLAN_PKT;
+ m->ol_flags = PKT_RX_VLAN;
m->vlan_tci = pkt_buf->vlan_tci;
}
@@ -2031,7 +2029,12 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
mask = (ETH_VLAN_STRIP_MASK |
ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK);
- avp_vlan_offload_set(eth_dev, mask);
+ ret = avp_vlan_offload_set(eth_dev, mask);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
/* update device config */
memset(&config, 0, sizeof(config));
@@ -2214,7 +2217,7 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
}
}
-static void
+static int
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -2239,9 +2242,11 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
+
+ return 0;
}
-static void
+static int
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -2274,6 +2279,8 @@ avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
stats->q_errors[i] += txq->errors;
}
}
+
+ return 0;
}
static void
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
index 488d7216..54437e9a 100644
--- a/drivers/net/avp/rte_avp_common.h
+++ b/drivers/net/avp/rte_avp_common.h
@@ -243,7 +243,7 @@ struct rte_avp_desc {
*/
struct rte_avp_memmap {
void *addr;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint64_t length;
};
@@ -345,7 +345,7 @@ RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
*/
struct rte_avp_mempool_info {
void *addr;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
uint64_t length;
};
@@ -359,10 +359,10 @@ struct rte_avp_device_info {
char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */
- phys_addr_t tx_phys;
- phys_addr_t rx_phys;
- phys_addr_t alloc_phys;
- phys_addr_t free_phys;
+ rte_iova_t tx_phys;
+ rte_iova_t rx_phys;
+ rte_iova_t alloc_phys;
+ rte_iova_t free_phys;
uint32_t features; /**< Supported feature bitmap */
uint8_t min_rx_queues; /**< Minimum supported receive/free queues */
@@ -379,14 +379,14 @@ struct rte_avp_device_info {
uint32_t free_size; /**< Size of each free queue */
/* Used by Ethtool */
- phys_addr_t req_phys;
- phys_addr_t resp_phys;
- phys_addr_t sync_phys;
+ rte_iova_t req_phys;
+ rte_iova_t resp_phys;
+ rte_iova_t sync_phys;
void *sync_va;
/* mbuf mempool (used when a single memory area is supported) */
void *mbuf_va;
- phys_addr_t mbuf_phys;
+ rte_iova_t mbuf_phys;
/* mbuf mempools */
struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index e1231069..90ff8b1e 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -9,6 +9,9 @@ CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -DZLIB_CONST
LDLIBS += -lz
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_bnx2x_version.map
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 06733d15..9394f6c5 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -184,7 +184,7 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
return -ENOMEM;
}
- dma->paddr = (uint64_t) z->phys_addr;
+ dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
@@ -419,7 +419,7 @@ void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32)
}
void
-bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr,
+bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr,
uint32_t len32)
{
struct dmae_command dmae;
@@ -447,7 +447,7 @@ bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr
}
static void
-bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
@@ -823,14 +823,14 @@ bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
static void
__storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
- phys_addr_t mapping)
+ rte_iova_t mapping)
{
REG_WR(sc, addr, U64_LO(mapping));
REG_WR(sc, (addr + 4), U64_HI(mapping));
}
static void
-storm_memset_spq_addr(struct bnx2x_softc *sc, phys_addr_t mapping,
+storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
uint16_t abs_fid)
{
uint32_t addr = (XSEM_REG_FAST_MEMORY +
@@ -1498,7 +1498,7 @@ bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id,
ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
ramrod_param.rdata_mapping =
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
ramrod_param.ramrod_flags = ramrod_flags;
@@ -2135,7 +2135,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
@@ -4599,9 +4599,9 @@ static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
ecore_init_func_obj(sc,
&sc->func_obj,
BNX2X_SP(sc, func_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
BNX2X_SP(sc, func_afex_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
&bnx2x_func_sp_drv);
}
@@ -4772,7 +4772,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
}
static void
-bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid,
+bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid,
uint8_t vf_valid, int fw_sb_id, int igu_sb_id)
{
struct hc_status_block_data_e2 sb_data_e2;
@@ -4918,7 +4918,7 @@ static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx)
sc->max_cos,
SC_FUNC(sc),
BNX2X_SP(sc, q_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
q_type);
/* configure classification DBs */
@@ -4928,7 +4928,7 @@ static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx)
idx,
SC_FUNC(sc),
BNX2X_SP(sc, mac_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
ECORE_FILTER_MAC_PENDING, &sc->sp_state,
ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
}
@@ -5028,7 +5028,7 @@ static void bnx2x_init_tx_rings(struct bnx2x_softc *sc)
static void bnx2x_init_def_sb(struct bnx2x_softc *sc)
{
struct host_sp_status_block *def_sb = sc->def_sb;
- phys_addr_t mapping = sc->def_sb_dma.paddr;
+ rte_iova_t mapping = sc->def_sb_dma.paddr;
int igu_sp_sb_index;
int igu_seg_id;
int port = SC_PORT(sc);
@@ -5700,7 +5700,7 @@ static void bnx2x_init_objs(struct bnx2x_softc *sc)
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, mcast_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
ECORE_FILTER_MCAST_PENDING,
&sc->sp_state, o_type);
@@ -5724,7 +5724,7 @@ static void bnx2x_init_objs(struct bnx2x_softc *sc)
SC_FUNC(sc),
SC_FUNC(sc),
BNX2X_SP(sc, rss_rdata),
- (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+ (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
ECORE_FILTER_RSS_CONF_PENDING,
&sc->sp_state, ECORE_OBJ_TYPE_RX);
}
@@ -6445,9 +6445,9 @@ bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
pause->pri_map = 1;
/* rxq setup */
- rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr;
- rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr;
- rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr +
+ rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr;
+ rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr;
+ rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr +
BNX2X_PAGE_SIZE);
/*
@@ -6486,7 +6486,7 @@ bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
return;
}
- txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr;
+ txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -6604,7 +6604,7 @@ bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- (void)rte_memcpy(params.ind_table, rss_obj->ind_table,
+ rte_memcpy(params.ind_table, rss_obj->ind_table,
sizeof(params.ind_table));
if (config_hash) {
@@ -6671,7 +6671,7 @@ bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
/* fill a user request section if needed */
if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
- (void)rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
+ rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
ETH_ALEN);
bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
@@ -6879,7 +6879,7 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
sc->link_cnt++;
/* report new link params and remember the state for the next time */
- (void)rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
+ rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags)) {
@@ -11059,7 +11059,7 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
for (i = 0; i < L2_ILT_LINES(sc); i++) {
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- (phys_addr_t)sc->context[i].vcxt_dma.paddr;
+ (rte_iova_t)sc->context[i].vcxt_dma.paddr;
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
}
ecore_ilt_init_op(sc, INITOP_SET);
@@ -11357,7 +11357,7 @@ static void bnx2x_reset_port(struct bnx2x_softc *sc)
}
}
-static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, phys_addr_t addr)
+static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
{
int reg;
uint32_t wb_write[2];
@@ -11587,7 +11587,7 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
}
static void
-ecore_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
uint32_t addr, uint32_t len)
{
bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);
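
bnx2x_dma_alloc() above now reads the bus address from z->iova (formerly z->phys_addr). A hedged sketch of that memzone-backed DMA allocation pattern; the zone name and alignment here are illustrative, not the driver's values:

#include <errno.h>
#include <rte_memzone.h>

static int
dma_zone_alloc(const char *name, size_t size, void **vaddr,
	       rte_iova_t *iova)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,
					 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		return -ENOMEM;
	*vaddr = mz->addr;
	*iova = mz->iova;	/* formerly mz->phys_addr */
	return 0;
}
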
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 91c5aec2..17075d38 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -18,6 +18,7 @@
#include <rte_byteorder.h>
#include <rte_spinlock.h>
+#include <rte_bus_pci.h>
#include <rte_io.h>
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -317,7 +318,7 @@ struct bnx2x_bar {
/* Used to manage DMA allocations. */
struct bnx2x_dma {
struct bnx2x_softc *sc;
- phys_addr_t paddr;
+ rte_iova_t paddr;
void *vaddr;
int nseg;
char msg[RTE_MEMZONE_NAMESIZE - 6];
@@ -370,10 +371,10 @@ struct bnx2x_fastpath {
struct bnx2x_dma sb_dma;
union bnx2x_host_hc_status_block status_block;
- phys_addr_t tx_desc_mapping;
+ rte_iova_t tx_desc_mapping;
- phys_addr_t rx_desc_mapping;
- phys_addr_t rx_comp_mapping;
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t rx_comp_mapping;
uint16_t *sb_index_values;
uint16_t *sb_running_index;
@@ -468,7 +469,7 @@ union cdu_context {
struct hw_context {
struct bnx2x_dma vcxt_dma;
union cdu_context *vcxt;
- //phys_addr_t cxt_mapping;
+ //rte_iova_t cxt_mapping;
size_t size;
};
@@ -1242,7 +1243,7 @@ struct bnx2x_softc {
uint32_t gz_outlen;
#define GUNZIP_BUF(sc) (sc->gz_buf)
#define GUNZIP_OUTLEN(sc) (sc->gz_outlen)
-#define GUNZIP_PHYS(sc) (phys_addr_t)(sc->gz_buf_dma.paddr)
+#define GUNZIP_PHYS(sc) (rte_iova_t)(sc->gz_buf_dma.paddr)
#define FW_BUF_SIZE 0x40000
struct raw_op *init_ops;
@@ -1310,14 +1311,14 @@ struct bnx2x_softc {
*/
int fw_stats_req_size;
struct bnx2x_fw_stats_req *fw_stats_req;
- phys_addr_t fw_stats_req_mapping;
+ rte_iova_t fw_stats_req_mapping;
/*
* FW statistics data shortcut (points at the beginning of fw_stats
* buffer + fw_stats_req_size).
*/
int fw_stats_data_size;
struct bnx2x_fw_stats_data *fw_stats_data;
- phys_addr_t fw_stats_data_mapping;
+ rte_iova_t fw_stats_data_mapping;
/* tracking a pending STAT_QUERY ramrod */
uint16_t stats_pending;
@@ -1402,8 +1403,8 @@ union bnx2x_stats_show_data {
#define FUNC_FLG_LEADING 0x0020 /* PF only */
struct bnx2x_func_init_params {
- phys_addr_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
- phys_addr_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
+ rte_iova_t fw_stat_map; /* (dma) valid if FUNC_FLG_STATS */
+ rte_iova_t spq_map; /* (dma) valid if FUNC_FLG_SPQ */
uint16_t func_flgs;
uint16_t func_id; /* abs function id */
uint16_t pf_id;
@@ -1525,12 +1526,12 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
#define REG_RD_DMAE(sc, offset, valp, len32) \
do { \
(void)bnx2x_read_dmae(sc, offset, len32); \
- (void)rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \
+ rte_memcpy(valp, BNX2X_SP(sc, wb_data[0]), (len32) * 4); \
} while (0)
#define REG_WR_DMAE(sc, offset, valp, len32) \
do { \
- (void)rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \
+ rte_memcpy(BNX2X_SP(sc, wb_data[0]), valp, (len32) * 4); \
(void)bnx2x_write_dmae(sc, BNX2X_SP_MAPPING(sc, wb_data), offset, len32); \
} while (0)
@@ -1748,7 +1749,7 @@ uint32_t bnx2x_dmae_opcode(struct bnx2x_softc *sc, uint8_t src_type,
uint8_t comp_type);
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx);
void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32);
-void bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr,
+void bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr,
uint32_t dst_addr, uint32_t len32);
void bnx2x_set_ctx_validation(struct bnx2x_softc *sc, struct eth_context *cxt,
uint32_t cid);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 6f62a37f..95861a06 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -329,7 +329,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}
-static void
+static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bnx2x_softc *sc = dev->data->dev_private;
@@ -389,6 +389,8 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->imissed = brb_drops + brb_truncates +
brb_truncate_discard + stats->rx_nombuf;
+
+ return 0;
}
static int
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index a9da9de8..967d6dc5 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -35,7 +35,6 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_spinlock.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 5dd4aee7..a0d4ac92 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -71,8 +71,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct bnx2x_softc *sc = dev->data->dev_private;
struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
struct eth_rx_cqe_next_page *nextpg;
- phys_addr_t *rx_bd;
- phys_addr_t busaddr;
+ rte_iova_t *rx_bd;
+ rte_iova_t busaddr;
/* First allocate the rx queue data structure */
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
@@ -108,7 +108,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
bnx2x_rx_queue_release(rxq);
return -ENOMEM;
}
- fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
rxq->rx_ring = (uint64_t*)dma->addr;
memset((void *)rxq->rx_ring, 0, dma_size);
@@ -140,7 +140,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
rxq->sw_ring[idx] = mbuf;
- rxq->rx_ring[idx] = mbuf->buf_physaddr;
+ rxq->rx_ring[idx] = mbuf->buf_iova;
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
@@ -154,7 +154,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_RX_LOG(ERR, "RCQ alloc failed");
return -ENOMEM;
}
- fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
/* Link the CQ chain pages. */
@@ -289,7 +289,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
bnx2x_tx_queue_release(txq);
return -ENOMEM;
}
- fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+ fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
memset(txq->tx_ring, 0, tsize);
@@ -400,7 +400,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
- rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+ rxq->rx_ring[bd_prod] = new_mb->buf_iova;
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -422,7 +422,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
rx_mb->vlan_tci = cqe_fp->vlan_tag;
- rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+ rx_mb->ol_flags |= PKT_RX_VLAN;
}
rx_pkts[nb_rx] = rx_mb;
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 2e38ec26..9600e0f1 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -41,7 +41,7 @@ struct bnx2x_rx_queue {
uint16_t rx_cq_head; /**< Index of current rcq bd. */
uint16_t rx_cq_tail; /**< Index of last rcq bd. */
uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
};
@@ -62,7 +62,7 @@ struct bnx2x_tx_queue {
uint16_t nb_tx_avail; /**< Number of TX descriptors available. */
uint16_t nb_tx_pages; /**< number of TX pages */
uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data */
};
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6223cfef..b9b85963 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1338,7 +1338,7 @@ bnx2x_prep_fw_stats_req(struct bnx2x_softc *sc)
int i;
int first_queue_query_index;
struct stats_query_header *stats_hdr = &sc->fw_stats_req->hdr;
- phys_addr_t cur_data_offset;
+ rte_iova_t cur_data_offset;
struct stats_query_entry *cur_query_entry;
stats_hdr->cmd_num = sc->fw_stats_num;
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 0ca0df87..3c08f2a2 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -113,7 +113,7 @@ bnx2x_vf_finalize(struct bnx2x_softc *sc,
#define BNX2X_VF_CHANNEL_TRIES 100
static int
-bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
+bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
{
uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
uint8_t i;
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index e7ec96e9..ff40413c 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -36,7 +36,7 @@
#include "ecore_reg.h"
struct bnx2x_softc;
-typedef phys_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
+typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */
typedef volatile int ecore_atomic_t;
@@ -113,7 +113,7 @@ typedef rte_spinlock_t ECORE_MUTEX_SPIN;
#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
-#define ECORE_MEMCPY(_a, _b, _s) (void)rte_memcpy(_a, _b, _s)
+#define ECORE_MEMCPY(_a, _b, _s) rte_memcpy(_a, _b, _s)
#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
#define ECORE_CPU_TO_LE16(x) htole16(x)
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index b03f65dc..2aa04411 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -40,10 +40,13 @@ LIB = librte_pmd_bnxt.a
EXPORT_MAP := rte_pmd_bnxt_version.map
-LIBABIVER := 1
+LIBABIVER := 2
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_bnxt_version.map
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 405d94de..646fe79e 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -39,6 +39,7 @@
#include <sys/queue.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_lcore.h>
@@ -126,13 +127,13 @@ struct bnxt_pf_info {
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
- uint8_t port_id;
+ uint16_t port_id;
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
uint32_t func_cfg_flags;
void *vf_req_buf;
- phys_addr_t vf_req_buf_dma_addr;
+ rte_iova_t vf_req_buf_dma_addr;
uint32_t vf_req_fwd[8];
uint16_t total_vnics;
struct bnxt_child_vf_info *vf_info;
@@ -171,11 +172,18 @@ struct bnxt_cos_queue_info {
uint8_t profile;
};
+struct rte_flow {
+ STAILQ_ENTRY(rte_flow) next;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic;
+};
+
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
struct rte_eth_dev *eth_dev;
+ struct rte_eth_rss_conf rss_conf;
struct rte_pci_device *pdev;
uint32_t flags;
@@ -184,6 +192,7 @@ struct bnxt {
#define BNXT_FLAG_PORT_STATS (1 << 2)
#define BNXT_FLAG_JUMBO (1 << 3)
#define BNXT_FLAG_SHORT_CMD (1 << 4)
+#define BNXT_FLAG_UPDATE_HASH (1 << 5)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
@@ -194,14 +203,14 @@ struct bnxt {
struct bnxt_rx_queue **rx_queues;
const void *rx_mem_zone;
struct rx_port_stats *hw_rx_port_stats;
- phys_addr_t hw_rx_port_stats_map;
+ rte_iova_t hw_rx_port_stats_map;
unsigned int tx_nr_rings;
unsigned int tx_cp_nr_rings;
struct bnxt_tx_queue **tx_queues;
const void *tx_mem_zone;
struct tx_port_stats *hw_tx_port_stats;
- phys_addr_t hw_tx_port_stats_map;
+ rte_iova_t hw_tx_port_stats_map;
/* Default completion ring */
struct bnxt_cp_ring_info *def_cp_ring;
@@ -217,7 +226,7 @@ struct bnxt {
STAILQ_HEAD(, bnxt_filter_info) free_filter_list;
/* VNIC pointer for flow filter (VMDq) pools */
-#define MAX_FF_POOLS ETH_64_POOLS
+#define MAX_FF_POOLS 256
STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS];
struct bnxt_irq *irq_tbl;
@@ -227,9 +236,9 @@ struct bnxt {
uint16_t hwrm_cmd_seq;
void *hwrm_cmd_resp_addr;
- phys_addr_t hwrm_cmd_resp_dma_addr;
+ rte_iova_t hwrm_cmd_resp_dma_addr;
void *hwrm_short_cmd_req_addr;
- phys_addr_t hwrm_short_cmd_req_dma_addr;
+ rte_iova_t hwrm_short_cmd_req_dma_addr;
rte_spinlock_t hwrm_lock;
uint16_t max_req_len;
uint16_t max_resp_len;
@@ -269,4 +278,5 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6
bool is_bnxt_supported(struct rte_eth_dev *dev);
+extern const struct rte_flow_ops bnxt_flow_ops;
#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 68979bc4..26b2755e 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -183,8 +183,10 @@ void bnxt_free_def_cp_ring(struct bnxt *bp)
return;
bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
+	cpr->cp_ring_struct = NULL;	/* clear only after freeing, or the ring struct leaks */
rte_free(cpr);
+ bp->def_cp_ring = NULL;
}
/* For the default completion ring only */
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index a6e87858..ce2b0cb8 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -41,6 +41,9 @@
(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
!((raw_cons) & ((ring)->ring_size)))
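+/* The hardware toggles the completion "valid" bit on every pass through
+ * the ring; CMPL_VALID tests an entry against the expected phase (v)
+ * rather than against a raw consumer index.
+ */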
+#define CMPL_VALID(cmp, v) \
+ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v))
+
#define CMP_TYPE(cmp) \
(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
@@ -48,6 +51,7 @@
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
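+/* Flip the expected valid phase once the consumer index wraps past the
+ * ring mask.
+ */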
+#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
@@ -82,15 +86,15 @@ struct bnxt_cp_ring_info {
struct cmpl_base *cp_desc_ring;
- phys_addr_t cp_desc_mapping;
+ rte_iova_t cp_desc_mapping;
struct ctx_hw_stats *hw_stats;
- phys_addr_t hw_stats_map;
+ rte_iova_t hw_stats_map;
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
uint16_t cp_cons;
- bool v;
+ bool valid;
};
#define RX_CMP_L2_ERRORS \
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index c9d11228..e8c7d0e7 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -53,6 +53,7 @@
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
+#include "bnxt_nvm_defs.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
@@ -144,7 +145,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
-static void bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
/***********************/
@@ -201,8 +202,16 @@ static int bnxt_init_chip(struct bnxt *bp)
{
unsigned int i, rss_idx, fw_idx;
struct rte_eth_link new;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ uint32_t queue_id, base = BNXT_MISC_VEC_ID;
+ uint32_t vec = BNXT_MISC_VEC_ID;
int rc;
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
if (bp->eth_dev->data->mtu > ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
bp->flags |= BNXT_FLAG_JUMBO;
@@ -305,6 +314,48 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
+ bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = bp->eth_dev->data->nb_rx_queues;
+ RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
+ intr_vector);
+ if (intr_vector > bp->rx_cp_nr_rings) {
+ RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ bp->rx_cp_nr_rings);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ bp->eth_dev->data->nb_rx_queues *
+ sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ " intr_vec", bp->eth_dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
+ __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->max_intr);
+ }
+
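+	/* Distribute RX queues over the available eventfd vectors; once
+	 * nb_efd is exhausted, the remaining queues share the last vector.
+	 */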
+ for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
+ queue_id++) {
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
@@ -360,27 +411,38 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
+ unsigned int max_rx_rings;
dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* MAC Specifics */
- dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
+ dev_info->max_mac_addrs = bp->max_l2_ctx;
dev_info->max_hash_mac_addrs = 0;
/* PF/VF specifics */
if (BNXT_PF(bp))
dev_info->max_vfs = bp->pdev->max_vfs;
- dev_info->max_rx_queues = bp->max_rx_rings;
- dev_info->max_tx_queues = bp->max_tx_rings;
+ max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx,
+ bp->max_stat_ctx)));
+ /* For the sake of symmetry, max_rx_queues = max_tx_queues */
+ dev_info->max_rx_queues = max_rx_rings;
+ dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bp->max_rsscos_ctx;
+	dev_info->hash_key_size = 40; /* 40-byte RSS hash key */
max_vnics = bp->max_vnics;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
- dev_info->rx_offload_capa = 0;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO |
@@ -414,6 +476,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
};
eth_dev->data->dev_conf.intr_conf.lsc = 1;
+ eth_dev->data->dev_conf.intr_conf.rxq = 1;
+
/* *INDENT-ON* */
/*
@@ -489,13 +553,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
if (link->link_status)
RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
- (uint8_t)(eth_dev->data->port_id),
+ eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
RTE_LOG(INFO, PMD, "Port %d Link Down\n",
- (uint8_t)(eth_dev->data->port_id));
+ eth_dev->data->port_id);
}
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
@@ -510,6 +574,11 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int vlan_mask = 0;
int rc;
+ if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ RTE_LOG(ERR, PMD,
+ "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+ bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ }
bp->dev_stopped = 0;
rc = bnxt_init_nic(bp);
@@ -522,7 +591,9 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
vlan_mask |= ETH_VLAN_FILTER_MASK;
if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
vlan_mask |= ETH_VLAN_STRIP_MASK;
- bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+ rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
+ if (rc)
+ goto error;
return 0;
@@ -593,13 +664,14 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
- int i;
+ uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
+ uint32_t i;
/*
* Loop through all VNICs from the specified filter flow pools to
* remove the corresponding MAC addr filter
*/
- for (i = 0; i < MAX_FF_POOLS; i++) {
+ for (i = 0; i < pool; i++) {
if (!(pool_mask & (1ULL << i)))
continue;
@@ -610,7 +682,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
if (filter->mac_index == index) {
STAILQ_REMOVE(&vnic->filter, filter,
bnxt_filter_info, next);
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
filter->mac_index = INVALID_MAC_INDEX;
memset(&filter->l2_addr, 0,
ETHER_ADDR_LEN);
@@ -657,7 +729,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -827,11 +899,15 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- return -EINVAL;
+ RTE_LOG(ERR, PMD, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
}
+
+ bp->flags |= BNXT_FLAG_UPDATE_HASH;
+ memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
+
if (rss_conf->rss_hf & ETH_RSS_IPV4)
hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
@@ -1147,7 +1223,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
/* Must delete the filter */
STAILQ_REMOVE(&vnic->filter, filter,
bnxt_filter_info, next);
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_INSERT_TAIL(
&bp->free_filter_list,
filter, next);
@@ -1173,7 +1249,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr,
filter->l2_addr, ETHER_ADDR_LEN);
/* MAC only filter */
- rc = bnxt_hwrm_set_filter(bp,
+ rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
new_filter);
if (rc)
@@ -1225,7 +1301,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
/* Must delete the MAC filter */
STAILQ_REMOVE(&vnic->filter, filter,
bnxt_filter_info, next);
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
filter->l2_ovlan = 0;
STAILQ_INSERT_TAIL(
&bp->free_filter_list,
@@ -1248,8 +1324,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter->l2_ovlan = vlan_id;
new_filter->l2_ovlan_mask = 0xF000;
new_filter->enables |= en;
- rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
- new_filter);
+ rc = bnxt_hwrm_set_l2_filter(bp,
+ vnic->fw_vnic_id,
+ new_filter);
if (rc)
goto exit;
RTE_LOG(INFO, PMD,
@@ -1275,7 +1352,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
return bnxt_del_vlan_filter(bp, vlan_id);
}
-static void
+static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
@@ -1307,6 +1384,8 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_EXTEND_MASK)
RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+
+ return 0;
}
static void
@@ -1328,7 +1407,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
/* Default Filter is at Index 0 */
if (filter->mac_index != 0)
continue;
- rc = bnxt_hwrm_clear_filter(bp, filter);
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
break;
memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
@@ -1337,7 +1416,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
- rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
break;
filter->mac_index = 0;
@@ -1517,6 +1596,1083 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
return bnxt_hwrm_port_led_cfg(bp, false);
}
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ uint32_t desc = 0, raw_cons = 0, cons;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_queue *rxq;
+ struct rx_pkt_cmpl *rxcmp;
+ uint16_t cmp_type;
+ uint8_t cmp = 1;
+ bool valid;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ cpr = rxq->cp_ring;
+ valid = cpr->valid;
+
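+	/* Scan up to nb_rx_desc completion entries, counting those whose
+	 * valid bit matches the expected phase as completed packets.
+	 */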
+ while (raw_cons < rxq->nb_rx_desc) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMPL_VALID(rxcmp, valid))
+ goto nothing_to_do;
+ valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
+ cmp = (rte_le_to_cpu_32(
+ ((struct rx_tpa_end_cmpl *)
+ (rxcmp))->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+ RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ desc++;
+		} else if (cmp_type == 0x11) { /* RX_PKT_CMPL_TYPE_RX_L2 */
+ desc++;
+ cmp = (rxcmp->agg_bufs_v1 &
+ RX_PKT_CMPL_AGG_BUFS_MASK) >>
+ RX_PKT_CMPL_AGG_BUFS_SFT;
+ } else {
+ cmp = 1;
+ }
+nothing_to_do:
+ raw_cons += cmp ? cmp : 2;
+ }
+
+ return desc;
+}
+
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_pkt_cmpl *rxcmp;
+ uint32_t cons, cp_cons;
+
+ if (!rxq)
+ return -EINVAL;
+
+ cpr = rxq->cp_ring;
+ rxr = rxq->rx_ring;
+
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ cp_cons = cpr->cp_raw_cons;
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
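+	/* Descriptors ahead of the consumer index are still in the current
+	 * valid phase; those behind it have wrapped and use the inverted
+	 * phase.
+	 */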
+ if (cons > cp_cons) {
+ if (CMPL_VALID(rxcmp, cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ } else {
+ if (CMPL_VALID(rxcmp, !cpr->valid))
+ return RTE_ETH_RX_DESC_DONE;
+ }
+ rx_buf = &rxr->rx_buf_ring[cons];
+ if (rx_buf->mbuf == NULL)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+ struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct tx_pkt_cmpl *txcmp;
+ uint32_t cons, cp_cons;
+
+ if (!txq)
+ return -EINVAL;
+
+ cpr = txq->cp_ring;
+ txr = txq->tx_ring;
+
+ if (offset >= txq->nb_tx_desc)
+ return -EINVAL;
+
+ cons = RING_CMP(cpr->cp_ring_struct, offset);
+ txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ cp_cons = cpr->cp_raw_cons;
+
+ if (cons > cp_cons) {
+ if (CMPL_VALID(txcmp, cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ } else {
+ if (CMPL_VALID(txcmp, !cpr->valid))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+ }
+ tx_buf = &txr->tx_buf_ring[cons];
+ if (tx_buf->mbuf == NULL)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+static struct bnxt_filter_info *
+bnxt_match_and_validate_ether_filter(struct bnxt *bp,
+ struct rte_eth_ethertype_filter *efilter,
+ struct bnxt_vnic_info *vnic0,
+ struct bnxt_vnic_info *vnic,
+ int *ret)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+ int match = 0;
+ *ret = 0;
+
+ if (efilter->ether_type != ETHER_TYPE_IPv4 &&
+ efilter->ether_type != ETHER_TYPE_IPv6) {
+ RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", efilter->ether_type);
+ *ret = -EINVAL;
+ goto exit;
+ }
+ if (efilter->queue >= bp->rx_nr_rings) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ goto exit;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+ if (vnic == NULL) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ *ret = -EINVAL;
+ goto exit;
+ }
+
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->flags ==
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
+ mfilter->ethertype == efilter->ether_type)) {
+ match = 1;
+ break;
+ }
+ }
+ } else {
+ STAILQ_FOREACH(mfilter, &vnic->filter, next)
+ if ((!memcmp(efilter->mac_addr.addr_bytes,
+ mfilter->l2_addr, ETHER_ADDR_LEN) &&
+ mfilter->ethertype == efilter->ether_type &&
+ mfilter->flags ==
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (match)
+ *ret = -EEXIST;
+
+exit:
+ return mfilter;
+}
+
+static int
+bnxt_ethertype_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_ethertype_filter *efilter =
+ (struct rte_eth_ethertype_filter *)arg;
+ struct bnxt_filter_info *bfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret < 0)
+ return ret;
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
+ ETHER_ADDR_LEN);
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+ bfilter->ethertype = efilter->ether_type;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
+ if (filter1 == NULL) {
+ ret = -1;
+ goto cleanup;
+ }
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+
+ if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ bfilter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ }
+
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto cleanup;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
+ vnic0, vnic, &ret);
+ if (ret == -EEXIST) {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
+
+ STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
+ next);
+ bnxt_free_filter(bp, filter1);
+ } else if (ret == 0) {
+ RTE_LOG(ERR, PMD, "No matching filter found\n");
+ }
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ goto error;
+ }
+ return ret;
+cleanup:
+ bnxt_free_filter(bp, bfilter);
+error:
+ return ret;
+}
+
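+/* This parser only accepts all-ones field masks; any partial mask is
+ * rejected with -EINVAL.
+ */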
+static inline int
+parse_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ struct bnxt_filter_info *bfilter)
+{
+ uint32_t en = 0;
+
+ if (nfilter->queue >= bp->rx_nr_rings) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_port_mask) {
+ case UINT16_MAX:
+ bfilter->dst_port_mask = -1;
+ bfilter->dst_port = nfilter->dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+
+ switch (nfilter->proto_mask) {
+ case UINT8_MAX:
+ if (nfilter->proto == 17) /* IPPROTO_UDP */
+ bfilter->ip_protocol = 17;
+ else if (nfilter->proto == 6) /* IPPROTO_TCP */
+ bfilter->ip_protocol = 6;
+ else
+ return -EINVAL;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->dst_ip_mask) {
+ case UINT32_MAX:
+ bfilter->dst_ipaddr_mask[0] = -1;
+ bfilter->dst_ipaddr[0] = nfilter->dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_ip_mask) {
+ case UINT32_MAX:
+ bfilter->src_ipaddr_mask[0] = -1;
+ bfilter->src_ipaddr[0] = nfilter->src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (nfilter->src_port_mask) {
+ case UINT16_MAX:
+ bfilter->src_port_mask = -1;
+ bfilter->src_port = nfilter->src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+	/* TODO: map the filter priority:
+	 * nfilter->priority = (uint8_t)filter->priority;
+	 */
+
+ bfilter->enables = en;
+ return 0;
+}
+
+static struct bnxt_filter_info*
+bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
+ struct bnxt_filter_info *bfilter)
+{
+ struct bnxt_filter_info *mfilter = NULL;
+
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables)
+ return mfilter;
+ }
+ return NULL;
+}
+
+static int
+bnxt_cfg_ntuple_filter(struct bnxt *bp,
+ struct rte_eth_ntuple_filter *nfilter,
+ enum rte_filter_op filter_op)
+{
+ struct bnxt_filter_info *bfilter, *mfilter, *filter1;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ int ret;
+
+ if (nfilter->flags != RTE_5TUPLE_FLAGS) {
+ RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ return -EINVAL;
+ }
+
+ bfilter = bnxt_get_unused_filter(bp);
+ if (bfilter == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Not enough resources for a new filter.\n");
+ return -ENOMEM;
+ }
+ ret = parse_ntuple_filter(bp, nfilter, bfilter);
+ if (ret < 0)
+ goto free_filter;
+
+ vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ if (filter1 == NULL) {
+ ret = -1;
+ goto free_filter;
+ }
+
+ bfilter->dst_id = vnic->fw_vnic_id;
+ bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ bfilter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ bfilter->ethertype = 0x800;
+ bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+ mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+ RTE_LOG(ERR, PMD, "filter exists.");
+ ret = -EEXIST;
+ goto free_filter;
+ }
+ if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+ RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ ret = -ENOENT;
+ goto free_filter;
+ }
+
+ if (filter_op == RTE_ETH_FILTER_ADD) {
+ bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+ if (ret)
+ goto free_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+ } else {
+ if (mfilter == NULL) {
+ /* This should not happen. But for Coverity! */
+ ret = -ENOENT;
+ goto free_filter;
+ }
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
+
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
+ next);
+ bnxt_free_filter(bp, mfilter);
+ bfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ }
+
+ return 0;
+free_filter:
+ bfilter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, bfilter);
+ return ret;
+}
+
+static int
+bnxt_ntuple_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+	case RTE_ETH_FILTER_DELETE:
+		/* Same helper for both; filter_op selects add vs. delete */
+		ret = bnxt_cfg_ntuple_filter(bp,
+			(struct rte_eth_ntuple_filter *)arg,
+			filter_op);
+		break;
+ default:
+ RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+bnxt_parse_fdir_filter(struct bnxt *bp,
+ struct rte_eth_fdir_filter *fdir,
+ struct bnxt_filter_info *filter)
+{
+ enum rte_fdir_mode fdir_mode =
+ bp->eth_dev->data->dev_conf.fdir_conf.mode;
+ struct bnxt_vnic_info *vnic0, *vnic;
+ struct bnxt_filter_info *filter1;
+ uint32_t en = 0;
+ int i;
+
+ if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return -EINVAL;
+
+ filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+
+ switch (fdir->input.flow_type) {
+ case RTE_ETH_FLOW_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ /* FALLTHROUGH */
+ filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ filter->src_port = fdir->input.flow.tcp4_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = 6;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ filter->src_port = fdir->input.flow.udp4_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ filter->ip_protocol = 17;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ filter->src_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->dst_ipaddr_mask[0] = 0xffffffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ filter->ethertype = 0x800;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ /* FALLTHROUGH */
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.ipv6_flow.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.ipv6_flow.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ filter->src_port = fdir->input.flow.tcp6_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.tcp6_flow.ip.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ filter->src_port = fdir->input.flow.udp6_flow.src_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+ filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ filter->dst_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ filter->src_port_mask = 0xffff;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ filter->ip_addr_type =
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ rte_memcpy(filter->src_ipaddr,
+ fdir->input.flow.udp6_flow.ip.src_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+ rte_memcpy(filter->dst_ipaddr,
+ fdir->input.flow.udp6_flow.ip.dst_ip, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ memset(filter->dst_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ memset(filter->src_ipaddr_mask, 0xff, 16);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ filter->ethertype = 0x86dd;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ filter->ethertype = fdir->input.flow.l2_flow.ether_type;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+ break;
+ case RTE_ETH_FLOW_VXLAN:
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ return -EINVAL;
+ filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+ break;
+ case RTE_ETH_FLOW_NVGRE:
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ return -EINVAL;
+ filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+ break;
+ case RTE_ETH_FLOW_UNKNOWN:
+ case RTE_ETH_FLOW_RAW:
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_IPV6_EX:
+ case RTE_ETH_FLOW_IPV6_TCP_EX:
+ case RTE_ETH_FLOW_IPV6_UDP_EX:
+ case RTE_ETH_FLOW_GENEVE:
+ /* FALLTHROUGH */
+ default:
+ return -EINVAL;
+ }
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+ if (vnic == NULL) {
+ RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ rte_memcpy(filter->dst_macaddr,
+ fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+		/* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
+ } else {
+ filter->dst_id = vnic->fw_vnic_id;
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ if (filter->dst_macaddr[i] == 0x00)
+ filter1 = STAILQ_FIRST(&vnic0->filter);
+ else
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ }
+
+ if (filter1 == NULL)
+ return -EINVAL;
+
+ en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+ filter->enables = en;
+
+ return 0;
+}
+
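+/* Linear scan of every VNIC's filter list for an entry whose match
+ * fields are identical to nf; used to detect duplicates on add and to
+ * locate the target filter on delete.
+ */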
+static struct bnxt_filter_info *
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+ struct bnxt_filter_info *mf = NULL;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(mf, &vnic->filter, next) {
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask)))
+ return mf;
+ }
+ }
+ return NULL;
+}
+
+static int
+bnxt_fdir_filter(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
+ struct bnxt_filter_info *filter, *match;
+ struct bnxt_vnic_info *vnic;
+ int ret = 0, i;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ case RTE_ETH_FILTER_DELETE:
+ /* FALLTHROUGH */
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_parse_fdir_filter(bp, fdir, filter);
+ if (ret != 0)
+ goto free_filter;
+ filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+
+ match = bnxt_match_fdir(bp, filter);
+ if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+ RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ ret = -EEXIST;
+ goto free_filter;
+ }
+ if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+ RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ ret = -ENOENT;
+ goto free_filter;
+ }
+
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+ else
+ vnic =
+ STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+ if (filter_op == RTE_ETH_FILTER_ADD) {
+ ret = bnxt_hwrm_set_ntuple_filter(bp,
+ filter->dst_id,
+ filter);
+ if (ret)
+ goto free_filter;
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ } else {
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
+ STAILQ_REMOVE(&vnic->filter, match,
+ bnxt_filter_info, next);
+ bnxt_free_filter(bp, match);
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+ }
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->filter_type ==
+ HWRM_CFA_NTUPLE_FILTER) {
+ ret =
+ bnxt_hwrm_clear_ntuple_filter(bp,
+ filter);
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ }
+ }
+ }
+ return ret;
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_INFO:
+ /* FALLTHROUGH */
+ RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+
+free_filter:
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+ return ret;
+}
+
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ RTE_LOG(ERR, PMD,
+ "filter type: %d: To be implemented\n", filter_type);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = bnxt_fdir_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = bnxt_ntuple_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = bnxt_ethertype_filter(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &bnxt_flow_ops;
+ break;
+ default:
+ RTE_LOG(ERR, PMD,
+ "Filter type (%d) not supported", filter_type);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static const uint32_t *
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == bnxt_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static int
+bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ int rc;
+ uint32_t dir_entries;
+ uint32_t entry_length;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
+ __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
+
+ rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ return dir_entries * entry_length;
+}
+
+static int
+bnxt_get_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint32_t index;
+ uint32_t offset;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", __func__, bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+ if (in_eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(bp, in_eeprom->length,
+ in_eeprom->data);
+
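+	/* The offset encodes a directory index in its top byte and a byte
+	 * offset within that NVM item in the low 24 bits.
+	 */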
+ index = in_eeprom->offset >> 24;
+ offset = in_eeprom->offset & 0xffffff;
+
+ if (index != 0)
+ return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
+ in_eeprom->length, in_eeprom->data);
+
+ return 0;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(uint16_t dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_other_exec_format(dir_type);
+}
+
+static int
+bnxt_set_eeprom_op(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint8_t index, dir_op;
+ uint16_t type, ext, ordinal, attr;
+
+ RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", __func__, bp->pdev->addr.domain,
+ bp->pdev->addr.bus, bp->pdev->addr.devid,
+ bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+ if (!BNXT_PF(bp)) {
+ RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ return -EINVAL;
+ }
+
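+	/* The top 16 bits of magic select the NVM directory entry type;
+	 * type 0xffff requests a directory-level operation instead.
+	 */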
+ type = in_eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = in_eeprom->magic & 0xff;
+ dir_op = in_eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (in_eeprom->offset != ~in_eeprom->magic)
+ return -EINVAL;
+ return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type))
+ return -EOPNOTSUPP;
+ ext = in_eeprom->magic & 0xffff;
+ ordinal = in_eeprom->offset >> 16;
+ attr = in_eeprom->offset & 0xffff;
+
+ return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
+ in_eeprom->data, in_eeprom->length);
+}
+
/*
* Initialization
*/
@@ -1535,6 +2691,8 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_release = bnxt_rx_queue_release_op,
.tx_queue_setup = bnxt_tx_queue_setup_op,
.tx_queue_release = bnxt_tx_queue_release_op,
+ .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
+ .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
.reta_update = bnxt_reta_update_op,
.reta_query = bnxt_reta_query_op,
.rss_hash_update = bnxt_rss_hash_update_op,
@@ -1564,6 +2722,16 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.txq_info_get = bnxt_txq_info_get_op,
.dev_led_on = bnxt_dev_led_on_op,
.dev_led_off = bnxt_dev_led_off_op,
+ .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
+ .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+ .rx_queue_count = bnxt_rx_queue_count_op,
+ .rx_descriptor_status = bnxt_rx_descriptor_status_op,
+ .tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .filter_ctrl = bnxt_filter_ctrl_op,
+ .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
+ .get_eeprom_length = bnxt_get_eeprom_length_op,
+ .get_eeprom = bnxt_get_eeprom_op,
+ .set_eeprom = bnxt_set_eeprom_op,
};
static bool bnxt_vf_pciid(uint16_t id)
@@ -1628,7 +2796,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
const struct rte_memzone *mz = NULL;
static int version_printed;
uint32_t total_alloc_len;
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
struct bnxt *bp;
int rc;
@@ -1636,13 +2804,15 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
bp = eth_dev->data->dev_private;
rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
bp->dev_stopped = 1;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ goto skip_init;
+
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
@@ -1652,7 +2822,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
"Board initialization failed rc: %x\n", rc);
goto error;
}
+skip_init:
eth_dev->dev_ops = &bnxt_dev_ops;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
@@ -1674,13 +2847,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
@@ -1709,13 +2882,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
@@ -1755,11 +2928,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
- ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
+ ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
RTE_LOG(ERR, PMD,
"Failed to alloc %u bytes needed to store MAC addr tbl",
- ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+ ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
@@ -1798,6 +2971,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+ ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
RTE_LOG(ERR, PMD,
@@ -1877,6 +3052,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index e9aac271..65d30fb3 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -35,6 +35,9 @@
#include <rte_log.h>
#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
#include "bnxt.h"
#include "bnxt_filter.h"
@@ -94,6 +97,8 @@ void bnxt_init_filters(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
filter->fw_l2_filter_id = -1;
+ filter->fw_em_filter_id = -1;
+ filter->fw_ntuple_filter_id = -1;
STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
}
@@ -121,7 +126,7 @@ void bnxt_free_all_filters(struct bnxt *bp)
for (i = 0; i < bp->pf.max_vfs; i++) {
STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
}
}
}
@@ -142,7 +147,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
- rc = bnxt_hwrm_clear_filter(bp, filter);
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
RTE_LOG(ERR, PMD,
"HWRM filter cannot be freed rc = %d\n",
@@ -174,3 +179,1031 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
bp->filter_info = filter_mem;
return 0;
}
+
+struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+
+ /* Find the 1st unused filter from the free_filter_list pool*/
+ filter = STAILQ_FIRST(&bp->free_filter_list);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+ return filter;
+}
+
+void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+}
+
+static int
+bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
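+/* Return the first pattern item that is not a VOID placeholder */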
+static const struct rte_flow_item *
+nxt_non_void_pattern(const struct rte_flow_item *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static const struct rte_flow_action *
+nxt_non_void_action(const struct rte_flow_action *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static inline int check_zero_bytes(const uint8_t *bytes, int len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ if (bytes[i] != 0x00)
+ return 0;
+ return 1;
+}
+
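+/* Choose between an ntuple and an exact-match (EM) HWRM filter based on
+ * the pattern: a VLAN item forces EM mode, and combining VLAN with
+ * L3/L4 items is rejected.
+ */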
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error __rte_unused)
+{
+ const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+ int use_ntuple = 1;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ use_ntuple = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ use_ntuple = 0;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* FALLTHROUGH */
+ /* need ntuple match, reset exact match */
+ if (!use_ntuple) {
+ RTE_LOG(ERR, PMD,
+ "VLAN flow cannot use NTUPLE filter\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Cannot use VLAN with NTUPLE");
+ return -rte_errno;
+ }
+ use_ntuple |= 1;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Unknown Flow type");
+ use_ntuple |= 1;
+ }
+ item++;
+ }
+ return use_ntuple;
+}
+
+static int
+bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t tenant_id_be = 0;
+ bool vni_masked = 0;
+ bool tni_masked = 0;
+ uint32_t vf = 0;
+ int use_ntuple;
+ uint32_t en = 0;
+ int dflt_vnic;
+
+ use_ntuple = bnxt_filter_type_check(pattern, error);
+ RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ if (use_ntuple < 0)
+ return use_ntuple;
+
+ filter->filter_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->last) {
+ /* last or range is NOT supported as match criteria */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "No support for range");
+ return -rte_errno;
+ }
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "spec/mask is NULL");
+ return -rte_errno;
+ }
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+			/* Source and destination MAC address masks must not
+			 * be partially set: each must be all 0's or all 1's.
+			 */
+ if ((!is_zero_ether_addr(&eth_mask->src) &&
+ !is_broadcast_ether_addr(&eth_mask->src)) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MAC_addr mask not valid");
+ return -rte_errno;
+ }
+
+ /* Mask is not allowed. Only exact matches are */
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ethertype mask not valid");
+ return -rte_errno;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ rte_memcpy(filter->dst_macaddr,
+ &eth_spec->dst, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+ if (is_broadcast_ether_addr(&eth_mask->src)) {
+ rte_memcpy(filter->src_macaddr,
+ &eth_spec->src, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+ } /*
+ * else {
+ * RTE_LOG(ERR, PMD, "Handle this condition\n");
+ * }
+ */
+ if (eth_spec->type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(eth_spec->type);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+ EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
+ /* Only the VLAN ID can be matched. */
+				/* Convert to host order before masking off
+				 * the 12-bit VLAN ID.
+				 */
+				filter->l2_ovlan =
+					rte_be_to_cpu_16(vlan_spec->tci) &
+					0xFFF;
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN mask is invalid");
+ return -rte_errno;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ /* If mask is not involved, we could use EM filters. */
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+ filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
+ filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+ if (ipv4_mask->hdr.src_addr) {
+ filter->src_ipaddr_mask[0] =
+ ipv4_mask->hdr.src_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->dst_ipaddr_mask[0] =
+ ipv4_mask->hdr.dst_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+ filter->ip_addr_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+ if (ipv4_spec->hdr.next_proto_id) {
+ filter->ip_protocol =
+ ipv4_spec->hdr.next_proto_id;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask.");
+ return -rte_errno;
+ }
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+ rte_memcpy(filter->src_ipaddr,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->dst_ipaddr,
+ ipv6_spec->hdr.dst_addr, 16);
+ if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
+ rte_memcpy(filter->src_ipaddr_mask,
+ ipv6_mask->hdr.src_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+ if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
+ rte_memcpy(filter->dst_ipaddr_mask,
+ ipv6_mask->hdr.dst_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+ filter->ip_addr_type = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
+ EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /* Check TCP mask. Only DST & SRC ports are maskable */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+ if (tcp_mask->hdr.dst_port) {
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+ if (tcp_mask->hdr.src_port) {
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = udp_spec->hdr.src_port;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (udp_mask->hdr.dst_port) {
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+ if (udp_mask->hdr.src_port) {
+ filter->src_port_mask = udp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* If the VXLAN item only identifies the protocol,
+ * both spec and mask must be NULL; a concrete match
+ * requires both to be non-NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ if (vxlan_spec &&
+ (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
+ vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
+ vxlan_spec->flags != 0x8)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ vni_masked =
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (vni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+
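+ /* The 24-bit VNI is copied into the low three
+ * bytes of tenant_id_be (assumed zero-initialized
+ * at its declaration), so the byte swap below
+ * yields the VNI in host order.
+ */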
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* If the NVGRE item only identifies the protocol,
+ * both spec and mask must be NULL; a concrete match
+ * requires both to be non-NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec &&
+ (nvgre_spec->c_k_s_rsvd0_ver != rte_cpu_to_be_16(0x2000) ||
+ nvgre_spec->protocol != rte_cpu_to_be_16(0x6558))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
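+ /* Same 24-bit extraction as for the VXLAN VNI above. */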
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ vf = vf_spec->id;
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Configuring on a VF!");
+ return -rte_errno;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Incorrect VF id!");
+ return -rte_errno;
+ }
+
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* A negative value means no driver is loaded
+ * on the VF, so there is no default VNIC to
+ * mirror to; reject the rule.
+ */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unable to get default VNIC for VF");
+ return -rte_errno;
+ }
+ filter->mirror_vnic_id = dflt_vnic;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+ break;
+ default:
+ break;
+ }
+ item++;
+ }
+ filter->enables = en;
+
+ return 0;
+}
+
+/* Parse attributes */
+static int
+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "No support for egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "No support for priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "No support for group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+struct bnxt_filter_info *
+bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter1, *f0;
+ struct bnxt_vnic_info *vnic0;
+ int rc;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ f0 = STAILQ_FIRST(&vnic0->filter);
+
+ /* This flow has the same DST MAC as the port/L2 filter. */
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
+ return f0;
+
+ /* This flow needs a DST MAC that differs from the port/L2 filter. */
+ RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ filter1 = bnxt_get_unused_filter(bp);
+ if (filter1 == NULL)
+ return NULL;
+ filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
+ memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
+ memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter1);
+ if (rc) {
+ bnxt_free_filter(bp, filter1);
+ return NULL;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
+ return filter1;
+}
+
+static int
+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_action *act = nxt_non_void_action(actions);
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_filter_info *filter1;
+ uint32_t vf = 0;
+ int dflt_vnic;
+ int rc;
+
+ if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Cannot create flow on RSS queues");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
+ if (rc != 0)
+ goto ret;
+
+ rc = bnxt_flow_parse_attr(attr, error);
+ if (rc != 0)
+ goto ret;
+ /* Only the ingress attribute is supported, for now. */
+ filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Allow this flow. Redirect to a VNIC. */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ if (act_q->index >= bp->rx_nr_rings) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+ if (vnic == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "No matching VNIC for queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ filter->dst_id = vnic->fw_vnic_id;
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags =
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
+ else
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VF:
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ vf = act_vf->id;
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Configuring on a VF!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Incorrect VF id!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* A negative value means no driver is loaded on the
+ * VF, so there is no default VNIC to mirror to;
+ * reject the rule.
+ */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Unable to get default VNIC for VF");
+ rc = -rte_errno;
+ goto ret;
+ }
+ filter->mirror_vnic_id = dflt_vnic;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ break;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ act = nxt_non_void_action(++act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ret:
+ return rc;
+}
+
+static int
+bnxt_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ int ret = 0;
+
+ ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
+ if (ret != 0)
+ return ret;
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ /* No need to hold on to this filter if we are just validating flow */
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+
+ return ret;
+}
+
+static int
+bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+ struct bnxt_filter_info *mf;
+ struct rte_flow *flow;
+ int i;
+
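+ /* Walk every VNIC's flow list; -EEXIST signals that an
+ * identical filter is already programmed.
+ */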
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ mf = flow->filter;
+
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask)))
+ return -EEXIST;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bnxt_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic = NULL;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
+ if (ret != 0) {
+ RTE_LOG(ERR, PMD, "Not a validate flow.\n");
+ goto free_flow;
+ }
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ goto free_flow;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ if (ret != 0)
+ goto free_filter;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret != 0) {
+ RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ goto free_filter;
+ }
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+ filter->enables |=
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+ }
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ filter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+ }
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (filter->dst_id == vnic->fw_vnic_id)
+ break;
+ }
+
+ if (!ret) {
+ flow->filter = filter;
+ flow->vnic = vnic;
+ RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+ return flow;
+ }
+free_filter:
+ filter->fw_l2_filter_id = -1;
+ bnxt_free_filter(bp, filter);
+free_flow:
+ if (ret == -EEXIST)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Matching Flow exists.");
+ else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ flow = NULL;
+ return flow;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter = flow->filter;
+ struct bnxt_vnic_info *vnic = flow->vnic;
+ int ret = 0;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == 0)
+ RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ if (!ret) {
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ struct bnxt_filter_info *filter = flow->filter;
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to flush flow in HW.");
+ return -rte_errno;
+ }
+
+ STAILQ_REMOVE(&vnic->flow_list, flow,
+ rte_flow, next);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+const struct rte_flow_ops bnxt_flow_ops = {
+ .validate = bnxt_flow_validate,
+ .create = bnxt_flow_create,
+ .destroy = bnxt_flow_destroy,
+ .flush = bnxt_flow_flush,
+};
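+
+/*
+ * A minimal usage sketch of these ops through the generic rte_flow API.
+ * The port id, addresses, and queue index are illustrative assumptions,
+ * not part of the driver:
+ *
+ * struct rte_flow_attr attr = { .ingress = 1 };
+ * struct rte_flow_item_ipv4 ip_spec = {
+ * .hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 1)),
+ * };
+ * struct rte_flow_item_ipv4 ip_mask = {
+ * .hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
+ * };
+ * struct rte_flow_item pattern[] = {
+ * { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ * { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ * .spec = &ip_spec, .mask = &ip_mask },
+ * { .type = RTE_FLOW_ITEM_TYPE_END },
+ * };
+ * struct rte_flow_action_queue queue = { .index = 1 };
+ * struct rte_flow_action actions[] = {
+ * { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ * { .type = RTE_FLOW_ACTION_TYPE_END },
+ * };
+ * struct rte_flow_error err;
+ * struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
+ * actions, &err);
+ *
+ * if (flow == NULL)
+ * printf("flow create failed: %s\n",
+ * err.message ? err.message : "(no message)");
+ */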
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 613b2eea..2591a87e 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -40,8 +40,15 @@ struct bnxt;
struct bnxt_filter_info {
STAILQ_ENTRY(bnxt_filter_info) next;
uint64_t fw_l2_filter_id;
+ uint64_t fw_em_filter_id;
+ uint64_t fw_ntuple_filter_id;
#define INVALID_MAC_INDEX ((uint16_t)-1)
uint16_t mac_index;
+#define HWRM_CFA_L2_FILTER 0
+#define HWRM_CFA_EM_FILTER 1
+#define HWRM_CFA_NTUPLE_FILTER 2
+ uint8_t filter_type; /* L2, EM, or ntuple filter */
+ uint32_t dst_id;
/* Filter Characteristics */
uint32_t flags;
@@ -65,6 +72,19 @@ struct bnxt_filter_info {
uint64_t l2_filter_id_hint;
uint32_t src_id;
uint8_t src_type;
+ uint8_t src_macaddr[6];
+ uint8_t dst_macaddr[6];
+ uint32_t dst_ipaddr[4];
+ uint32_t dst_ipaddr_mask[4];
+ uint32_t src_ipaddr[4];
+ uint32_t src_ipaddr_mask[4];
+ uint16_t dst_port;
+ uint16_t dst_port_mask;
+ uint16_t src_port;
+ uint16_t src_port_mask;
+ uint16_t ip_protocol;
+ uint16_t ip_addr_type;
+ uint16_t ethertype;
};
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
@@ -73,5 +93,73 @@ void bnxt_init_filters(struct bnxt *bp);
void bnxt_free_all_filters(struct bnxt *bp);
void bnxt_free_filter_mem(struct bnxt *bp);
int bnxt_alloc_filter_mem(struct bnxt *bp);
+struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
+void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
+struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
+ struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
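+/* Shorter aliases for the long autogenerated HWRM constants used by the
+ * flow parsing code, presumably to keep lines within length limits.
+ */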
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE
+#define EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK
+#define NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR
+#define EM_FLOW_ALLOC_INPUT_EN_SRC_PORT \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_DST_PORT \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT
+#define EM_FLOW_ALLOC_INPUT_EN_IP_PROTO \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL
+#define EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN
+#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE
+#define L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN
+#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4
+#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID
#endif
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e710e636..bf1fb469 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -54,7 +54,7 @@
#include <rte_io.h>
-#define HWRM_CMD_TIMEOUT 2000
+#define HWRM_CMD_TIMEOUT 10000
struct bnxt_plcmodes_cfg {
uint32_t flags;
@@ -95,7 +95,7 @@ static int page_roundup(size_t size)
* command was failed by the ChiMP.
*/
-static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
uint32_t msg_len)
{
unsigned int i;
@@ -171,52 +171,58 @@ err_ret:
return -1;
}
-static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
-{
- int rc;
-
- rte_spinlock_lock(&bp->hwrm_lock);
- rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
- rte_spinlock_unlock(&bp->hwrm_lock);
- return rc;
-}
-
-#define HWRM_PREP(req, type, cr, resp) \
+/*
+ * HWRM_PREP() must be used to prepare *ALL* HWRM commands. It grabs the
+ * spinlock and does the initial request setup.
+ *
+ * HWRM_CHECK_RESULT() checks the command status; on failure it logs the
+ * error, releases the spinlock, and returns the error code from the
+ * enclosing function. Because it embeds that return, functions that do not
+ * follow the regular int return convention should not use it directly, but
+ * should copy and adapt it instead.
+ *
+ * HWRM_UNLOCK() must be called after all response processing is completed.
+ */
+#define HWRM_PREP(req, type) do { \
+ rte_spinlock_lock(&bp->hwrm_lock); \
memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
req.req_type = rte_cpu_to_le_16(HWRM_##type); \
- req.cmpl_ring = rte_cpu_to_le_16(cr); \
+ req.cmpl_ring = rte_cpu_to_le_16(-1); \
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
req.target_id = rte_cpu_to_le_16(0xffff); \
- req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
-
-#define HWRM_CHECK_RESULT \
- { \
- if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
- return rc; \
+ req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+} while (0)
+
+#define HWRM_CHECK_RESULT() do { \
+ if (rc) { \
+ RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
+ __func__, rc); \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ if (resp->resp_len >= 16) { \
+ struct hwrm_err_output *tmp_hwrm_err_op = \
+ (void *)resp; \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d:%d:%08x:%04x\n", \
+ __func__, \
+ rc, tmp_hwrm_err_op->cmd_err, \
+ rte_le_to_cpu_32(\
+ tmp_hwrm_err_op->opaque_0), \
+ rte_le_to_cpu_16(\
+ tmp_hwrm_err_op->opaque_1)); \
} \
- if (resp->error_code) { \
- rc = rte_le_to_cpu_16(resp->error_code); \
- if (resp->resp_len >= 16) { \
- struct hwrm_err_output *tmp_hwrm_err_op = \
- (void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
- rc, tmp_hwrm_err_op->cmd_err, \
- rte_le_to_cpu_32(\
- tmp_hwrm_err_op->opaque_0), \
- rte_le_to_cpu_16(\
- tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
- } \
- return rc; \
+ else { \
+ RTE_LOG(ERR, PMD, \
+ "%s error %d\n", __func__, rc); \
} \
- }
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+} while (0)
+
+#define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
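+
+/*
+ * Typical call sequence, as a sketch (SOME_CMD and the field names are
+ * placeholders, not real HWRM definitions):
+ *
+ * struct hwrm_some_cmd_input req = {.req_type = 0 };
+ * struct hwrm_some_cmd_output *resp = bp->hwrm_cmd_resp_addr;
+ *
+ * HWRM_PREP(req, SOME_CMD);
+ * req.some_field = rte_cpu_to_le_16(val);
+ * rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ * HWRM_CHECK_RESULT();
+ * val = rte_le_to_cpu_16(resp->some_field);
+ * HWRM_UNLOCK();
+ * return rc;
+ */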
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
@@ -224,13 +230,14 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.mask = 0;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -245,14 +252,14 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t mask = 0;
- HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
/* FIXME add multicast flag, when multicast adding options is supported
* by ethtool.
*/
if (vnic->flags & BNXT_VNIC_INFO_BCAST)
- mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+ mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
@@ -269,16 +276,16 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
if (vlan_table) {
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
- req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
- rte_mem_virt2phy(vlan_table));
+ req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
+ rte_mem_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
- req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
- mask);
+ req.mask = rte_cpu_to_le_32(mask);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -307,21 +314,22 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
return 0;
}
}
- HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
+ HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
-int bnxt_hwrm_clear_filter(struct bnxt *bp,
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *filter)
{
int rc = 0;
@@ -331,32 +339,50 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
if (filter->fw_l2_filter_id == UINT64_MAX)
return 0;
- HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
+ HWRM_PREP(req, CFA_L2_FILTER_FREE);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
filter->fw_l2_filter_id = -1;
return 0;
}
-int bnxt_hwrm_set_filter(struct bnxt *bp,
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
uint16_t dst_id,
struct bnxt_filter_info *filter)
{
int rc = 0;
struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
uint32_t enables = 0;
+ uint16_t j = dst_id - 1;
+
+ /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
+ if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
+ conf->pool_map[j].pools & (1UL << j)) {
+ RTE_LOG(DEBUG, PMD,
+ "Add vlan %u to vmdq pool %u\n",
+ conf->pool_map[j].vlan_id, j);
+
+ filter->l2_ivlan = conf->pool_map[j].vlan_id;
+ filter->enables |=
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ }
if (filter->fw_l2_filter_id != UINT64_MAX)
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
- HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
+ HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
req.flags = rte_cpu_to_le_32(filter->flags);
@@ -376,8 +402,14 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
req.l2_ovlan = filter->l2_ovlan;
if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
+ req.l2_ivlan = filter->l2_ivlan;
+ if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -387,9 +419,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+ HWRM_UNLOCK();
return rc;
}
@@ -402,13 +435,13 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
uint16_t new_max_vfs;
int i;
- HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+ HWRM_PREP(req, FUNC_QCAPS);
req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
if (BNXT_PF(bp)) {
@@ -469,6 +502,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
if (BNXT_PF(bp))
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ HWRM_UNLOCK();
return rc;
}
@@ -479,13 +513,14 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
struct hwrm_func_reset_input req = {.req_type = 0 };
struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_RESET, -1, resp);
+ HWRM_PREP(req, FUNC_RESET);
req.enables = rte_cpu_to_le_32(0);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -499,7 +534,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_REGISTERED)
return 0;
- HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
+ HWRM_PREP(req, FUNC_DRV_RGTR);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
req.ver_maj = RTE_VER_YEAR;
@@ -519,7 +554,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
bp->flags |= BNXT_FLAG_REGISTERED;
@@ -538,19 +574,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
uint32_t dev_caps_cfg;
bp->max_req_len = HWRM_MAX_REQ_LEN;
- HWRM_PREP(req, VER_GET, -1, resp);
+ HWRM_PREP(req, VER_GET);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
- /*
- * Hold the lock since we may be adjusting the response pointers.
- */
- rte_spinlock_lock(&bp->hwrm_lock);
- rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -612,7 +644,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"Unable to map response buffer to physical memory.\n");
@@ -638,7 +670,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
RTE_LOG(ERR, PMD,
@@ -651,7 +683,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
error:
- rte_spinlock_unlock(&bp->hwrm_lock);
+ HWRM_UNLOCK();
return rc;
}
@@ -664,12 +696,13 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
if (!(bp->flags & BNXT_FLAG_REGISTERED))
return 0;
- HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
+ HWRM_PREP(req, FUNC_DRV_UNRGTR);
req.flags = flags;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
bp->flags &= ~BNXT_FLAG_REGISTERED;
@@ -685,7 +718,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
uint32_t link_speed_mask =
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
+ HWRM_PREP(req, PORT_PHY_CFG);
if (conf->link_up) {
req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -729,7 +762,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -741,18 +775,18 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
+ HWRM_PREP(req, PORT_PHY_QCFG);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
link_info->phy_link_status = resp->link;
link_info->link_up =
(link_info->phy_link_status ==
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- link_info->duplex = resp->duplex;
+ link_info->duplex = resp->duplex_cfg;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
link_info->force_pause = resp->force_pause;
@@ -765,6 +799,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
+ HWRM_UNLOCK();
+
return rc;
}
@@ -774,11 +810,11 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
+ HWRM_PREP(req, QUEUE_QPORTCFG);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
#define GET_QUEUE_INFO(x) \
bp->cos_queue[x].id = resp->queue_id##x; \
@@ -793,6 +829,8 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
GET_QUEUE_INFO(6);
GET_QUEUE_INFO(7);
+ HWRM_UNLOCK();
+
return rc;
}
@@ -806,7 +844,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct hwrm_ring_alloc_input req = {.req_type = 0 };
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_ALLOC, -1, resp);
+ HWRM_PREP(req, RING_ALLOC);
req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
req.fbo = rte_cpu_to_le_32(0);
@@ -837,6 +875,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
default:
RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
ring_type);
+ HWRM_UNLOCK();
return -1;
}
req.enables = rte_cpu_to_le_32(enables);
@@ -850,22 +889,27 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
RTE_LOG(ERR, PMD,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ HWRM_UNLOCK();
return rc;
default:
RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ HWRM_UNLOCK();
return rc;
}
}
ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
+ HWRM_UNLOCK();
return rc;
}
@@ -876,7 +920,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
struct hwrm_ring_free_input req = {.req_type = 0 };
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_FREE, -1, resp);
+ HWRM_PREP(req, RING_FREE);
req.ring_type = ring_type;
req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
@@ -886,6 +930,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
if (rc || resp->error_code) {
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
+ HWRM_UNLOCK();
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
@@ -905,6 +950,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
return rc;
}
}
+ HWRM_UNLOCK();
return 0;
}
@@ -914,7 +960,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
+ HWRM_PREP(req, RING_GRP_ALLOC);
req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
@@ -923,11 +969,13 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
bp->grp_info[idx].fw_grp_id =
rte_le_to_cpu_16(resp->ring_group_id);
+ HWRM_UNLOCK();
+
return rc;
}
@@ -937,13 +985,14 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
struct hwrm_ring_grp_free_input req = {.req_type = 0 };
struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, RING_GRP_FREE, -1, resp);
+ HWRM_PREP(req, RING_GRP_FREE);
req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
return rc;
@@ -958,13 +1007,14 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
- HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+ HWRM_PREP(req, STAT_CTX_CLR_STATS);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -976,7 +1026,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
+ HWRM_PREP(req, STAT_CTX_ALLOC);
req.update_period_ms = rte_cpu_to_le_32(0);
@@ -985,10 +1035,13 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+ HWRM_UNLOCK();
+ bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
return rc;
}
@@ -999,13 +1052,14 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
+ HWRM_PREP(req, STAT_CTX_FREE);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1027,15 +1081,16 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE;
- HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+ HWRM_PREP(req, VNIC_ALLOC);
if (vnic->func_default)
req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ HWRM_UNLOCK();
RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1048,13 +1103,13 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
+ HWRM_PREP(req, VNIC_PLCMODES_QCFG);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
pmode->flags = rte_le_to_cpu_32(resp->flags);
/* dflt_vnic bit doesn't exist in the _cfg command */
@@ -1063,6 +1118,8 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
+ HWRM_UNLOCK();
+
return rc;
}
@@ -1074,7 +1131,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1089,7 +1146,8 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1099,7 +1157,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ uint32_t ctx_enable_flag = 0;
struct bnxt_plcmodes_cfg pmodes;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
@@ -1111,18 +1169,19 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
if (rc)
return rc;
- HWRM_PREP(req, VNIC_CFG, -1, resp);
+ HWRM_PREP(req, VNIC_CFG);
/* Only RSS support for now TBD: COS & LB */
req.enables =
- rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
- HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
if (vnic->lb_rule != 0xffff)
- ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
if (vnic->cos_rule != 0xffff)
- ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
- if (vnic->rss_rule != 0xffff)
- ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
+ if (vnic->rss_rule != 0xffff) {
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
+ ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
+ }
req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
@@ -1151,7 +1210,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
@@ -1169,7 +1229,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
- HWRM_PREP(req, VNIC_QCFG, -1, resp);
+ HWRM_PREP(req, VNIC_QCFG);
req.enables =
rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
@@ -1178,7 +1238,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
@@ -1198,6 +1258,8 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
+ HWRM_UNLOCK();
+
return rc;
}
@@ -1208,13 +1270,14 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ HWRM_UNLOCK();
RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
@@ -1231,13 +1294,14 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
- HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
vnic->rss_rule = INVALID_HW_RING_ID;
@@ -1255,13 +1319,14 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
- HWRM_PREP(req, VNIC_FREE, -1, resp);
+ HWRM_PREP(req, VNIC_FREE);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
vnic->fw_vnic_id = INVALID_HW_RING_ID;
return rc;
@@ -1274,7 +1339,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
+ HWRM_PREP(req, VNIC_RSS_CFG);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -1286,7 +1351,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1299,7 +1365,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;
- HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
+ HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.flags = rte_cpu_to_le_32(
HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -1315,7 +1381,8 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1327,7 +1394,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+ HWRM_PREP(req, VNIC_TPA_CFG);
if (enable) {
req.enables = rte_cpu_to_le_32(
@@ -1350,7 +1417,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1367,10 +1435,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
bp->pf.vf_info[vf].random_mac = false;
@@ -1384,17 +1453,19 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+ HWRM_PREP(req, FUNC_QSTATS);
req.fid = rte_cpu_to_le_16(fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
if (dropped)
*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
+ HWRM_UNLOCK();
+
return rc;
}
@@ -1405,13 +1476,13 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
struct hwrm_func_qstats_input req = {.req_type = 0};
struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+ HWRM_PREP(req, FUNC_QSTATS);
req.fid = rte_cpu_to_le_16(fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
@@ -1432,6 +1503,8 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ HWRM_UNLOCK();
+
return rc;
}
@@ -1441,13 +1514,14 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
struct hwrm_func_clr_stats_input req = {.req_type = 0};
struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+ HWRM_PREP(req, FUNC_CLR_STATS);
req.fid = rte_cpu_to_le_16(fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -1542,12 +1616,8 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
- if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
- RTE_LOG(ERR, PMD,
- "Attempt to free invalid ring group %d\n",
- idx);
+ if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
continue;
- }
rc = bnxt_hwrm_ring_grp_free(bp, idx);
@@ -1683,7 +1753,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
@@ -1700,9 +1770,39 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- rc = bnxt_hwrm_clear_filter(bp, filter);
- if (rc)
- break;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_clear_em_filter(bp, filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ }
+ return rc;
+}
+
+static int
+bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ struct rte_flow *flow;
+ int rc = 0;
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ filter = flow->filter;
+ RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_clear_em_filter(bp, filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
}
return rc;
}
@@ -1713,7 +1813,15 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
STAILQ_FOREACH(filter, &vnic->filter, next) {
- rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
+ filter);
+ else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
+ filter);
+ else
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter);
if (rc)
break;
}
@@ -1734,20 +1842,20 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic;
- unsigned int i;
+ int i;
if (bp->vnic_info == NULL)
return;
- vnic = &bp->vnic_info[0];
- if (BNXT_PF(bp))
- bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
-
- /* VNIC resources */
- for (i = 0; i < bp->nr_vnics; i++) {
+ /*
+ * Cleanup VNICs in reverse order, to make sure the L2 filter
+ * from vnic0 is last to be cleaned up.
+ */
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ bnxt_clear_hwrm_vnic_flows(bp, vnic);
+
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
@@ -1833,7 +1941,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
-static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
+static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
uint32_t one_speed;
@@ -2038,12 +2146,12 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ HWRM_PREP(req, FUNC_QCFG);
req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
/* Hard Coded.. 0xfff VLAN ID mask */
bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
@@ -2059,6 +2167,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
break;
}
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2118,10 +2228,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
req.fid = rte_cpu_to_le_16(0xffff);
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2187,7 +2299,7 @@ static void reserve_resources_from_vf(struct bnxt *bp,
int rc;
/* Get the actual allocated values now */
- HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+ HWRM_PREP(req, FUNC_QCAPS);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -2212,6 +2324,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
*/
//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+
+ HWRM_UNLOCK();
}
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -2221,7 +2335,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
int rc;
/* Check for zero MAC address */
- HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ HWRM_PREP(req, FUNC_QCFG);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
@@ -2232,7 +2346,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
- return rte_le_to_cpu_16(resp->vlan);
+ rc = rte_le_to_cpu_16(resp->vlan);
+
+ HWRM_UNLOCK();
+
+ return rc;
}
static int update_pf_resource_max(struct bnxt *bp)
@@ -2242,15 +2360,17 @@ static int update_pf_resource_max(struct bnxt *bp)
int rc;
/* And copy the allocated numbers into the pf struct */
- HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ HWRM_PREP(req, FUNC_QCFG);
req.fid = rte_cpu_to_le_16(0xffff);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
/* Only TX ring value reflects actual allocation? TODO */
bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
bp->pf.evb_mode = resp->evb_mode;
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2342,7 +2462,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -2357,9 +2477,12 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
RTE_LOG(ERR, PMD,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
+ HWRM_UNLOCK();
break;
}
+ HWRM_UNLOCK();
+
reserve_resources_from_vf(bp, &req, i);
bp->pf.active_vfs++;
bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
@@ -2392,14 +2515,15 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
req.evb_mode = bp->pf.evb_mode;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2411,11 +2535,11 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
+ HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_val = port;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
switch (tunnel_type) {
case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
@@ -2429,6 +2553,9 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
default:
break;
}
+
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2439,11 +2566,14 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
+ HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
+
req.tunnel_type = tunnel_type;
req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2455,11 +2585,14 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
+
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.flags = rte_cpu_to_le_32(flags);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2482,14 +2615,14 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+ HWRM_PREP(req, FUNC_BUF_RGTR);
req.req_buf_num_pages = rte_cpu_to_le_16(1);
req.req_buf_page_size = rte_cpu_to_le_16(
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr[0] =
- rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
RTE_LOG(ERR, PMD,
"unable to map buffer address to physical memory\n");
@@ -2498,7 +2631,8 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2509,11 +2643,12 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+ HWRM_PREP(req, FUNC_BUF_UNRGTR);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2524,7 +2659,8 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
+
req.fid = rte_cpu_to_le_16(0xffff);
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.enables = rte_cpu_to_le_32(
@@ -2532,7 +2668,9 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2543,13 +2681,16 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
struct hwrm_func_vf_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_VF_CFG);
+
req.enables = rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2562,7 +2703,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
uint32_t func_cfg_flags;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
if (is_vf) {
dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
@@ -2580,7 +2721,9 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2592,13 +2735,16 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
+
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(enables);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.max_bw = rte_cpu_to_le_32(max_bw);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2609,14 +2755,17 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc = 0;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
+
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2631,14 +2780,15 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+ HWRM_PREP(req, REJECT_FWD_RESP);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -2650,13 +2800,17 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
int rc;
- HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ HWRM_PREP(req, FUNC_QCFG);
+
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
+
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2670,50 +2824,55 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
if (ec_size > sizeof(req.encap_request))
return -1;
- HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+ HWRM_PREP(req, EXEC_FWD_RESP);
req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
memcpy(req.encap_request, encaped, ec_size);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
- struct rte_eth_stats *stats)
+ struct rte_eth_stats *stats, uint8_t rx)
{
int rc = 0;
struct hwrm_stat_ctx_query_input req = {.req_type = 0};
struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
- HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
+ HWRM_PREP(req, STAT_CTX_QUERY);
req.stat_ctx_id = rte_cpu_to_le_32(cid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
-
- stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
- stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
- stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
- stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ HWRM_CHECK_RESULT();
+
+ if (rx) {
+ stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+ } else {
+ stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+ }
- stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
- stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
- stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
- stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
- stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
- stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+ HWRM_UNLOCK();
return rc;
}
@@ -2728,12 +2887,16 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
- HWRM_PREP(req, PORT_QSTATS, -1, resp);
+ HWRM_PREP(req, PORT_QSTATS);
+
req.port_id = rte_cpu_to_le_16(pf->port_id);
req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2747,10 +2910,14 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
return 0;
- HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+ HWRM_PREP(req, PORT_CLR_STATS);
+
req.port_id = rte_cpu_to_le_16(pf->port_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2763,10 +2930,11 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
if (BNXT_VF(bp))
return 0;
- HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
+ HWRM_PREP(req, PORT_LED_QCAPS);
req.port_id = bp->pf.port_id;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
unsigned int i;
@@ -2786,6 +2954,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
}
}
}
+
+ HWRM_UNLOCK();
+
return rc;
}
@@ -2801,7 +2972,8 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
- HWRM_PREP(req, PORT_LED_CFG, -1, resp);
+ HWRM_PREP(req, PORT_LED_CFG);
+
if (led_on) {
led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
duration = rte_cpu_to_le_16(500);
@@ -2819,8 +2991,171 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+ uint32_t *length)
+{
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, NVM_GET_DIR_INFO);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ if (!rc) {
+ *entries = rte_le_to_cpu_32(resp->entries);
+ *length = rte_le_to_cpu_32(resp->entry_length);
+ }
+ return rc;
+}
+
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
+{
+ int rc;
+ uint32_t dir_entries;
+ uint32_t entry_length;
+ uint8_t *buf;
+ size_t buflen;
+ rte_iova_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+ struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
+
+ rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = rte_malloc("nvm_dir", buflen, 0);
+ if (buf == NULL)
+ return -ENOMEM;
+ rte_mem_lock_page(buf);
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ rte_free(buf);
+ return -ENOMEM;
+ }
+ HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
+ req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+
+ rte_free(buf);
+
+ return rc;
+}
+
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+ uint32_t offset, uint32_t length,
+ uint8_t *data)
+{
+ int rc;
+ uint8_t *buf;
+ rte_iova_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+ struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
+
+ buf = rte_malloc("nvm_item", length, 0);
+ if (!buf)
+ return -ENOMEM;
+ rte_mem_lock_page(buf);
+
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ rte_free(buf);
+ return -ENOMEM;
+ }
+ HWRM_PREP(req, NVM_READ);
+ req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
+ req.dir_idx = rte_cpu_to_le_16(index);
+ req.offset = rte_cpu_to_le_32(offset);
+ req.len = rte_cpu_to_le_32(length);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ if (rc == 0)
+ memcpy(data, buf, length);
+
+ rte_free(buf);
+ return rc;
+}
+
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
+{
+ int rc;
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+ struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
+ req.dir_idx = rte_cpu_to_le_16(index);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+ uint16_t dir_ordinal, uint16_t dir_ext,
+ uint16_t dir_attr, const uint8_t *data,
+ size_t data_len)
+{
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
+ rte_iova_t dma_handle;
+ uint8_t *buf;
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
+
+ buf = rte_malloc("nvm_write", data_len, 0);
+ if (!buf)
+ return -ENOMEM;
+ rte_mem_lock_page(buf);
+
+ dma_handle = rte_mem_virt2iova(buf);
+ if (dma_handle == 0) {
+ RTE_LOG(ERR, PMD,
+ "unable to map response address to physical memory\n");
+ rte_free(buf);
+ return -ENOMEM;
+ }
+ memcpy(buf, data, data_len);
+ req.host_src_addr = rte_cpu_to_le_64(dma_handle);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ rte_free(buf);
return rc;
}
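
For reference, a minimal sketch of how a caller might drive the new NVM write
helper. The bnxt_hwrm_flash_nvram() signature is the one added above; the
directory constants come from bnxt_nvm_defs.h (added below), and the wrapper
itself is an illustrative assumption, not part of this patch.

/* Hypothetical caller: flash an update image into the NVM directory. */
static int example_flash_update(struct bnxt *bp,
				const uint8_t *image, size_t len)
{
	return bnxt_hwrm_flash_nvram(bp, BNX_DIR_TYPE_UPDATE,
				     BNX_DIR_ORDINAL_FIRST,
				     BNX_DIR_EXT_NONE, BNX_DIR_ATTR_NONE,
				     image, len);
}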
@@ -2857,28 +3192,34 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
int rc;
/* First query all VNIC ids */
- HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp_vf_vnic_ids);
+ HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == 0) {
+ HWRM_UNLOCK();
RTE_LOG(ERR, PMD,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
+ HWRM_UNLOCK();
RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
+ HWRM_UNLOCK();
RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
+ rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
+
+ HWRM_UNLOCK();
- return rte_le_to_cpu_32(resp->vnic_id_cnt);
+ return rc;
}
/*
@@ -2943,7 +3284,8 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
struct hwrm_func_cfg_input req = {0};
int rc;
- HWRM_PREP(req, FUNC_CFG, -1, resp);
+ HWRM_PREP(req, FUNC_CFG);
+
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
req.enables |= rte_cpu_to_le_32(
HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
@@ -2951,7 +3293,9 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT;
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -3004,3 +3348,215 @@ exit:
rte_free(vnic_ids);
return -1;
}
+
+int bnxt_hwrm_set_em_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ if (filter->fw_em_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_em_filter(bp, filter);
+
+ HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(dst_id);
+
+ if (filter->ip_addr_type) {
+ req.ip_addr_type = filter->ip_addr_type;
+ enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+ }
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+ memcpy(req.src_macaddr, filter->src_macaddr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
+ memcpy(req.dst_macaddr, filter->dst_macaddr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
+ req.ovlan_vid = filter->l2_ovlan;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
+ req.ivlan_vid = filter->l2_ivlan;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
+ req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+ req.ip_protocol = filter->ip_protocol;
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+ req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
+ req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
+ req.src_port = rte_cpu_to_be_16(filter->src_port);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
+ req.dst_port = rte_cpu_to_be_16(filter->dst_port);
+ if (enables &
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+ req.mirror_vnic_id = filter->mirror_vnic_id;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (filter->fw_em_filter_id == UINT64_MAX)
+ return 0;
+
+ RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ HWRM_PREP(req, CFA_EM_FLOW_FREE);
+
+ req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ filter->fw_em_filter_id = -1;
+ filter->fw_l2_filter_id = -1;
+
+ return 0;
+}
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
+ uint16_t dst_id,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ if (filter->fw_ntuple_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(dst_id);
+
+ if (filter->ip_addr_type) {
+ req.ip_addr_type = filter->ip_addr_type;
+ enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
+ }
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
+ memcpy(req.src_macaddr, filter->src_macaddr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
+ req.ethertype = rte_cpu_to_be_16(filter->ethertype);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
+ req.ip_protocol = filter->ip_protocol;
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
+ req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
+ req.src_ipaddr_mask[0] =
+ rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
+ req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
+ req.dst_ipaddr_mask[0] =
+ rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
+ req.src_port = rte_cpu_to_le_16(filter->src_port);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
+ req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
+ req.dst_port = rte_cpu_to_le_16(filter->dst_port);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
+ req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
+ if (enables &
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
+ req.mirror_vnic_id = filter->mirror_vnic_id;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_ntuple_filter_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ if (filter->fw_ntuple_filter_id == UINT64_MAX)
+ return 0;
+
+ HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
+
+ req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ filter->fw_ntuple_filter_id = -1;
+ filter->fw_l2_filter_id = -1;
+
+ return 0;
+}
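
At the API level, which of the three set/clear pairs above runs is decided by
filter->filter_type, which the rte_flow parsing code derives from the flow
pattern. Below is a sketch of an application rule that would typically land in
the ntuple path; the queue index, addresses and ports are placeholders:

#include <rte_flow.h>
#include <rte_ip.h>

/* Illustrative: match TCP/IPv4 to 10.0.0.1:80 and steer it to queue 1. */
static struct rte_flow *example_ntuple_flow(uint16_t port_id,
					    struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}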
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 51cd0dd4..85083e61 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -51,9 +51,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
uint16_t vlan_count,
struct bnxt_vlan_antispoof_table_entry *vlan_table);
-int bnxt_hwrm_clear_filter(struct bnxt *bp,
+int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
-int bnxt_hwrm_set_filter(struct bnxt *bp,
+int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
uint16_t dst_id,
struct bnxt_filter_info *filter);
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
@@ -92,7 +92,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
- struct rte_eth_stats *stats);
+ struct rte_eth_stats *stats, uint8_t rx);
int bnxt_hwrm_ver_get(struct bnxt *bp);
@@ -156,4 +156,23 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
bool on);
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf);
+int bnxt_hwrm_set_em_filter(struct bnxt *bp, uint16_t dst_id,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
+
+int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, uint16_t dst_id,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter);
+int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data);
+int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
+ uint32_t *length);
+int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
+ uint32_t offset, uint32_t length,
+ uint8_t *data);
+int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index);
+int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
+ uint16_t dir_ordinal, uint16_t dir_ext,
+ uint16_t dir_attr, const uint8_t *data,
+ size_t data_len);
#endif
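
A sketch of how the NVM directory-introspection prototypes above combine; the
buffer sizing mirrors the two-byte entry-count/entry-length header that
bnxt_get_nvram_directory() prepends, and the dump wrapper itself is assumed:

/* Hypothetical: fetch the raw NVM directory table for inspection. */
static int example_dump_nvm_dir(struct bnxt *bp)
{
	uint32_t entries, entry_len, len;
	uint8_t *tbl;
	int rc;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
	if (rc)
		return rc;

	len = entries * entry_len + 2;	/* +2 for the prepended header */
	tbl = rte_zmalloc("nvm_dir_dump", len, 0);
	if (tbl == NULL)
		return -ENOMEM;

	rc = bnxt_get_nvram_directory(bp, len, tbl);
	if (rc == 0)
		RTE_LOG(INFO, PMD, "NVM directory: %u entries, %u bytes each\n",
			entries, entry_len);

	rte_free(tbl);
	return rc;
}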
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 47cda7e5..49436cfd 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -50,11 +50,18 @@ static void bnxt_int_handler(void *param)
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- uint32_t raw_cons = cpr->cp_raw_cons;
- uint32_t cons;
struct cmpl_base *cmp;
+ uint32_t raw_cons;
+ uint32_t cons;
+ if (cpr == NULL)
+ return;
+
+ raw_cons = cpr->cp_raw_cons;
while (1) {
+ if (!cpr || !cpr->cp_ring_struct)
+ return;
+
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
cmp = &cpr->cp_desc_ring[cons];
diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
index e21bec56..4d2f7af9 100644
--- a/drivers/net/bnxt/bnxt_irq.h
+++ b/drivers/net/bnxt/bnxt_irq.h
@@ -34,6 +34,9 @@
#ifndef _BNXT_IRQ_H_
#define _BNXT_IRQ_H_
+#define BNXT_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define BNXT_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
struct bnxt_irq {
rte_intr_callback_fn handler;
unsigned int vector;
diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 00000000..c5ccc9bc
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,75 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_NONE 0
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NONE 0
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#define BNX_PKG_LOG_MAX_LENGTH 4096
+
+enum bnxnvm_pkglog_field_index {
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
+ BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+ BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
+ BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
+ BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
+};
+
+#endif /* Don't add anything after this line */
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 9d0ae277..0fa2f0c0 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -98,7 +98,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct rte_pci_device *pdev = bp->pdev;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
@@ -172,15 +172,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
+ "Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map ring address to physical memory\n");
@@ -231,7 +231,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring->bd = ((char *)mz->addr + ag_ring_start);
rx_ring_info->ag_desc_ring =
(struct rx_prod_pkt_bd *)rx_ring->bd;
- rx_ring->bd_dma = mz->phys_addr + ag_ring_start;
+ rx_ring->bd_dma = mz->iova + ag_ring_start;
rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
rx_ring->mem_zone = (const void *)mz;
@@ -323,8 +323,10 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
- if (ring == NULL)
+ if (ring == NULL) {
RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ goto err_out;
+ }
rc = bnxt_hwrm_ring_alloc(bp, ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 6d1eb588..164f482e 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -41,7 +41,7 @@
#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+ ((uint64_t)((mb)->buf_iova + (mb)->data_off))
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
@@ -70,7 +70,7 @@
struct bnxt_ring {
void *bd;
- phys_addr_t bd_dma;
+ rte_iova_t bd_dma;
uint32_t ring_size;
uint32_t ring_mask;
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 0793820b..c4da474e 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -60,10 +60,13 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
int bnxt_mq_rx_configure(struct bnxt *bp)
{
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- unsigned int i, j, nb_q_per_grp, ring_idx;
- int start_grp_id, end_grp_id, rc = 0;
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
+ unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
+ int start_grp_id, end_grp_id = 1, rc = 0;
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter;
+ enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
struct bnxt_rx_queue *rxq;
bp->nr_vnics = 0;
@@ -98,117 +101,125 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
/* Multi-queue mode */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
- enum rte_eth_nb_pools pools;
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
- {
- const struct rte_eth_vmdq_rx_conf *conf =
- &dev_conf->rx_adv_conf.vmdq_rx_conf;
-
- /* ETH_8/64_POOLs */
- pools = conf->nb_queue_pools;
- break;
- }
+ /* ETH_8/64_POOLs */
+ pools = conf->nb_queue_pools;
+ /* For each pool, allocate MACVLAN CFA rule & VNIC */
+ max_pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx,
+ ETH_64_POOLS)));
+ if (pools > max_pools)
+ pools = max_pools;
+ break;
+ case ETH_MQ_RX_RSS:
+ pools = bp->rx_cp_nr_rings;
+ break;
default:
RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
}
- /* For each pool, allocate MACVLAN CFA rule & VNIC */
- if (!pools) {
- pools = RTE_MIN(bp->max_vnics,
- RTE_MIN(bp->max_l2_ctx,
- RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
- RTE_LOG(ERR, PMD,
- "VMDq pool not set, defaulted to 64\n");
- pools = ETH_64_POOLS;
+ }
+
+ nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ start_grp_id = 0;
+ end_grp_id = nb_q_per_grp;
+
+ for (i = 0; i < pools; i++) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
}
- nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- start_grp_id = 0;
- end_grp_id = nb_q_per_grp;
-
- ring_idx = 0;
- for (i = 0; i < pools; i++) {
- vnic = bnxt_alloc_vnic(bp);
- if (!vnic) {
- RTE_LOG(ERR, PMD,
- "VNIC alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- vnic->flags |= BNXT_VNIC_INFO_BCAST;
- STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
- bp->nr_vnics++;
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
+ STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
+ bp->nr_vnics++;
- for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
- rxq = bp->eth_dev->data->rx_queues[ring_idx];
- rxq->vnic = vnic;
- }
- if (i == 0)
- vnic->func_default = true;
- vnic->ff_pool_idx = i;
- vnic->start_grp_id = start_grp_id;
- vnic->end_grp_id = end_grp_id;
-
- filter = bnxt_alloc_filter(bp);
- if (!filter) {
- RTE_LOG(ERR, PMD,
- "L2 filter alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
+ for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+ rxq = bp->eth_dev->data->rx_queues[ring_idx];
+ rxq->vnic = vnic;
+ }
+ if (i == 0) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ bp->eth_dev->data->promiscuous = 1;
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
- /*
- * TODO: Configure & associate CFA rule for
- * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
- */
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-
- start_grp_id = end_grp_id + 1;
- end_grp_id += nb_q_per_grp;
+ vnic->func_default = true;
}
- goto out;
- }
+ vnic->ff_pool_idx = i;
+ vnic->start_grp_id = start_grp_id;
+ vnic->end_grp_id = end_grp_id;
+
+ if (i) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ vnic->rss_dflt_cr = true;
+ goto skip_filter_allocation;
+ }
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ /*
+ * TODO: Configure & associate CFA rule for
+ * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
+ */
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
- /* Non-VMDq mode - RSS, DCB, RSS+DCB */
- /* Init default VNIC for RSS or DCB only */
- vnic = bnxt_alloc_vnic(bp);
- if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- vnic->flags |= BNXT_VNIC_INFO_BCAST;
- /* Partition the rx queues for the single pool */
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- rxq = bp->eth_dev->data->rx_queues[i];
- rxq->vnic = vnic;
- }
- STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
- bp->nr_vnics++;
-
- vnic->func_default = true;
- vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 0;
- vnic->end_grp_id = bp->rx_cp_nr_rings;
- filter = bnxt_alloc_filter(bp);
- if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
+skip_filter_allocation:
+ start_grp_id = end_grp_id;
+ end_grp_id += nb_q_per_grp;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- vnic->hash_type =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
- HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
out:
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
+ uint16_t hash_type = 0;
+
+ if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
+ rss = &bp->rss_conf;
+ bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
+ }
+
+ if (rss->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss->rss_key &&
+ rss->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key,
+ rss->rss_key, rss->rss_key_len);
+ }
+ }
+ }
+
return rc;
err_out:
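
The block above converts the ETH_RSS_* bits of the device configuration into
HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_* values per VNIC. From the application side
the mapping is exercised through an ordinary rte_eth_conf; a minimal sketch
(the flag choice is illustrative):

#include <rte_ethdev.h>

/* Request IPv4/IPv6 TCP RSS; the PMD translates rss_hf as shown above. */
static const struct rte_eth_conf example_rss_conf = {
	.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* let the PMD keep its random key */
			.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
				  ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP,
		},
	},
};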
@@ -349,3 +360,41 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
out:
return rc;
}
+
+int
+bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
+ B_CP_DB_ARM(cpr);
+ }
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
+ B_CP_DB_DISARM(cpr);
+ }
+ return rc;
+}
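
These two ops back the generic rte_eth_dev_rx_intr_enable()/_disable() calls,
arming and disarming the completion-ring doorbell. A rough sketch of the usual
interrupt-plus-polling pattern on the application side; the epoll wiring is
elided and assumed to have been set up with rte_eth_dev_rx_intr_ctl_q():

#include <rte_ethdev.h>

static void example_wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	/* Arm the CQ doorbell (bnxt_rx_queue_intr_enable_op underneath) */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* ... block in rte_epoll_wait() until the queue raises an event ... */

	/* Disarm before returning to busy polling */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}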
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 01aaa007..508731ee 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -48,7 +48,7 @@ struct bnxt_rx_queue {
uint16_t rx_free_thresh; /* max free RX desc to hold */
uint16_t queue_id; /* RX queue index */
uint16_t reg_idx; /* RX queue register index */
- uint8_t port_id; /* Device port identifier */
+ uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
struct bnxt *bp;
@@ -73,5 +73,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
void bnxt_free_rx_mbufs(struct bnxt *bp);
+int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
+int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index bee67d33..30891b74 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -199,7 +199,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
if (tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_RX_VLAN;
}
if (likely(tpa_start1->flags2 &
rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
@@ -219,6 +219,9 @@ static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ cpr->valid = FLIP_VALID(raw_cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}
@@ -332,6 +335,48 @@ static inline struct rte_mbuf *bnxt_tpa_end(
return mbuf;
}
+static uint32_t
+bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t pkt_type = 0;
+ uint32_t t_ipcs = 0, ip = 0, ip6 = 0;
+ uint32_t tcp = 0, udp = 0, icmp = 0;
+ uint32_t vlan = 0;
+
+ vlan = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
+ t_ipcs = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
+ ip6 = !!(rxcmp1->flags2 &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
+ icmp = !!(rxcmp->flags_type &
+ rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_ICMP));
+ tcp = !!(rxcmp->flags_type &
+ rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_TCP));
+ udp = !!(rxcmp->flags_type &
+ rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_UDP));
+ ip = !!(rxcmp->flags_type &
+ rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_IP));
+
+ pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && !ip6) ?
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0;
+ pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && ip6) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0;
+ pkt_type |= (!t_ipcs && icmp) ? RTE_PTYPE_L4_ICMP : 0;
+ pkt_type |= (!t_ipcs && udp) ? RTE_PTYPE_L4_UDP : 0;
+ pkt_type |= (!t_ipcs && tcp) ? RTE_PTYPE_L4_TCP : 0;
+ pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && !ip6) ?
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN : 0;
+ pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && ip6) ?
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN : 0;
+ pkt_type |= (t_ipcs && icmp) ? RTE_PTYPE_INNER_L4_ICMP : 0;
+ pkt_type |= (t_ipcs && udp) ? RTE_PTYPE_INNER_L4_UDP : 0;
+ pkt_type |= (t_ipcs && tcp) ? RTE_PTYPE_INNER_L4_TCP : 0;
+ pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : 0;
+
+ return pkt_type;
+}
+
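With bnxt_parse_pkt_type() filling mbuf->packet_type, applications can branch
on the standard ptype masks instead of re-parsing headers; a small sketch:

#include <rte_mbuf.h>

/* Test the ptype fields this PMD now sets on every received mbuf. */
static inline int example_is_ipv4_tcp(const struct rte_mbuf *m)
{
	return (m->packet_type & RTE_PTYPE_L3_MASK) ==
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
	       (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}
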
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
@@ -360,13 +405,17 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cpr->valid = FLIP_VALID(cp_cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
+
cmp_type = CMP_TYPE(rxcmp);
- if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
(struct rx_tpa_start_cmpl_hi *)rxcmp1);
rc = -EINVAL; /* Continue w/o new mbuf */
goto next_rx;
- } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
(struct rx_tpa_end_cmpl *)rxcmp,
(struct rx_tpa_end_cmpl_hi *)rxcmp1);
@@ -388,10 +437,10 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
cons = rxcmp->opaque;
mbuf = bnxt_consume_rx_buf(rxr, cons);
- rte_prefetch0(mbuf);
-
if (mbuf == NULL)
- return -ENOMEM;
+ return -EBUSY;
+
+ rte_prefetch0(mbuf);
mbuf->nb_segs = 1;
mbuf->next = NULL;
@@ -415,9 +464,21 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
(RX_PKT_CMPL_METADATA_VID_MASK |
RX_PKT_CMPL_METADATA_DE |
RX_PKT_CMPL_METADATA_PRI_MASK);
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_RX_VLAN;
}
+ if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+
+ if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+
+ mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
+
#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
@@ -448,13 +509,14 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
+ goto rx;
}
rxr->rx_prod = prod;
/*
* All MBUFs are allocated with the same size under DPDK,
* no optimization for rx_copy_thresh
*/
-
+rx:
*rx_pkt = mbuf;
next_rx:
@@ -476,22 +538,24 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct rx_pkt_cmpl *rxcmp;
uint16_t prod = rxr->rx_prod;
uint16_t ag_prod = rxr->ag_prod;
+ int rc = 0;
/* Handle RX burst request */
while (1) {
- int rc;
-
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rte_prefetch0(&cpr->cp_desc_ring[cons]);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
break;
+ cpr->valid = FLIP_VALID(cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
/* TODO: Avoid magic numbers... */
if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
- if (likely(!rc))
+ if (likely(!rc) || rc == -ENOMEM)
nb_rx_pkts++;
if (rc == -EBUSY) /* partial completion */
break;
@@ -514,6 +578,30 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
/* Ring the AGG ring DB */
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ /* Attempt to alloc Rx buf in case of a previous allocation failure. */
+ if (rc == -ENOMEM) {
+ int i;
+
+ for (i = prod; i <= nb_rx_pkts;
+ i = RING_NEXT(rxr->rx_ring_struct, i)) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+
+ /* Buffer already allocated for this index. */
+ if (rx_buf->mbuf != NULL)
+ continue;
+
+ /* This slot is empty. Alloc buffer for Rx */
+ if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
+ rxr->rx_prod = i;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ } else {
+ RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ break;
+ }
+ }
+ }
+
return nb_rx_pkts;
}
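
Note the revised -ENOMEM handling above: a completed packet is still delivered
even when its replacement buffer cannot be allocated, and the empty ring slots
are refilled at the end of the burst. Nothing changes for the application; the
standard burst loop below is a sketch with a placeholder burst size:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define EXAMPLE_BURST 32

static void example_rx_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, EXAMPLE_BURST);
	for (i = 0; i < nb; i++) {
		/* ... process pkts[i] ... */
		rte_pktmbuf_free(pkts[i]);
	}
}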
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f8d6dc80..a94373d1 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -52,6 +52,22 @@
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
+#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+
+#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
+
+#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+
+#define RX_CMP_IP_CS_OK(rxcmp1) \
+ (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
+ !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
+
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
@@ -85,8 +101,8 @@ struct bnxt_rx_ring_info {
struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */
- phys_addr_t rx_desc_mapping;
- phys_addr_t ag_desc_mapping;
+ rte_iova_t rx_desc_mapping;
+ rte_iova_t ag_desc_mapping;
struct bnxt_ring *rx_ring_struct;
struct bnxt_ring *ag_ring_struct;
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index d7d0e35c..fe83d370 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -228,9 +228,10 @@ void bnxt_free_stats(struct bnxt *bp)
}
}
-void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *bnxt_stats)
{
+ int rc = 0;
unsigned int i;
struct bnxt *bp = eth_dev->data->dev_private;
@@ -240,17 +241,26 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
+ rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+ bnxt_stats, 1);
+ if (unlikely(rc))
+ return rc;
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
+ rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+ bnxt_stats, 0);
+ if (unlikely(rc))
+ return rc;
}
- bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
+ rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
+ if (unlikely(rc))
+ return rc;
bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
+ return rc;
}
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
@@ -358,3 +368,54 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
RTE_LOG(ERR, PMD, "Operation not supported\n");
}
+
+int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ struct rte_eth_xstat xstats[stat_cnt];
+ uint64_t values_copy[stat_cnt];
+ uint16_t i;
+
+ if (!ids) {
+ unsigned int n = bnxt_dev_xstats_get_op(dev, xstats, stat_cnt);
+
+ /* Copy the values out here; the recursive call below relies on
+ * this branch filling the caller's buffer, which the original
+ * code never did, leaving values_copy uninitialized.
+ */
+ if (values) {
+ for (i = 0; i < n && i < limit; i++)
+ values[i] = xstats[i].value;
+ }
+ return n;
+ }
+
+ bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ RTE_LOG(ERR, PMD, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return stat_cnt;
+}
+
+int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit)
+{
+ /* Account for the Tx drop pkts aka the Anti spoof counter */
+ const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + 1;
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+ uint16_t i;
+
+ if (!ids)
+ return bnxt_dev_xstats_get_names_op(dev, xstats_names,
+ stat_cnt);
+ bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL,
+ stat_cnt);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ RTE_LOG(ERR, PMD, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return stat_cnt;
+}
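
These callbacks implement the ethdev xstats-by-id interface; a sketch of the
matching application calls, with the id-discovery step elided:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Read a single extended statistic by its id. */
static int example_read_one_xstat(uint16_t port_id, uint64_t id)
{
	uint64_t value;
	int rc;

	rc = rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
	if (rc < 0)
		return rc;

	printf("xstat[%" PRIu64 "] = %" PRIu64 "\n", id, value);
	return 0;
}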
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index b6d133ef..51d16f5d 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -37,7 +37,7 @@
#include <rte_ethdev.h>
void bnxt_free_stats(struct bnxt *bp);
-void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *bnxt_stats);
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
@@ -46,6 +46,11 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_xstat *xstats, unsigned int n);
void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev);
+int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit);
+int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit);
struct bnxt_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 16f3a0bd..f753c10f 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -46,7 +46,7 @@ struct bnxt_tx_queue {
uint16_t tx_next_rs; /* next desc to set RS bit */
uint16_t queue_id; /* TX queue index */
uint16_t reg_idx; /* TX queue register index */
- uint8_t port_id; /* Device port identifier */
+ uint16_t port_id; /* Device port identifier */
uint8_t pthresh; /* Prefetch threshold register */
uint8_t hthresh; /* Host threshold register */
uint8_t wthresh; /* Write-back threshold reg */
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 6870b16d..8ca4bbd8 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -161,7 +161,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -211,21 +211,39 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
/* TSO */
- txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
tx_pkt->l4_len + tx_pkt->outer_l2_len +
tx_pkt->outer_l3_len;
txbd1->mss = tx_pkt->tso_segsz;
- } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM)) {
+ } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+ /* Outer IP, Inner IP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
+ txbd1->mss = 0;
+ } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
- txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
-
} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
/* IP CSO */
- txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
+ txbd1->mss = 0;
+ } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ /* IP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
}
} else {
@@ -295,6 +313,9 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
break;
+ cpr->valid = FLIP_VALID(cons,
+ cpr->cp_ring_struct->ring_mask,
+ cpr->valid);
if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
nb_tx_pkts++;
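
A minimal sketch of the valid-bit convention this hunk relies on, assuming FLIP_VALID follows the usual bnxt definition (the real macro lives in the driver's completion-ring header; names here are illustrative):

/*
 * The producer toggles the V bit each time it wraps the completion
 * ring, so the consumer flips its expected polarity whenever its
 * cursor wraps past the ring mask.
 */
#include <stdbool.h>
#include <stdint.h>

#define FLIP_VALID(cons, mask, val) \
	((cons) >= (mask) ? !(val) : (val))

static bool cmp_valid(uint32_t v_bit, uint32_t expected)
{
	/* An entry is consumable only when its V bit matches. */
	return !!v_bit == !!expected;
}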
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 5b097114..2feac51d 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -49,7 +49,7 @@ struct bnxt_tx_ring_info {
struct tx_bd_long *tx_desc_ring;
struct bnxt_sw_tx_bd *tx_buf_ring;
- phys_addr_t tx_desc_mapping;
+ rte_iova_t tx_desc_mapping;
#define BNXT_DEV_STATE_CLOSING 0x1
uint32_t dev_state;
@@ -69,4 +69,25 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM)
+#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
+
+
+#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+ TX_BD_LONG_LFLAGS_T_IP_CHKSUM)
+
#endif
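
The composite masks above nest: PKT_TX_OIP_IIP_TCP_UDP_CKSUM covers every other combination, so the transmit path tests the widest mask first and lets narrower requests fall through. A condensed, illustrative sketch of that cascade (note that PKT_TX_UDP_CKSUM and PKT_TX_TCP_CKSUM share bits within the mbuf's L4 checksum field, so exact-mask equality on these composites would not behave the way it may appear to):

static inline uint32_t bd_cksum_lflags(uint64_t ol_flags)
{
	/* Widest combination first; narrower requests fall through. */
	if (ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM)
		return TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
	if (ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM)
		return TX_BD_FLG_IP_TCP_UDP_CHKSUM;
	if (ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM)
		return TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
	if (ol_flags & PKT_TX_OIP_IIP_CKSUM)
		return TX_BD_FLG_TIP_IP_CHKSUM;
	if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
		return TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
	if (ol_flags & PKT_TX_IP_CKSUM)
		return TX_BD_LONG_LFLAGS_IP_CHKSUM;
	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
		return TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
	return 0;
}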
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index db9fb079..5bac2605 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -83,6 +83,7 @@ void bnxt_init_vnics(struct bnxt *bp)
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
+ STAILQ_INIT(&vnic->flow_list);
STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
}
for (i = 0; i < MAX_FF_POOLS; i++)
@@ -174,7 +175,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
- phys_addr_t mz_phys_addr;
+ rte_iova_t mz_phys_addr;
max_vnics = bp->max_vnics;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -191,13 +192,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
if (!mz)
return -ENOMEM;
}
- mz_phys_addr = mz->phys_addr;
+ mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map vnic address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 993f2212..875dc3c1 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -53,11 +53,11 @@ struct bnxt_vnic_info {
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
- phys_addr_t rss_table_dma_addr;
+ rte_iova_t rss_table_dma_addr;
uint16_t *rss_table;
- phys_addr_t rss_hash_key_dma_addr;
+ rte_iova_t rss_hash_key_dma_addr;
void *rss_hash_key;
- phys_addr_t mc_list_dma_addr;
+ rte_iova_t mc_list_dma_addr;
char *mc_list;
uint32_t mc_addr_cnt;
#define BNXT_MAX_MC_ADDRS 16
@@ -80,6 +80,7 @@ struct bnxt_vnic_info {
bool rss_dflt_cr;
STAILQ_HEAD(, bnxt_filter_info) filter;
+ STAILQ_HEAD(, rte_flow) flow_list;
};
struct bnxt;
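
The new flow_list head gives each VNIC a place to track the rte_flow rules created against it. A self-contained sketch of the sys/queue.h pattern it relies on (the flow struct below is a stand-in, not the driver's rte_flow):

#include <stdlib.h>
#include <sys/queue.h>

struct flow {				/* stand-in for rte_flow */
	STAILQ_ENTRY(flow) next;
};
STAILQ_HEAD(flow_head, flow);

static struct flow *flow_track(struct flow_head *head)
{
	/* head must be STAILQ_INIT()ed first, as bnxt_init_vnics() does. */
	struct flow *f = calloc(1, sizeof(*f));

	if (f != NULL)
		STAILQ_INSERT_TAIL(head, f, next); /* kept for later cleanup */
	return f;
}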
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index cb8660af..c16edbad 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -33,25 +33,27 @@
#ifndef _HSI_STRUCT_DEF_DPDK_
#define _HSI_STRUCT_DEF_DPDK_
-/* HSI and HWRM Specification 1.7.7 */
+/* HSI and HWRM Specification 1.8.2 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 7
+#define HWRM_VERSION_MINOR 8
+#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_STR "1.7.7"
+#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR "1.8.2.0"
/*
* Following is the signature for HWRM message field that indicates not
 * applicable (All F's). Need to cast it to the size of the field if needed.

*/
#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1
-#define HWRM_ROCE_SP_HSI_VERSION_MINOR 7
-#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 4
+#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8
+#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2
/*
* Request types
@@ -129,6 +131,9 @@
#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
+#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c))
+#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d))
+#define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e))
#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
@@ -815,8 +820,6 @@ struct rx_pkt_cmpl {
* packet. Length = 32B
*/
#define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
- #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13)
- #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15)
/*
* When this bit is '1', it indicates a packet that has an error
* of some type. Type of error is indicated in error_flags.
@@ -1800,6 +1803,8 @@ struct hwrm_async_event_cmpl {
UINT32_C(0x32)
/* VF Configuration Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34)
/* HWRM Error */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff)
uint32_t event_data2;
@@ -2117,9 +2122,18 @@ struct hwrm_ver_get_output {
* This field returns the default request timeout value in
* milliseconds.
*/
+ uint8_t init_pending;
+ /*
+ * This field will indicate if any subsystem is not fully
+ * initialized.
+ */
+ /*
+ * If set to 1, device is not ready. If set to 0, device is
+ * ready to accept all HWRM commands.
+ */
+ #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1)
uint8_t unused_0;
uint8_t unused_1;
- uint8_t unused_2;
uint8_t valid;
/*
* This field is used in Output records to indicate that the
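
A hedged sketch of how a driver might gate HWRM use on the new init_pending field when probing firmware readiness (the response pointer and retry policy are illustrative, not the driver's actual flow):

static int fw_ready(const struct hwrm_ver_get_output *resp)
{
	/* DEV_NOT_RDY set: device not yet ready for all HWRM commands. */
	return !(resp->init_pending &
		 HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY);
}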
@@ -2246,6 +2260,122 @@ struct hwrm_func_reset_output {
*/
} __attribute__((packed));
+/* hwrm_func_vf_cfg */
+/*
+ * Description: This command allows configuration of a VF by its driver. If this
+ * function is called by a PF driver, then the HWRM shall fail this command. If
+ * guest VLAN and/or MAC address are provided in this command, then the HWRM
+ * shall set up appropriate MAC/VLAN filters for the VF that is being
+ * configured. A VF driver should set VF MTU/MRU using this command prior to
+ * allocating RX VNICs or TX rings for the corresponding VF.
+ */
+/* Input (32 bytes) */
+struct hwrm_func_vf_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the mtu field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ /* This bit must be '1' for the guest_vlan field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the async_event_cr field to be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
+ /* This bit must be '1' for the dflt_mac_addr field to be configured. */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit requested on the function. The HWRM
+ * should make sure that the mtu of the function does not exceed the mtu
+ * of the physical port that this function is associated with. In
+ * addition to requesting mtu per function, it is possible to configure
+ * mtu per transmit ring. By default, the mtu of each transmit ring
+ * associated with a function is equal to the mtu of the function. The
+ * HWRM should make sure that the mtu of each transmit ring that is
+ * assigned to a function has a valid mtu.
+ */
+ uint16_t guest_vlan;
+ /*
+ * The guest VLAN for the function being configured. This field's format
+ * is the same as 802.1Q Tag's Tag Control Information (TCI) format that
+ * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t async_event_cr;
+ /*
+ * ID of the target completion ring for receiving asynchronous event
+ * completions. If this field is not valid, then the HWRM shall use the
+ * default completion ring of the function that is being configured as
+ * the target completion ring for providing any asynchronous event
+ * completions for that function. If this field is valid, then the HWRM
+ * shall use the completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event completions for
+ * the function that is being configured.
+ */
+ uint8_t dflt_mac_addr[6];
+ /*
+ * This value is the current MAC address requested by the VF driver to
+ * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
+ * MAC address configuration is requested by the VF driver. The parent
+ * PF driver may reject or overwrite this MAC address.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_func_vf_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+} __attribute__((packed));
+
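
A hedged sketch of filling hwrm_func_vf_cfg_input: only fields whose enables bit is set are honoured, per the description above. Wire fields are little-endian; byte-order conversion and the mailbox send (a driver-internal helper) are omitted:

static void vf_cfg_fill(struct hwrm_func_vf_cfg_input *req, uint16_t mtu)
{
	req->enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU;
	req->mtu = mtu;	/* must not exceed the parent port's MTU */
	/* hand req to the HWRM channel here (driver-internal step) */
}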
/* hwrm_func_qcaps */
/*
* Description: This command returns capabilities of a function. The input FID
@@ -2727,8 +2857,16 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
uint16_t dflt_vnic_id;
/* The default VNIC ID assigned to a function that is being queried. */
- uint8_t unused_0;
- uint8_t unused_1;
+ uint16_t max_mtu_configured;
+ /*
+ * This value specifies the MAX MTU that can be configured by
+ * host drivers. This 'max_mtu_configured' can be the HW max MTU
+ * or an OEM application-specified value. Host drivers can't
+ * configure an MTU greater than this value. Host drivers
+ * should read this value prior to configuring the MTU. FW will
+ * fail any host request with an MTU greater than
+ * 'max_mtu_configured'.
+ */
uint32_t min_bw;
/*
* Minimum BW allocated for this function. The HWRM will
@@ -2826,7 +2964,7 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
/* Virtual Ethernet Port Aggregator (VEPA) */
#define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_2;
+ uint8_t unused_0;
uint16_t alloc_vfs;
/*
* The number of VFs that are allocated to the function. This is
@@ -2846,7 +2984,7 @@ struct hwrm_func_qcfg_output {
* The number of strict priority transmit rings out of currently
* allocated TX rings to the function (alloc_tx_rings).
*/
- uint8_t unused_3;
+ uint8_t unused_1;
uint8_t valid;
/*
* This field is used in Output records to indicate that the
@@ -3199,6 +3337,14 @@ struct hwrm_func_cfg_input {
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
UINT32_C(0x1000)
+ /*
+ * This bit requests that the firmware test to see if all the
+ * assets requested in this command (e.g. number of TX rings)
+ * are available. The firmware will return an error if the
+ * requested assets are not available. The firmware will NOT
+ * reserve the assets if they are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000)
uint32_t enables;
/* This bit must be '1' for the mtu field to be configured. */
#define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
@@ -4236,123 +4382,6 @@ struct hwrm_func_buf_unrgtr_output {
*/
} __attribute__((packed));
-/* hwrm_func_vf_cfg */
-/*
- * Description: This command allows configuration of a VF by its driver. If this
- * function is called by a PF driver, then the HWRM shall fail this command. If
- * guest VLAN and/or MAC address are provided in this command, then the HWRM
- * shall set up appropriate MAC/VLAN filters for the VF that is being
- * configured. A VF driver should set VF MTU/MRU using this command prior to
- * allocating RX VNICs or TX rings for the corresponding VF.
- */
-/* Input (32 bytes) */
-
-struct hwrm_func_vf_cfg_input {
- uint16_t req_type;
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
- uint16_t cmpl_ring;
- /*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
- */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
- /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
- */
- uint32_t enables;
- /* This bit must be '1' for the mtu field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
- /* This bit must be '1' for the guest_vlan field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
- /*
- * This bit must be '1' for the async_event_cr field to be configured.
- */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
- /* This bit must be '1' for the dflt_mac_addr field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
- uint16_t mtu;
- /*
- * The maximum transmission unit requested on the function. The HWRM
- * should make sure that the mtu of the function does not exceed the mtu
- * of the physical port that this function is associated with. In
- * addition to requesting mtu per function, it is possible to configure
- * mtu per transmit ring. By default, the mtu of each transmit ring
- * associated with a function is equal to the mtu of the function. The
- * HWRM should make sure that the mtu of each transmit ring that is
- * assigned to a function has a valid mtu.
- */
- uint16_t guest_vlan;
- /*
- * The guest VLAN for the function being configured. This field's format
- * is same as 802.1Q Tag's Tag Control Information (TCI) format that
- * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
- */
- uint16_t async_event_cr;
- /*
- * ID of the target completion ring for receiving asynchronous event
- * completions. If this field is not valid, then the HWRM shall use the
- * default completion ring of the function that is being configured as
- * the target completion ring for providing any asynchronous event
- * completions for that function. If this field is valid, then the HWRM
- * shall use the completion ring identified by this ID as the target
- * completion ring for providing any asynchronous event completions for
- * the function that is being configured.
- */
- uint8_t dflt_mac_addr[6];
- /*
- * This value is the current MAC address requested by the VF driver to
- * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
- * MAC address configuration is requested by the VF driver. The parent
- * PF driver may reject or overwrite this MAC address.
- */
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-
-struct hwrm_func_vf_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
- /*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
- */
-} __attribute__((packed));
-
/* hwrm_port_phy_cfg */
/*
* Description: This command configures the PHY device for the port. It allows
@@ -4917,12 +4946,12 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t duplex;
+ uint8_t duplex_cfg;
/* This value indicates the duplex of the current connection. */
/* Half Duplex connection. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0)
/* Full duplex connection. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1)
uint8_t pause;
/*
* This value is used to indicate the current pause
@@ -5250,6 +5279,11 @@ struct hwrm_port_phy_qcfg_output {
/* 40G_ACTIVE_CABLE */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
UINT32_C(0x18)
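+ /* 1G_baseT */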
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19)
+ /* 1G_baseSX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a)
+ /* 1G_baseCX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b)
uint8_t media_type;
/* This value represents a media type. */
/* Unknown */
@@ -5576,8 +5610,16 @@ struct hwrm_port_phy_qcfg_output {
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
UINT32_C(0x40)
+ uint8_t duplex_state;
+ /*
+ * This value indicates the duplex of the current connection
+ * state.
+ */
+ /* Half Duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0)
+ /* Full duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1)
uint8_t unused_1;
- uint8_t unused_2;
char phy_vendor_name[16];
/*
* Up to 16 bytes of null padded ASCII string representing PHY
@@ -5591,10 +5633,10 @@ struct hwrm_port_phy_qcfg_output {
* to null, then the vendor specific part number is not
* available.
*/
- uint32_t unused_3;
+ uint32_t unused_2;
+ uint8_t unused_3;
uint8_t unused_4;
uint8_t unused_5;
- uint8_t unused_6;
uint8_t valid;
/*
* This field is used in Output records to indicate that the
@@ -7314,6 +7356,14 @@ struct hwrm_vnic_cfg_input {
* that is used for computing RSS hash only.
*/
#define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is being configured to receive
+ * both RoCE and non-RoCE traffic, but forward only the RoCE
+ * traffic further. Also, RoCE traffic can be mirrored to L2
+ * driver.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
+ UINT32_C(0x40)
uint32_t enables;
/*
* This bit must be '1' for the dflt_ring_grp field to be
@@ -7523,6 +7573,13 @@ struct hwrm_vnic_qcfg_output {
* is not configured.
*/
#define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured to receive both
+ * RoCE and non-RoCE traffic, but forward only RoCE traffic
+ * further. Also RoCE traffic can be mirrored to L2 driver.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
+ UINT32_C(0x40)
uint32_t unused_2;
uint8_t unused_3;
uint8_t unused_4;
@@ -7538,6 +7595,183 @@ struct hwrm_vnic_qcfg_output {
*/
} __attribute__((packed));
+
+/* hwrm_vnic_tpa_cfg */
+/* Description: This function is used to enable/configure TPA on the VNIC. */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of non-tunneled TCP
+ * packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Windows
+ * Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Linux
+ * Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for TCP packets with IP
+ * ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for GRE tunneled TCP
+ * packets only if all packets have the same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP/IPv4 packets with consecutively increasing
+ * IPIDs. In other words, the last packet that is being
+ * aggregated to an already existing aggregation context shall
+ * have IPID 1 more than the IPID of the last packet that was
+ * aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
+ * (IPv6) value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
+ uint32_t enables;
+ /* This bit must be '1' for the max_agg_segs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /* This bit must be '1' for the max_aggs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the max_agg_timer field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /* This bit must be '1' for the min_agg_len field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ uint16_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t max_agg_segs;
+ /*
+ * This is the maximum number of TCP segments that can be
+ * aggregated (unit is Log2). Max value is 31.
+ */
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ uint16_t max_aggs;
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7.
+ */
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_agg_timer;
+ /*
+ * This is the maximum amount of time allowed for an aggregation
+ * context to complete after it was initiated.
+ */
+ uint32_t min_agg_len;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
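
A hedged sketch of requesting TPA with GRO semantics on a VNIC. The aggregation limits are log2-encoded: MAX_AGG_SEGS_8 is 0x3, i.e. 2^3 = 8 TCP segments per aggregation, and MAX_AGGS_16 is 0x4, i.e. 2^4 = 16 concurrent aggregations. Byte-order conversion and the mailbox send are omitted:

static void tpa_cfg_fill(struct hwrm_vnic_tpa_cfg_input *req,
			 uint16_t vnic_id)
{
	req->flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
		     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
	req->enables = HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		       HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS;
	req->vnic_id = vnic_id;
	req->max_agg_segs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8; /* 2^3 */
	req->max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16;        /* 2^4 */
}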
/* hwrm_vnic_rss_cfg */
/* Description: This function is used to enable RSS configuration. */
/* Input (48 bytes) */
@@ -7651,7 +7885,6 @@ struct hwrm_vnic_rss_cfg_output {
* the VNIC.
*/
/* Input (40 bytes) */
-
struct hwrm_vnic_plcmodes_cfg_input {
uint16_t req_type;
/*
@@ -7770,7 +8003,6 @@ struct hwrm_vnic_plcmodes_cfg_input {
} __attribute__((packed));
/* Output (16 bytes) */
-
struct hwrm_vnic_plcmodes_cfg_output {
uint16_t error_code;
/*
@@ -7807,7 +8039,6 @@ struct hwrm_vnic_plcmodes_cfg_output {
* of the VNIC.
*/
/* Input (24 bytes) */
-
struct hwrm_vnic_plcmodes_qcfg_input {
uint16_t req_type;
/*
@@ -7840,7 +8071,6 @@ struct hwrm_vnic_plcmodes_qcfg_input {
} __attribute__((packed));
/* Output (24 bytes) */
-
struct hwrm_vnic_plcmodes_qcfg_output {
uint16_t error_code;
/*
@@ -8065,182 +8295,6 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
*/
} __attribute__((packed));
-/* hwrm_vnic_tpa_cfg */
-/* Description: This function is used to enable/configure TPA on the VNIC. */
-/* Input (40 bytes) */
-struct hwrm_vnic_tpa_cfg_input {
- uint16_t req_type;
- /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
- */
- uint16_t cmpl_ring;
- /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
- */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
- /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
- */
- uint32_t flags;
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of non-tunneled TCP
- * packets.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of tunneled TCP packets.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Windows
- * Receive Segment Coalescing (RSC) rules.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Linux
- * Generic Receive Offload (GRO) rules.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for TCP packets with IP
- * ECN set to non-zero.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
- /*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for GRE tunneled TCP
- * packets only if all packets have the same GRE sequence.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
- UINT32_C(0x20)
- /*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP/IPv4 packets with consecutively increasing
- * IPIDs. In other words, the last packet that is being
- * aggregated to an already existing aggregation context shall
- * have IPID 1 more than the IPID of the last packet that was
- * aggregated in that aggregation context.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
- /*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
- * (IPv6) value.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
- uint32_t enables;
- /* This bit must be '1' for the max_agg_segs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
- /* This bit must be '1' for the max_aggs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
- /*
- * This bit must be '1' for the max_agg_timer field to be
- * configured.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
- /* This bit must be '1' for the min_agg_len field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
- uint16_t vnic_id;
- /* Logical vnic ID */
- uint16_t max_agg_segs;
- /*
- * This is the maximum number of TCP segments that can be
- * aggregated (unit is Log2). Max value is 31.
- */
- /* 1 segment */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
- /* 2 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
- /* 4 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
- /* 8 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
- /* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- uint16_t max_aggs;
- /*
- * This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7
- */
- /* 1 aggregation */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
- /* 2 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
- /* 4 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
- /* 8 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
- /* 16 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
- /* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t max_agg_timer;
- /*
- * This is the maximum amount of time allowed for an aggregation
- * context to complete after it was initiated.
- */
- uint32_t min_agg_len;
- /*
- * This is the minimum amount of payload length required to
- * start an aggregation context.
- */
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_vnic_tpa_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
- */
-} __attribute__((packed));
-
/* hwrm_ring_alloc */
/*
* Description: This command allocates and does basic preparation for a ring.
@@ -9046,6 +9100,12 @@ struct hwrm_cfa_l2_filter_alloc_input {
* datagram payload
*/
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ /*
+ * IPV4 over virtual eXtensible Local Area
+ * Network (IPV4oVXLAN)
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
@@ -9471,6 +9531,25 @@ struct hwrm_cfa_l2_set_rx_mask_output {
*/
} __attribute__((packed));
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ uint8_t code;
+ /*
+ * command-specific error codes that go to the cmd_err field
+ * in Common HWRM Error Response.
+ */
+ /* Unknown error */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /*
+ * Unable to complete operation due to conflict
+ * with Ntuple Filter
+ */
+ #define \
+ HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
+ UINT32_C(0x1)
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
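
A hedged sketch of turning the command-specific error byte into a driver errno; the surrounding error-response plumbing is assumed, not shown:

#include <errno.h>

static int rx_mask_cmd_err(const struct hwrm_cfa_l2_set_rx_mask_cmd_err *e)
{
	if (e->code ==
	    HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR)
		return -EBUSY;	/* conflicts with an installed ntuple filter */
	return -EIO;		/* UNKNOWN or unrecognized code */
}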
/* hwrm_cfa_vlan_antispoof_cfg */
/* Description: Configures vlan anti-spoof filters for VF. */
/* Input (32 bytes) */
@@ -9550,6 +9629,1010 @@ struct hwrm_cfa_vlan_antispoof_cfg_output {
*/
};
+/* hwrm_cfa_ntuple_filter_alloc */
+/*
+ * Description: This is an ntuple filter that uses fields from L4/L3 header and
+ * optionally fields from L2. The ntuple filters apply to receive traffic only.
+ * All L2/L3/L4 header fields are specified in network byte order. These filters
+ * can be used for Receive Flow Steering (RFS). # For ethertype value, only
+ * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a
+ * field specified in this command is not enabled as a valid field, then that
+ * field shall not be used in matching packet header fields against this filter.
+ */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * Setting of this flag indicates the applicability to the
+ * loopback path.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /*
+ * Setting of this flag indicates drop action. If this flag is
+ * not set, then it should be considered accept action.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates that a meter is expected to be
+ * attached to this flow. This hint can be used when choosing
+ * the action record format required for the flow.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4)
+ uint32_t enables;
+ /* This bit must be '1' for the l2_filter_id field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /* This bit must be '1' for the ethertype field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the tunnel_type field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4)
+ /* This bit must be '1' for the src_macaddr field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x8)
+ /* This bit must be '1' for the ipaddr_type field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x10)
+ /* This bit must be '1' for the src_ipaddr field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the src_ipaddr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \
+ UINT32_C(0x40)
+ /* This bit must be '1' for the dst_ipaddr field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the dst_ipaddr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \
+ UINT32_C(0x100)
+ /* This bit must be '1' for the ip_protocol field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x200)
+ /* This bit must be '1' for the src_port field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the src_port_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
+ UINT32_C(0x800)
+ /* This bit must be '1' for the dst_port field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the dst_port_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
+ UINT32_C(0x2000)
+ /* This bit must be '1' for the pri_hint field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the ntuple_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \
+ UINT32_C(0x8000)
+ /* This bit must be '1' for the dst_id field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x20000)
+ /* This bit must be '1' for the dst_macaddr field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x40000)
+ uint64_t l2_filter_id;
+ /*
+ * This value identifies a set of CFA data structures used for
+ * an L2 context.
+ */
+ uint8_t src_macaddr[6];
+ /*
+ * This value indicates the source MAC address in the Ethernet
+ * header.
+ */
+ uint16_t ethertype;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint8_t ip_addr_type;
+ /*
+ * This value indicates the type of IP address. 4 - IPv4, 6 -
+ * IPv6. All others are invalid.
+ */
+ /* invalid */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ uint8_t ip_protocol;
+ /*
+ * The value of the protocol field in the IP header. Applies to UDP
+ * and TCP traffic. 6 - TCP, 17 - UDP.
+ */
+ /* invalid */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the Logical VNIC ID of the
+ * destination VNIC for the RX path and network port id of the
+ * destination port for the TX path.
+ */
+ uint16_t mirror_vnic_id;
+ /* Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint8_t tunnel_type;
+ /*
+ * This value indicates the tunnel type for this filter. If this
+ * field is not specified, then the filter shall apply to both
+ * non-tunneled and tunneled packets. If this field conflicts
+ * with the tunnel_type specified in the l2_filter_id, then the
+ * HWRM shall return an error for this command.
+ */
+ /* Non-tunnel */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /*
+ * Network Virtualization Generic Routing
+ * Encapsulation (NVGRE)
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /*
+ * Generic Routing Encapsulation (GRE) inside
+ * Ethernet payload
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ /*
+ * Generic Routing Encapsulation (GRE) inside IP
+ * datagram payload
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ uint8_t pri_hint;
+ /*
+ * This hint is provided to help in placing the filter in the
+ * filter table.
+ */
+ /* No preference */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ UINT32_C(0x0)
+ /* Above the given filter */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1)
+ /* Below the given filter */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2)
+ /* As high as possible */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \
+ UINT32_C(0x3)
+ /* As low as possible */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4)
+ uint32_t src_ipaddr[4];
+ /*
+ * The value of source IP address to be used in filtering. For
+ * IPv4, first four bytes represent the IP address.
+ */
+ uint32_t src_ipaddr_mask[4];
+ /*
+ * The value of source IP address mask to be used in filtering.
+ * For IPv4, first four bytes represent the IP address mask.
+ */
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
+ */
+ uint32_t dst_ipaddr_mask[4];
+ /*
+ * The value of destination IP address mask to be used in
+ * filtering. For IPv4, first four bytes represent the IP
+ * address mask.
+ */
+ uint16_t src_port;
+ /*
+ * The value of source port to be used in filtering. Applies to
+ * UDP and TCP traffic.
+ */
+ uint16_t src_port_mask;
+ /*
+ * The value of source port mask to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port_mask;
+ /*
+ * The value of destination port mask to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint64_t ntuple_filter_id_hint;
+ /* This is the ID of the filter that goes along with the pri_hint. */
+} __attribute__((packed));
+
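
A hedged sketch of an IPv4/TCP ntuple filter keyed on destination address and port. Per the description above, header fields are supplied in network byte order, and l2_filter_id is assumed to come from an earlier hwrm_cfa_l2_filter_alloc:

static void ntuple_fill(struct hwrm_cfa_ntuple_filter_alloc_input *req,
			uint64_t l2_filter_id, uint32_t dst_ip_be,
			uint16_t dst_port_be)
{
	req->enables =
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
	req->l2_filter_id = l2_filter_id;
	req->ip_addr_type =
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol =
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	req->dst_ipaddr[0] = dst_ip_be;	/* IPv4 occupies the first word */
	req->dst_port = dst_port_be;
}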
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint64_t ntuple_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t flow_id;
+ /*
+ * This is the ID of the flow associated with this filter. This
+ * value shall be used to match and associate the flow
+ * identifier returned in completion records. A value of
+ * 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ uint8_t code;
+ /*
+ * command-specific error codes that go to the cmd_err field
+ * in Common HWRM Error Response.
+ */
+ /* Unknown error */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /*
+ * Unable to complete operation due to conflict
+ * with Rx Mask VLAN
+ */
+ #define \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
+ UINT32_C(0x1)
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Description: Free an ntuple filter */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t ntuple_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/*
+ * Description: Configure an ntuple filter with a new destination VNIC and/or
+ * meter.
+ */
+/* Input (48 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the new_dst_id field to be configured. */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the new_meter_instance_id field to
+ * be configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ UINT32_C(0x4)
+ uint32_t unused_0;
+ uint64_t ntuple_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t new_dst_id;
+ /*
+ * If set, this value shall represent the new Logical VNIC ID of
+ * the destination VNIC for the RX path and new network port id
+ * of the destination port for the TX path.
+ */
+ uint32_t new_mirror_vnic_id;
+ /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint16_t new_meter_instance_id;
+ /*
+ * New meter to attach to the flow. Specifying the invalid
+ * instance ID removes any existing meter from the
+ * flow.
+ */
+ /*
+ * A value of 0xffff is considered invalid and
+ * implies the instance is not configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ uint16_t unused_1[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
+/* hwrm_cfa_em_flow_alloc */
+/*
+ * Description: This is a generic Exact Match (EM) flow that uses fields from
+ * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All
+ * L2/L3/L4 header fields are specified in network byte order. For each EM flow,
+ * there is an associated set of actions specified. For tunneled packets, all
+ * L2/L3/L4 fields specified are fields of inner headers unless otherwise
+ * specified. # If a field specified in this command is not enabled as a valid
+ * field, then that field shall not be used in matching packet header fields
+ * against this EM flow entry.
+ */
+/* Input (112 bytes) */
+struct hwrm_cfa_em_flow_alloc_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
+ /* rx path */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates enabling of a byte counter for
+ * a given flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates enabling of a packet counter
+ * for a given flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
+ /*
+ * Setting of this flag indicates de-capsulation action for the
+ * given flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
+ /*
+ * Setting of this flag indicates encapsulation action for the
+ * given flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
+ /*
+ * Setting of this flag indicates drop action. If this flag is
+ * not set, then it should be considered accept action.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
+ /*
+ * Setting of this flag indicates that a meter is expected to be
+ * attached to this flow. This hint can be used when choosing
+ * the action record format required for the flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
+ uint32_t enables;
+ /* This bit must be '1' for the l2_filter_id field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1)
+ /* This bit must be '1' for the tunnel_type field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2)
+ /* This bit must be '1' for the tunnel_id field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4)
+ /* This bit must be '1' for the src_macaddr field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8)
+ /* This bit must be '1' for the dst_macaddr field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10)
+ /* This bit must be '1' for the ovlan_vid field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20)
+ /* This bit must be '1' for the ivlan_vid field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40)
+ /* This bit must be '1' for the ethertype field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80)
+ /* This bit must be '1' for the src_ipaddr field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100)
+ /* This bit must be '1' for the dst_ipaddr field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200)
+ /* This bit must be '1' for the ipaddr_type field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400)
+ /* This bit must be '1' for the ip_protocol field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800)
+ /* This bit must be '1' for the src_port field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000)
+ /* This bit must be '1' for the dst_port field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x2000)
+ /* This bit must be '1' for the dst_id field to be configured. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the encap_record_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the meter_instance_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
+ UINT32_C(0x20000)
+ uint64_t l2_filter_id;
+ /*
+ * This value identifies a set of CFA data structures used for
+ * an L2 context.
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Non-tunnel */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /*
+ * Network Virtualization Generic Routing
+ * Encapsulation (NVGRE)
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+ /*
+ * Generic Routing Encapsulation (GRE) inside
+ * Ethernet payload
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ /*
+ * Generic Routing Encapsulation (GRE) inside IP
+ * datagram payload
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ /*
+ * IPV4 over virtual eXtensible Local Area
+ * Network (IPV4oVXLAN)
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ uint8_t unused_0;
+ uint16_t unused_1;
+ uint32_t tunnel_id;
+ /*
+ * Tunnel identifier. Virtual Network Identifier (VNI). Only
+ * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only the
+ * lower 24 bits of the VNI field are used in setting up the filter.
+ */
+ uint8_t src_macaddr[6];
+ /*
+ * This value indicates the source MAC address in the Ethernet
+ * header.
+ */
+ uint16_t meter_instance_id;
+ /* The meter instance to attach to the flow. */
+ /*
+ * A value of 0xffff is considered invalid and
+ * implies the instance is not configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ uint8_t dst_macaddr[6];
+ /*
+ * This value indicates the destination MAC address in the
+ * Ethernet header.
+ */
+ uint16_t ovlan_vid;
+ /*
+ * This value indicates the VLAN ID of the outer VLAN tag in the
+ * Ethernet header.
+ */
+ uint16_t ivlan_vid;
+ /*
+ * This value indicates the VLAN ID of the inner VLAN tag in the
+ * Ethernet header.
+ */
+ uint16_t ethertype;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint8_t ip_addr_type;
+ /*
+ * This value indicates the type of IP address: 4 - IPv4,
+ * 6 - IPv6. All others are invalid.
+ */
+ /* invalid */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
+ uint8_t ip_protocol;
+ /*
+ * The value of the protocol field in the IP header. Applies to
+ * UDP and TCP traffic: 6 - TCP, 17 - UDP.
+ */
+ /* invalid */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint32_t src_ipaddr[4];
+ /*
+ * The value of source IP address to be used in filtering. For
+ * IPv4, first four bytes represent the IP address.
+ */
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of destination IP address to be used in filtering,
+ * stored in big-endian (network) byte order. For IPv4, first
+ * four bytes represent the IP address.
+ */
+ uint16_t src_port;
+ /*
+ * The value of source port to be used in filtering. Applies to
+ * UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the Logical VNIC ID of the
+ * destination VNIC for the RX path and network port id of the
+ * destination port for the TX path.
+ */
+ uint16_t mirror_vnic_id;
+ /* Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint32_t encap_record_id;
+ /* Logical ID of the encapsulation record. */
+ uint32_t unused_4;
+} __attribute__((packed));
+
+/* Output (24 bytes) */
+struct hwrm_cfa_em_flow_alloc_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint64_t em_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t flow_id;
+ /*
+ * This is the ID of the flow associated with this filter. This
+ * value shall be used to match and associate the flow
+ * identifier returned in completion records. A value of
+ * 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
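
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * populating the EM flow alloc request for an RX IPv4/TCP 5-tuple match
 * steered to a destination VNIC. L3/L4 fields are in network byte
 * order, hence the rte_cpu_to_be_*() conversions; l2_filter_id is
 * assumed to come from an earlier L2 filter allocation.
 */
#include <string.h>
#include <rte_byteorder.h>

static void
example_em_flow_alloc_fill(struct hwrm_cfa_em_flow_alloc_input *req,
			   uint64_t l2_filter_id,
			   uint32_t src_ip, uint32_t dst_ip,
			   uint16_t src_port, uint16_t dst_port,
			   uint16_t dst_vnic)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
	req->enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req->l2_filter_id = l2_filter_id;
	req->ip_addr_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP;
	/* For IPv4 only the first word of each address array is used. */
	req->src_ipaddr[0] = rte_cpu_to_be_32(src_ip);
	req->dst_ipaddr[0] = rte_cpu_to_be_32(dst_ip);
	req->src_port = rte_cpu_to_be_16(src_port);
	req->dst_port = rte_cpu_to_be_16(dst_port);
	req->dst_id = dst_vnic;
}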
+
+/* hwrm_cfa_em_flow_free */
+/* Description: Free an EM flow table entry */
+/* Input (24 bytes) */
+struct hwrm_cfa_em_flow_free_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t em_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_em_flow_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
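
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * freeing an EM flow only requires echoing back the opaque em_filter_id
 * returned by the alloc response.
 */
#include <string.h>

static void
example_em_flow_free_fill(struct hwrm_cfa_em_flow_free_input *req,
			  uint64_t em_filter_id)
{
	memset(req, 0, sizeof(*req));
	req->em_filter_id = em_filter_id;
}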
+
+/* hwrm_cfa_em_flow_cfg */
+/*
+ * Description: Configure an EM flow with a new destination VNIC and/or meter.
+ */
+/* Input (48 bytes) */
+struct hwrm_cfa_em_flow_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
+ /* This bit must be '1' for the new_dst_id field to be configured. */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the new_meter_instance_id field to
+ * be configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ UINT32_C(0x4)
+ uint32_t unused_0;
+ uint64_t em_filter_id;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t new_dst_id;
+ /*
+ * If set, this value shall represent the new Logical VNIC ID of
+ * the destination VNIC for the RX path and network port id of
+ * the destination port for the TX path.
+ */
+ uint32_t new_mirror_vnic_id;
+ /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint16_t new_meter_instance_id;
+ /*
+ * New meter to attach to the flow. Specifying the invalid
+ * instance ID is used to remove any existing meter from the
+ * flow.
+ */
+ /*
+ * A value of 0xffff is considered invalid and
+ * implies the instance is not configured.
+ */
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ uint16_t unused_1[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_em_flow_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
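
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * re-pointing an existing EM flow at a new destination VNIC while
 * leaving its mirror and meter settings untouched, since only the
 * new_dst_id enable bit is set.
 */
#include <string.h>

static void
example_em_flow_retarget(struct hwrm_cfa_em_flow_cfg_input *req,
			 uint64_t em_filter_id, uint32_t new_vnic)
{
	memset(req, 0, sizeof(*req));
	req->enables = HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID;
	req->em_filter_id = em_filter_id;
	req->new_dst_id = new_vnic;
}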
+
/* hwrm_tunnel_dst_port_query */
/*
* Description: This function is called by a driver to query tunnel type
@@ -9591,6 +10674,12 @@ struct hwrm_tunnel_dst_port_query_input {
/* Generic Network Virtualization Encapsulation (Geneve) */
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
UINT32_C(0x5)
+ /*
+ * IPV4 over virtual eXtensible Local Area
+ * Network (IPV4oVXLAN)
+ */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
uint8_t unused_0[7];
} __attribute__((packed));
@@ -9691,6 +10780,12 @@ struct hwrm_tunnel_dst_port_alloc_input {
/* Generic Network Virtualization Encapsulation (Geneve) */
#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
UINT32_C(0x5)
+ /*
+ * IPV4 over virtual eXtensible Local Area
+ * Network (IPV4oVXLAN)
+ */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
uint8_t unused_0;
uint16_t tunnel_dst_port_val;
/*
@@ -9781,6 +10876,12 @@ struct hwrm_tunnel_dst_port_free_input {
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
/* Generic Network Virtualization Encapsulation (Geneve) */
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ /*
+ * IPV4 over virtual eXtensible Local Area
+ * Network (IPV4oVXLAN)
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
uint8_t unused_0;
uint16_t tunnel_dst_port_id;
/*
@@ -9984,77 +11085,9 @@ struct hwrm_stat_ctx_free_output {
*/
} __attribute__((packed));
-/* hwrm_stat_ctx_clr_stats */
-/* Description: This command clears statistics of a context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_clr_stats_input {
- uint16_t req_type;
- /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
- */
- uint16_t cmpl_ring;
- /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
- */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
- /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
- */
- uint64_t resp_addr;
- /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
- */
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_clr_stats_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
- */
-} __attribute__((packed));
-
/* hwrm_stat_ctx_query */
/* Description: This command returns statistics of a context. */
/* Input (24 bytes) */
-
struct hwrm_stat_ctx_query_input {
uint16_t req_type;
/*
@@ -10087,7 +11120,6 @@ struct hwrm_stat_ctx_query_input {
} __attribute__((packed));
/* Output (176 bytes) */
-
struct hwrm_stat_ctx_query_output {
uint16_t error_code;
/*
@@ -10158,6 +11190,73 @@ struct hwrm_stat_ctx_query_output {
*/
} __attribute__((packed));
+/* hwrm_stat_ctx_clr_stats */
+/* Description: This command clears statistics of a context. */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t stat_ctx_id;
+ /* ID of the statistics context that is being queried. */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
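
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * clearing the counters of one statistics context. stat_ctx_id is the
 * id returned by the corresponding stat_ctx alloc command, which is
 * defined elsewhere in this file.
 */
#include <string.h>

static void
example_stat_ctx_clr_fill(struct hwrm_stat_ctx_clr_stats_input *req,
			  uint32_t stat_ctx_id)
{
	memset(req, 0, sizeof(*req));
	req->stat_ctx_id = stat_ctx_id;
}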
+
/* hwrm_exec_fwd_resp */
/*
* Description: This command is used to send an encapsulated request to the
@@ -10331,6 +11430,310 @@ struct hwrm_reject_fwd_resp_output {
*/
} __attribute__((packed));
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint64_t host_dest_addr;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint16_t dir_idx;
+ uint16_t unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t entries;
+ /* Number of directory entries in the directory. */
+ uint32_t entry_length;
+ /* Size of each directory entry, in bytes. */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
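
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * the usual two-step NVM directory walk. hwrm_nvm_get_dir_info reports
 * how many entries exist and how large each one is, so the caller can
 * size a DMA buffer before issuing hwrm_nvm_get_dir_entries with
 * host_dest_addr pointing at it. send_hwrm() is a hypothetical
 * stand-in for the driver's request/response transport.
 */
extern int send_hwrm(void *req, int req_len, void *resp, int resp_len);

static int
example_nvm_walk_directory(uint32_t *entries, uint32_t *entry_length,
			   uint64_t dma_buf_addr)
{
	struct hwrm_nvm_get_dir_info_input info_req = { 0 };
	struct hwrm_nvm_get_dir_info_output info_resp = { 0 };
	struct hwrm_nvm_get_dir_entries_input ent_req = { 0 };

	if (send_hwrm(&info_req, sizeof(info_req),
		      &info_resp, sizeof(info_resp)) != 0)
		return -1;
	*entries = info_resp.entries;
	*entry_length = info_resp.entry_length;

	/* The buffer must cover entries * entry_length bytes. */
	ent_req.host_dest_addr = dma_buf_addr;
	return send_hwrm(&ent_req, sizeof(ent_req), NULL, 0);
}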
+
+/* hwrm_nvm_write */
+/*
+ * Note: Write to the allocated NVRAM of an item referenced by an existing
+ * directory entry.
+ */
+/* Input (48 bytes) */
+struct hwrm_nvm_write_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t host_src_addr;
+ /* 64-bit Host Source Address. This is where the source data is. */
+ uint16_t dir_type;
+ /*
+ * The Directory Entry Type (valid values are defined in the
+ * bnxnvm_directory_type enum defined in the file
+ * bnxnvm_defs.h).
+ */
+ uint16_t dir_ordinal;
+ /*
+ * Directory ordinal. The 0-based instance of the combined
+ * Directory Entry Type and Extension.
+ */
+ uint16_t dir_ext;
+ /*
+ * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the
+ * file bnxnvm_defs.h).
+ */
+ uint16_t dir_attr;
+ /*
+ * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the
+ * file bnxnvm_defs.h).
+ */
+ uint32_t dir_data_length;
+ /*
+ * Length of data to write, in bytes. May be less than or equal
+ * to the allocated size for the directory entry. The data
+ * length stored in the directory entry will be updated to
+ * reflect this value once the write is complete.
+ */
+ uint16_t option;
+ /* Option. */
+ uint16_t flags;
+ /*
+ * When this bit is '1', the original active image will not be
+ * removed. TBD: what purpose is this?
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1)
+ uint32_t dir_item_length;
+ /*
+ * The requested length of the allocated NVM for the item, in
+ * bytes. This value may be greater than or equal to the
+ * specified data length (dir_data_length). If this value is
+ * less than the specified data length, it will be ignored. The
+ * response will contain the actual allocated item length, which
+ * may be greater than the requested item length. The purpose
+ * for allocating more than the required number of bytes for an
+ * item's data is to pre-allocate extra storage (padding) to
+ * accommodate the potential future growth of an item (e.g.
+ * upgraded firmware with a size increase, log growth, expanded
+ * configuration data).
+ */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t dir_item_length;
+ /*
+ * Length of the allocated NVM for the item, in bytes. The value
+ * may be greater than or equal to the specified data length or
+ * the requested item length. The actual item length used when
+ * creating a new directory entry will be a multiple of an NVM
+ * block size.
+ */
+ uint16_t dir_idx;
+ /* The directory index of the created or modified item. */
+ uint8_t unused_0;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
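
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * preparing an NVM write. As the field comments above note, real
 * dir_type, dir_ext and dir_attr codes come from bnxnvm_defs.h; the
 * zero values below are placeholders only. dir_item_length
 * over-allocates so the item can grow in place later.
 */
#include <string.h>

static void
example_nvm_write_fill(struct hwrm_nvm_write_input *req,
		       uint64_t src_dma_addr, uint32_t data_len)
{
	memset(req, 0, sizeof(*req));
	req->host_src_addr = src_dma_addr;
	req->dir_type = 0;	/* placeholder for a BNX_DIR_TYPE_* value */
	req->dir_ordinal = 0;	/* first instance of this type/extension */
	req->dir_ext = 0;	/* placeholder for BNX_DIR_EXT_* flags */
	req->dir_attr = 0;	/* placeholder for BNX_DIR_ATTR_* flags */
	req->dir_data_length = data_len;
	req->dir_item_length = data_len * 2;
	/* Keep the currently active image while writing the new one. */
	req->flags = HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG;
}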
+
+/* hwrm_nvm_read */
+/*
+ * Note: Read the contents of an NVRAM item as referenced (indexed) by an
+ * existing directory entry.
+ */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function
+ * IDs. 0xFFF8 - 0xFFFE: reserved for internal processors.
+ * 0xFFFF: HWRM.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t host_dest_addr;
+ /*
+ * 64-bit Host Destination Address. This is the host address
+ * where the data will be written to.
+ */
+ uint16_t dir_idx;
+ /* The 0-based index of the directory entry. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t offset;
+ /* The NVRAM byte-offset to read from. */
+ uint32_t len;
+ /* The length of the data to be read, in bytes. */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the
+ * input parameters and fail the call with an error when appropriate.
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
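
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * reading a slice of one NVM item into a host DMA buffer; dir_idx
 * selects the directory entry, and offset/len bound the read within it.
 */
#include <string.h>

static void
example_nvm_read_fill(struct hwrm_nvm_read_input *req, uint16_t dir_idx,
		      uint64_t dst_dma_addr, uint32_t offset, uint32_t len)
{
	memset(req, 0, sizeof(*req));
	req->dir_idx = dir_idx;
	req->host_dest_addr = dst_dma_addr;
	req->offset = offset;
	req->len = len;
}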
+
/* Hardware Resource Manager Specification */
/* Description: This structure is used to specify port description. */
/*
@@ -10391,11 +11794,28 @@ struct output {
/* Short Command Structure (16 bytes) */
struct hwrm_short_input {
uint16_t req_type;
+ /*
+ * This field indicates the type of request in the request
+ * buffer. The format for the rest of the command (request) is
+ * determined by this field.
+ */
uint16_t signature;
- #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD (UINT32_C(0x4321))
+ /*
+ * This field indicates a signature that is used to identify
+ * short form of the command listed here. This field shall be
+ * set to 17185 (0x4321).
+ */
+ /* Signature indicating this is a short form of HWRM command */
+ #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
uint16_t unused_0;
+ /* Reserved for future use. */
uint16_t size;
+ /* This value indicates the length of the request. */
uint64_t req_addr;
+ /*
+ * This is the host address where the request was written. This
+ * area must be 16B aligned.
+ */
} __attribute__((packed));
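
/*
 * Illustrative sketch (editor's addition, not part of the 17.11 patch):
 * wrapping a full request that has already been written to a
 * 16B-aligned DMA area into the 16-byte short command form. req_type is
 * assumed to match the command stored at req_dma_addr.
 */
#include <string.h>

static void
example_short_cmd_fill(struct hwrm_short_input *short_req,
		       uint16_t req_type, uint16_t req_len,
		       uint64_t req_dma_addr)
{
	memset(short_req, 0, sizeof(*short_req));
	short_req->req_type = req_type;
	short_req->signature = HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD;
	short_req->size = req_len;
	short_req->req_addr = req_dma_addr;
}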
#define HWRM_GET_HWRM_ERROR_CODE(arg) \
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index c343d903..a3134074 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -67,7 +67,7 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
true : false;
}
-int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
@@ -108,12 +108,12 @@ rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
vnic->bd_stall = !(*on);
}
-int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
uint32_t i;
- int rc;
+ int rc = -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
@@ -159,7 +159,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
return rc;
}
-int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
@@ -191,7 +191,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
return rc;
}
-int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *eth_dev;
@@ -241,7 +241,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
return rc;
}
-int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
@@ -294,7 +294,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
return rc;
}
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
@@ -322,9 +322,6 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
if (vf >= dev_info.max_vfs)
return -EINVAL;
- if (on == bp->pf.vf_info[vf].vlan_spoof_en)
- return 0;
-
rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
if (!rc) {
bp->pf.vf_info[vf].vlan_spoof_en = on;
@@ -350,7 +347,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
}
int
-rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
@@ -385,7 +382,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
return rc;
}
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on)
{
struct rte_eth_dev *dev;
@@ -409,20 +406,19 @@ int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
if (vf >= bp->pdev->max_vfs)
return -EINVAL;
- if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
- RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
- return -ENOTSUP;
- }
+ /* Is this really the correct mapping? VFd seems to think it is. */
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ flag |= BNXT_VNIC_INFO_PROMISC;
if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
flag |= BNXT_VNIC_INFO_BCAST;
if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
- flag |= BNXT_VNIC_INFO_ALLMULTI;
+ flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
if (on)
bp->pf.vf_info[vf].l2_rx_mask |= flag;
@@ -477,7 +473,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
return rc;
}
-int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on)
{
struct bnxt_vlan_table_entry *ve;
@@ -570,7 +566,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
return rc;
}
-int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats)
{
@@ -598,7 +594,7 @@ int rte_pmd_bnxt_get_vf_stats(uint8_t port,
return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
}
-int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
uint16_t vf_id)
{
struct rte_eth_dev *dev;
@@ -625,7 +621,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
}
-int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
@@ -651,7 +647,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
return bnxt_vf_vnic_count(bp, vf_id);
}
-int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
uint64_t *count)
{
struct rte_eth_dev *dev;
@@ -679,7 +675,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
count);
}
-int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
uint32_t vf_id)
{
struct rte_eth_dev *dev;
@@ -710,7 +706,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
/* If the VF currently uses a random MAC, update default to this one */
if (bp->pf.vf_info[vf_id].random_mac) {
if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
- rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
+ bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
}
/* query the default VNIC id used by the function */
@@ -731,7 +727,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
(HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
- bnxt_hwrm_clear_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
break;
}
}
@@ -749,14 +745,14 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
/* Do not add a filter for the default MAC */
if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
- rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);
exit:
return rc;
}
int
-rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id)
{
struct rte_eth_dev *dev;
@@ -793,7 +789,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
return rc;
}
-int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index c4c4770e..f881d30d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.h
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -78,7 +78,7 @@ struct rte_pmd_bnxt_mb_event_param {
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Set the VF MAC address.
@@ -94,7 +94,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr);
/**
@@ -115,7 +115,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable vf vlan insert
@@ -134,7 +134,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id);
/**
@@ -156,7 +156,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on);
/**
@@ -173,7 +173,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on);
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on);
/**
* set all queues drop enable bit
@@ -189,7 +189,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on);
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on);
/**
* Set the VF rate limit.
@@ -207,7 +207,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
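
/*
 * Illustrative usage sketch (editor's addition, not part of this patch):
 * capping VF 1 on port 0 to tx_rate 1000 on queue 0 only. The port, VF
 * and rate values are hypothetical.
 */
#include <rte_pmd_bnxt.h>

static int
example_limit_vf_rate(void)
{
	uint64_t q_msk = 1ULL << 0;	/* apply the limit to queue 0 only */

	return rte_pmd_bnxt_set_vf_rate_limit(0, 1, 1000, q_msk);
}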
/**
@@ -226,7 +226,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats);
@@ -242,7 +242,7 @@ int rte_pmd_bnxt_get_vf_stats(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
uint16_t vf_id);
/**
@@ -261,7 +261,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Set RX L2 Filtering mode of a VF of an Ethernet device.
@@ -280,7 +280,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on);
/**
@@ -297,7 +297,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
* - (-ENOMEM) on an allocation failure
* - (-1) firmware interface error
*/
-int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id);
/**
* Queries the TX drop counter for the function
@@ -313,7 +313,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
* - (-EINVAL) invalid vf_id specified.
* - (-ENOTSUP) Ethernet device is not a PF
*/
-int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
uint64_t *count);
/**
@@ -331,7 +331,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
* - (-ENOTSUP) Ethernet device is not a PF
* - (-ENOMEM) on an allocation failure
*/
-int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
uint32_t vf_id);
/**
@@ -350,5 +350,5 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on);
#endif /* _PMD_BNXT_H_ */
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 910c932d..dea1bd5c 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -38,10 +38,14 @@ LIB = librte_pmd_bond.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cmdline
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
-EXPORT_MAP := rte_eth_bond_version.map
+EXPORT_MAP := rte_pmd_bond_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h
index 8efbf071..87ff2917 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -151,7 +151,7 @@ rte_eth_bond_free(const char *name);
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Remove a slave rte_eth_dev device from the bonded device
@@ -163,7 +163,7 @@ rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id);
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Set link bonding mode of bonded device
@@ -175,7 +175,7 @@ rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id);
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode);
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode);
/**
* Get link bonding mode of bonded device
@@ -186,7 +186,7 @@ rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode);
* link bonding mode on success, negative value otherwise
*/
int
-rte_eth_bond_mode_get(uint8_t bonded_port_id);
+rte_eth_bond_mode_get(uint16_t bonded_port_id);
/**
* Set slave rte_eth_dev as primary slave of bonded device
@@ -198,7 +198,7 @@ rte_eth_bond_mode_get(uint8_t bonded_port_id);
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Get primary slave of bonded device
@@ -209,7 +209,7 @@ rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id);
* Port Id of primary slave on success, -1 on failure
*/
int
-rte_eth_bond_primary_get(uint8_t bonded_port_id);
+rte_eth_bond_primary_get(uint16_t bonded_port_id);
/**
* Populate an array with list of the slaves port id's of the bonded device
@@ -223,7 +223,8 @@ rte_eth_bond_primary_get(uint8_t bonded_port_id);
* negative value otherwise
*/
int
-rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len);
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
/**
* Populate an array with list of the active slaves port id's of the bonded
@@ -238,8 +239,8 @@ rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len);
* negative value otherwise
*/
int
-rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
- uint8_t len);
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
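
/*
 * Illustrative usage sketch (editor's addition, not part of this patch):
 * enumerating active slaves with the widened uint16_t port id API; the
 * bonded port id is supplied by the caller.
 */
#include <rte_ethdev.h>
#include <rte_eth_bond.h>

static int
example_count_active_slaves(uint16_t bonded_port_id)
{
	uint16_t slaves[RTE_MAX_ETHPORTS];

	/* Returns the number of active slaves, or a negative value. */
	return rte_eth_bond_active_slaves_get(bonded_port_id, slaves,
					      RTE_MAX_ETHPORTS);
}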
/**
* Set explicit MAC address to use on bonded device and it's slaves.
@@ -252,7 +253,7 @@ rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
struct ether_addr *mac_addr);
/**
@@ -265,7 +266,7 @@ rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mac_address_reset(uint8_t bonded_port_id);
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id);
/**
* Set the transmit policy for bonded device to use when it is operating in
@@ -279,7 +280,7 @@ rte_eth_bond_mac_address_reset(uint8_t bonded_port_id);
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy);
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy);
/**
* Get the transmit policy set on bonded device for balance mode operation
@@ -290,7 +291,7 @@ rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy);
* Balance transmit policy on success, negative value otherwise.
*/
int
-rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id);
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id);
/**
* Set the link monitoring frequency (in ms) for monitoring the link status of
@@ -304,7 +305,7 @@ rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id);
*/
int
-rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms);
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms);
/**
* Get the current link monitoring frequency (in ms) for monitoring of the link
@@ -316,7 +317,7 @@ rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms);
* Monitoring interval on success, negative value otherwise.
*/
int
-rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id);
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id);
/**
@@ -330,7 +331,8 @@ rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id);
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
/**
* Get the period in milliseconds set for delaying the disabling of a bonded
@@ -342,7 +344,7 @@ rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
* Delay period on success, negative value otherwise.
*/
int
-rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id);
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id);
/**
* Set the period in milliseconds for delaying the enabling of a bonded link
@@ -355,7 +357,8 @@ rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id);
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
/**
* Get the period in milliseconds set for delaying the enabling of a bonded
@@ -367,7 +370,7 @@ rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
* Delay period on success, negative value otherwise.
*/
int
-rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id);
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);
#ifdef __cplusplus
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 20b5a896..eee9e502 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -209,7 +209,7 @@ set_warning_flags(struct port *port, uint16_t flags)
}
static void
-show_warnings(uint8_t slave_id)
+show_warnings(uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
uint8_t warnings;
@@ -278,7 +278,7 @@ record_default(struct port *port)
* @param port Port on which LACPDU was received.
*/
static void
-rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
+rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
struct lacpdu *lacp)
{
struct port *agg, *port = &mode_8023ad_ports[slave_id];
@@ -399,7 +399,7 @@ rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
* @param port Port to handle state machine.
*/
static void
-periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
+periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
/* Calculate if either site is LACP enabled */
@@ -461,7 +461,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
* @param port Port to handle state machine.
*/
static void
-mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
+mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
@@ -564,7 +564,7 @@ mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
* @param port
*/
static void
-tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
+tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *agg, *port = &mode_8023ad_ports[slave_id];
@@ -688,11 +688,11 @@ static void
selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
{
struct port *agg, *port;
- uint8_t slaves_count, new_agg_id, i, j = 0;
- uint8_t *slaves;
+ uint16_t slaves_count, new_agg_id, i, j = 0;
+ uint16_t *slaves;
uint64_t agg_bandwidth[8] = {0};
uint64_t agg_count[8] = {0};
- uint8_t default_slave = 0;
+ uint16_t default_slave = 0;
uint8_t mode_count_id, mode_band_id;
struct rte_eth_link link_info;
@@ -923,7 +923,8 @@ bond_mode_8023ad_periodic_cb(void *arg)
}
void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
+ uint16_t slave_id)
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
@@ -951,7 +952,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
memcpy(&port->actor, &initial, sizeof(struct port_params));
/* Standard requires that port ID must be greater than 0.
* Add 1 to get the corresponding port_number */
- port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
+ port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
memcpy(&port->partner, &initial, sizeof(struct port_params));
@@ -1021,37 +1022,30 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
}
int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
- uint8_t slave_id)
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
+ uint16_t slave_id)
{
- struct bond_dev_private *internals = bond_dev->data->dev_private;
void *pkt = NULL;
- struct port *port;
- uint8_t i;
+ struct port *port = NULL;
+ uint8_t old_partner_state;
- /* Given slave must be in active list */
- RTE_ASSERT(find_slave_by_id(internals->active_slaves,
- internals->active_slave_count, slave_id) < internals->active_slave_count);
+ port = &mode_8023ad_ports[slave_id];
- /* Exclude slave from transmit policy. If this slave is an aggregator
- * make all aggregated slaves unselected to force selection logic
- * to select suitable aggregator for this port. */
- for (i = 0; i < internals->active_slave_count; i++) {
- port = &mode_8023ad_ports[internals->active_slaves[i]];
- if (port->aggregator_port_id != slave_id)
- continue;
+ ACTOR_STATE_CLR(port, AGGREGATION);
+ port->selected = UNSELECTED;
- port->selected = UNSELECTED;
+ old_partner_state = port->partner_state;
+ record_default(port);
- /* Use default aggregator */
- port->aggregator_port_id = internals->active_slaves[i];
- }
+ /* If partner timeout state changes then disable timer */
+ if (!((old_partner_state ^ port->partner_state) &
+ STATE_LACP_SHORT_TIMEOUT))
+ timer_cancel(&port->current_while_timer);
- port = &mode_8023ad_ports[slave_id];
- port->selected = UNSELECTED;
- port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
- STATE_COLLECTING);
+ PARTNER_STATE_CLR(port, AGGREGATION);
+ ACTOR_STATE_CLR(port, EXPIRED);
+ /* flush rx/tx rings */
while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
rte_pktmbuf_free((struct rte_mbuf *)pkt);
@@ -1066,7 +1060,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
struct bond_dev_private *internals = bond_dev->data->dev_private;
struct ether_addr slave_addr;
struct port *slave, *agg_slave;
- uint8_t slave_id, i, j;
+ uint16_t slave_id, i, j;
bond_mode_8023ad_stop(bond_dev);
@@ -1111,27 +1105,6 @@ bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
conf->update_timeout_ms = mode4->update_timeout_us / 1000;
conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
-}
-
-static void
-bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
-
- bond_mode_8023ad_conf_get(dev, conf);
- conf->slowrx_cb = mode4->slowrx_cb;
-}
-
-static void
-bond_mode_8023ad_conf_get_v1708(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
-
- bond_mode_8023ad_conf_get(dev, conf);
conf->slowrx_cb = mode4->slowrx_cb;
conf->agg_selection = mode4->agg_selection;
}
@@ -1171,27 +1144,6 @@ bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
mode4->dedicated_queues.tx_qid = UINT16_MAX;
}
-static void
-bond_mode_8023ad_setup_v20(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_bond_8023ad_conf def_conf;
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
-
- if (conf == NULL) {
- conf = &def_conf;
- bond_mode_8023ad_conf_get_default(conf);
- }
-
- bond_mode_8023ad_stop(dev);
- bond_mode_8023ad_conf_assign(mode4, conf);
-
- if (dev->data->dev_started)
- bond_mode_8023ad_start(dev);
-}
-
-
void
bond_mode_8023ad_setup(struct rte_eth_dev *dev,
struct rte_eth_bond_8023ad_conf *conf)
@@ -1207,27 +1159,6 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
bond_mode_8023ad_stop(dev);
bond_mode_8023ad_conf_assign(mode4, conf);
-
-
- if (dev->data->dev_started)
- bond_mode_8023ad_start(dev);
-}
-
-static void
-bond_mode_8023ad_setup_v1708(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_bond_8023ad_conf def_conf;
- struct bond_dev_private *internals = dev->data->dev_private;
- struct mode8023ad_private *mode4 = &internals->mode4;
-
- if (conf == NULL) {
- conf = &def_conf;
- bond_mode_8023ad_conf_get_default(conf);
- }
-
- bond_mode_8023ad_stop(dev);
- bond_mode_8023ad_conf_assign(mode4, conf);
mode4->slowrx_cb = conf->slowrx_cb;
mode4->agg_selection = AGG_STABLE;
@@ -1277,7 +1208,7 @@ bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint8_t slave_id, struct rte_mbuf *pkt)
+ uint16_t slave_id, struct rte_mbuf *pkt)
{
struct mode8023ad_private *mode4 = &internals->mode4;
struct port *port = &mode_8023ad_ports[slave_id];
@@ -1358,7 +1289,7 @@ free_out:
}
int
-rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
@@ -1373,49 +1304,9 @@ rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
bond_mode_8023ad_conf_get(bond_dev, conf);
return 0;
}
-VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v20, 2.0);
int
-rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
-
- if (valid_bonded_port_id(port_id) != 0)
- return -EINVAL;
-
- if (conf == NULL)
- return -EINVAL;
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
- return 0;
-}
-VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
-
-int
-rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
-
- if (valid_bonded_port_id(port_id) != 0)
- return -EINVAL;
-
- if (conf == NULL)
- return -EINVAL;
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_conf_get_v1708(bond_dev, conf);
- return 0;
-}
-MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_conf_get_v1708);
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1708, 17.08);
-
-int
-rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
enum rte_bond_8023ad_agg_selection agg_selection)
{
struct rte_eth_dev *bond_dev;
@@ -1437,7 +1328,7 @@ rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
return 0;
}
-int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
+int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
{
struct rte_eth_dev *bond_dev;
struct bond_dev_private *internals;
@@ -1458,7 +1349,7 @@ int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
static int
-bond_8023ad_setup_validate(uint8_t port_id,
+bond_8023ad_setup_validate(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
if (valid_bonded_port_id(port_id) != 0)
@@ -1482,26 +1373,9 @@ bond_8023ad_setup_validate(uint8_t port_id,
return 0;
}
-int
-rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
- int err;
-
- err = bond_8023ad_setup_validate(port_id, conf);
- if (err != 0)
- return err;
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_setup_v20(bond_dev, conf);
-
- return 0;
-}
-VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v20, 2.0);
int
-rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
+rte_eth_bond_8023ad_setup(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
@@ -1516,37 +1390,13 @@ rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
return 0;
}
-VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
-
-
-int
-rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf)
-{
- struct rte_eth_dev *bond_dev;
- int err;
-
- err = bond_8023ad_setup_validate(port_id, conf);
- if (err != 0)
- return err;
-
- bond_dev = &rte_eth_devices[port_id];
- bond_mode_8023ad_setup_v1708(bond_dev, conf);
-
- return 0;
-}
-BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1708, 17.08);
-MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf),
- rte_eth_bond_8023ad_setup_v1708);
-
int
-rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
struct rte_eth_bond_8023ad_slave_info *info)
{
struct rte_eth_dev *bond_dev;
@@ -1579,7 +1429,7 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
}
static int
-bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
+bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
{
struct rte_eth_dev *bond_dev;
struct bond_dev_private *internals;
@@ -1607,7 +1457,8 @@ bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
}
int
-rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled)
{
struct port *port;
int res;
@@ -1627,7 +1478,8 @@ rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
}
int
-rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled)
{
struct port *port;
int res;
@@ -1647,7 +1499,7 @@ rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
}
int
-rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
{
struct port *port;
int err;
@@ -1661,7 +1513,7 @@ rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
}
int
-rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
{
struct port *port;
int err;
@@ -1675,7 +1527,7 @@ rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
}
int
-rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
struct rte_mbuf *lacp_pkt)
{
struct port *port;
@@ -1736,7 +1588,7 @@ bond_mode_8023ad_ext_periodic_cb(void *arg)
}
int
-rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
{
int retval = 0;
struct rte_eth_dev *dev = &rte_eth_devices[port];
@@ -1760,7 +1612,7 @@ rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
}
int
-rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
{
int retval = 0;
struct rte_eth_dev *dev = &rte_eth_devices[port];
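
The hunks above leave a single, unversioned implementation per 802.3ad entry point, all keyed by 16-bit port identifiers. A minimal caller sketch against the post-patch API (the wrapper name and error handling are illustrative, not part of the patch):

#include <errno.h>
#include <rte_eth_bond_8023ad.h>

static int
query_8023ad(uint16_t port_id)
{
	struct rte_eth_bond_8023ad_conf conf;

	/* One entry point now fills slowrx_cb and agg_selection too,
	 * since the _v20/_v1607/_v1708 variants are gone. */
	if (rte_eth_bond_8023ad_conf_get(port_id, &conf) != 0)
		return -EINVAL;

	/* Returns the aggregator mode, or a negative value on error. */
	return rte_eth_bond_8023ad_agg_selection_get(port_id);
}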
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 1d353c73..2874336d 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -64,7 +64,7 @@ extern "C" {
#define MARKER_TLV_TYPE_INFO 0x01
#define MARKER_TLV_TYPE_RESP 0x02
-typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint8_t slave_id,
+typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t slave_id,
struct rte_mbuf *lacp_pkt);
enum rte_bond_8023ad_selection {
@@ -176,7 +176,7 @@ struct rte_eth_bond_8023ad_slave_info {
struct port_params actor;
uint8_t partner_state;
struct port_params partner;
- uint8_t agg_port_id;
+ uint16_t agg_port_id;
};
/**
@@ -192,16 +192,7 @@ struct rte_eth_bond_8023ad_slave_info {
* -EINVAL if conf is NULL
*/
int
-rte_eth_bond_8023ad_conf_get(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
/**
@@ -216,16 +207,7 @@ rte_eth_bond_8023ad_conf_get_v1708(uint8_t port_id,
* -EINVAL if configuration is invalid.
*/
int
-rte_eth_bond_8023ad_setup(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
- struct rte_eth_bond_8023ad_conf *conf);
-int
-rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
+rte_eth_bond_8023ad_setup(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
/**
@@ -241,7 +223,7 @@ rte_eth_bond_8023ad_setup_v1708(uint8_t port_id,
* bonded device or is not inactive).
*/
int
-rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
struct rte_eth_bond_8023ad_slave_info *conf);
#ifdef __cplusplus
@@ -259,7 +241,8 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled);
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled);
/**
* Get COLLECTING flag from slave port actor state.
@@ -272,7 +255,7 @@ rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled);
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id);
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id);
/**
* Configure a slave port to start distributing.
@@ -285,7 +268,8 @@ rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id);
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled);
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled);
/**
* Get DISTRIBUTING flag from slave port actor state.
@@ -298,7 +282,7 @@ rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled);
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id);
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id);
/**
* LACPDU transmit path for external 802.3ad state machine. Caller retains
@@ -312,7 +296,7 @@ rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id);
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
struct rte_mbuf *lacp_pkt);
/**
@@ -338,7 +322,7 @@ rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port_id);
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port_id);
/**
* Disable slow queue on slaves
@@ -355,7 +339,7 @@ rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port_id);
*
*/
int
-rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port_id);
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);
/*
* Get aggregator mode for 8023ad
@@ -365,7 +349,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port_id);
* aggregator mode on success, negative value otherwise
*/
int
-rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id);
+rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
/**
* Set aggregator mode for 8023ad
@@ -374,6 +358,6 @@ rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id);
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
enum rte_bond_8023ad_agg_selection agg_selection);
#endif /* RTE_ETH_BOND_8023AD_H_ */
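
Because the callback typedef itself now carries a uint16_t slave id, external 802.3ad state machines have to change in lockstep. A hedged sketch of a matching slow-path handler (the bonded-port constant and the drop-on-failure policy are assumptions, not from the patch):

#include <rte_mbuf.h>
#include <rte_eth_bond_8023ad.h>

#define BONDED_PORT 0 /* illustrative */

/* Signature matches rte_eth_bond_8023ad_ext_slowrx_fn after this patch. */
static void
ext_slowrx_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt)
{
	/* Echo the LACPDU back out on the same slave; if transmit is
	 * refused, assume we still own the mbuf and release it. */
	if (rte_eth_bond_8023ad_ext_slowtx(BONDED_PORT, slave_id,
			lacp_pkt) != 0)
		rte_pktmbuf_free(lacp_pkt);
}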
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index d46e44a8..433c7000 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -279,7 +279,7 @@ bond_mode_8023ad_stop(struct rte_eth_dev *dev);
*/
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint8_t slave_id, struct rte_mbuf *pkt);
+ uint16_t slave_id, struct rte_mbuf *pkt);
/**
* @internal
@@ -293,7 +293,7 @@ bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
* 0 on success, negative value otherwise.
*/
void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint8_t port_id);
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
/**
* @internal
@@ -307,7 +307,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint8_t port_id);
* 0 on success, negative value otherwise.
*/
int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos);
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos);
/**
* Updates state when MAC was changed on bonded device or one of its slaves.
@@ -318,12 +318,12 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
- uint8_t slave_port);
+ uint16_t slave_port);
int
-bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port);
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port);
int
-bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id);
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id);
#endif /* RTE_ETH_BOND_8023AD_H_ */
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index d9d37495..f7efbb78 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -148,7 +148,7 @@ void bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
rte_spinlock_unlock(&internals->mode6.lock);
}
-uint8_t
+uint16_t
bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
struct bond_dev_private *internals)
{
@@ -220,13 +220,13 @@ bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
return internals->current_primary_port;
}
-uint8_t
+uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
struct rte_mbuf *pkt, struct bond_dev_private *internals)
{
struct ether_hdr *eth_h;
struct arp_hdr *arp_h;
- uint8_t slave_idx;
+ uint16_t slave_idx;
rte_spinlock_lock(&internals->mode6.lock);
eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
diff --git a/drivers/net/bonding/rte_eth_bond_alb.h b/drivers/net/bonding/rte_eth_bond_alb.h
index fd7c3aeb..9f17f7c8 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.h
+++ b/drivers/net/bonding/rte_eth_bond_alb.h
@@ -51,7 +51,7 @@ struct client_data {
uint32_t cli_ip;
/**< Client IP address */
- uint8_t slave_idx;
+ uint16_t slave_idx;
/**< Index of slave on which we connect with that client */
uint8_t in_use;
/**< Flag indicating if entry in client table is currently used */
@@ -113,7 +113,7 @@ bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
* @return
* Index of slave on which packet should be sent.
*/
-uint8_t
+uint16_t
bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
struct bond_dev_private *internals);
@@ -127,7 +127,7 @@ bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
* @return
* Index of slave on which packet should be sent.
*/
-uint8_t
+uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
struct rte_mbuf *pkt, struct bond_dev_private *internals);
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index de1d9e0d..980e6368 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -37,7 +37,7 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include "rte_eth_bond.h"
@@ -56,14 +56,14 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
}
int
-valid_bonded_port_id(uint8_t port_id)
+valid_bonded_port_id(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}
int
-valid_slave_port_id(uint8_t port_id, uint8_t mode)
+valid_slave_port_id(uint16_t port_id, uint8_t mode)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
@@ -80,7 +80,7 @@ valid_slave_port_id(uint8_t port_id, uint8_t mode)
}
void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
uint8_t active_count = internals->active_slave_count;
@@ -107,11 +107,11 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
}
void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
- uint8_t slave_pos;
+ uint16_t slave_pos;
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
+ uint16_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD) {
bond_mode_8023ad_stop(eth_dev);
@@ -153,7 +153,7 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
struct bond_dev_private *internals;
char devargs[52];
- uint8_t port_id;
+ uint16_t port_id;
int ret;
if (name == NULL) {
@@ -193,7 +193,7 @@ rte_eth_bond_free(const char *name)
}
static int
-slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
@@ -233,7 +233,7 @@ slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id)
}
static int
-__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
struct bond_dev_private *internals;
@@ -302,8 +302,13 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
internals->tx_offload_capa &= dev_info.tx_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
- link_properties_valid(bonded_eth_dev,
- &slave_eth_dev->data->dev_link);
+ if (link_properties_valid(bonded_eth_dev,
+ &slave_eth_dev->data->dev_link) != 0) {
+ RTE_BOND_LOG(ERR, "Invalid link properties for slave %d"
+ " in bonding mode %d", slave_port_id,
+ internals->mode);
+ return -1;
+ }
/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
* the power of 2, the lower one is GCD
@@ -363,7 +368,7 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
}
int
-rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
@@ -387,7 +392,8 @@ rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
}
static int
-__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
+ uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
@@ -466,7 +472,7 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
}
int
-rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
@@ -488,7 +494,7 @@ rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
}
int
-rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
{
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
@@ -497,7 +503,7 @@ rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
}
int
-rte_eth_bond_mode_get(uint8_t bonded_port_id)
+rte_eth_bond_mode_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
@@ -510,7 +516,7 @@ rte_eth_bond_mode_get(uint8_t bonded_port_id)
}
int
-rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct bond_dev_private *internals;
@@ -531,7 +537,7 @@ rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
}
int
-rte_eth_bond_primary_get(uint8_t bonded_port_id)
+rte_eth_bond_primary_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
@@ -547,7 +553,8 @@ rte_eth_bond_primary_get(uint8_t bonded_port_id)
}
int
-rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
{
struct bond_dev_private *internals;
uint8_t i;
@@ -570,8 +577,8 @@ rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
}
int
-rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
- uint8_t len)
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
{
struct bond_dev_private *internals;
@@ -586,13 +593,14 @@ rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
if (internals->active_slave_count > len)
return -1;
- memcpy(slaves, internals->active_slaves, internals->active_slave_count);
+ memcpy(slaves, internals->active_slaves,
+ internals->active_slave_count * sizeof(internals->active_slaves[0]));
return internals->active_slave_count;
}
int
-rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
struct ether_addr *mac_addr)
{
struct rte_eth_dev *bonded_eth_dev;
@@ -618,7 +626,7 @@ rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
}
int
-rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
@@ -647,7 +655,7 @@ rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
}
int
-rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
{
struct bond_dev_private *internals;
@@ -677,7 +685,7 @@ rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
}
int
-rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
@@ -690,7 +698,7 @@ rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
}
int
-rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
{
struct bond_dev_private *internals;
@@ -704,7 +712,7 @@ rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
}
int
-rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
@@ -717,7 +725,8 @@ rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
}
int
-rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms)
{
struct bond_dev_private *internals;
@@ -732,7 +741,7 @@ rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
}
int
-rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
@@ -745,7 +754,7 @@ rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
}
int
-rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
{
struct bond_dev_private *internals;
@@ -760,7 +769,7 @@ rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
}
int
-rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
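
The element-sized memcpy() above is the visible half of an API contract change: callers of the slave-list getters must now supply uint16_t buffers. A sketch under that assumption (the printing helper is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_eth_bond.h>

static void
show_active_slaves(uint16_t bonded_port_id)
{
	uint16_t slaves[RTE_MAX_ETHPORTS];
	int n, i;

	/* Returns the active-slave count or a negative value; the
	 * driver copies count * sizeof(uint16_t) bytes into slaves[]. */
	n = rte_eth_bond_active_slaves_get(bonded_port_id, slaves,
			RTE_MAX_ETHPORTS);
	for (i = 0; i < n; i++)
		printf("active slave: port %u\n", slaves[i]);
}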
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index bb634c62..e816da31 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -33,6 +33,7 @@
#include <rte_devargs.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_kvargs.h>
#include <cmdline_parse.h>
@@ -61,16 +62,6 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
unsigned i;
for (i = 0; i < rte_eth_dev_count(); i++) {
-
- /* Currently populated by rte_eth_copy_pci_info().
- *
- * TODO: Once the PCI bus has arrived we should have a better
- * way to test for being a PCI device or not.
- */
- if (rte_eth_devices[i].data->kdrv == RTE_KDRV_UNKNOWN ||
- rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
- continue;
-
pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
eth_pci_addr = &pci_dev->addr;
@@ -98,6 +89,16 @@ find_port_id_by_dev_name(const char *name)
return -1;
}
+static inline int
+bond_pci_addr_cmp(const struct rte_device *dev, const void *_pci_addr)
+{
+ struct rte_pci_device *pdev;
+ const struct rte_pci_addr *paddr = _pci_addr;
+
+ pdev = RTE_DEV_TO_PCI(*(struct rte_device **)(void *)&dev);
+ return rte_eal_compare_pci_addr(&pdev->addr, paddr);
+}
+
/**
* Parses a port identifier string to a port id by pci address, then by name,
* and finally port id.
@@ -106,10 +107,23 @@ static inline int
parse_port_id(const char *port_str)
{
struct rte_pci_addr dev_addr;
+ struct rte_bus *pci_bus;
+ struct rte_device *dev;
int port_id;
+ pci_bus = rte_bus_find_by_name("pci");
+ if (pci_bus == NULL) {
+ RTE_LOG(ERR, PMD, "unable to find PCI bus\n");
+ return -1;
+ }
+
/* try parsing as pci address, physical devices */
- if (eal_parse_pci_DomBDF(port_str, &dev_addr) == 0) {
+ if (pci_bus->parse(port_str, &dev_addr) == 0) {
+ dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr);
+ if (dev == NULL) {
+ RTE_LOG(ERR, PMD, "unable to find PCI device\n");
+ return -1;
+ }
port_id = find_port_id_by_pci_addr(&dev_addr);
if (port_id < 0)
return -1;
@@ -153,7 +167,7 @@ bond_ethdev_parse_slave_port_kvarg(const char *key,
return -1;
} else
slave_ports->slaves[slave_ports->slave_count++] =
- (uint8_t)port_id;
+ port_id;
}
return 0;
}
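
parse_port_id() now goes through the bus abstraction instead of the removed EAL helper. A reduced sketch of the same lookup pattern, reusing the bond_pci_addr_cmp() comparator introduced above (error logging trimmed; assumes the PCI bus driver is linked in):

#include <rte_bus.h>
#include <rte_bus_pci.h>

static int
pci_str_to_probed_addr(const char *devstr, struct rte_pci_addr *addr)
{
	struct rte_bus *pci_bus = rte_bus_find_by_name("pci");

	/* parse() only fills the address; find_device() then checks
	 * that a matching device was actually probed on the bus. */
	if (pci_bus == NULL || pci_bus->parse(devstr, addr) != 0)
		return -1;
	return pci_bus->find_device(NULL, bond_pci_addr_cmp, addr)
			!= NULL ? 0 : -1;
}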
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 3ee70baa..fe232895 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -43,7 +43,7 @@
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
@@ -125,11 +125,12 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
}
static inline uint8_t
-is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
+is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
- return !vlan_tci && (ethertype == ether_type_slow_be &&
+ return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
+ (ethertype == ether_type_slow_be &&
(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
@@ -174,13 +175,14 @@ const struct rte_flow_attr flow_attr_8023ad = {
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
- uint8_t slave_port) {
+ uint16_t slave_port) {
+ struct rte_eth_dev_info slave_info;
struct rte_flow_error error;
struct bond_dev_private *internals = (struct bond_dev_private *)
(bond_dev->data->dev_private);
- struct rte_flow_action_queue lacp_queue_conf = {
- .index = internals->mode4.dedicated_queues.rx_qid,
+ const struct rte_flow_action_queue lacp_queue_conf = {
+ .index = 0,
};
const struct rte_flow_action actions[] = {
@@ -195,19 +197,32 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
flow_item_8023ad, actions, &error);
- if (ret < 0)
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
+ __func__, error.message, slave_port,
+ internals->mode4.dedicated_queues.rx_qid);
return -1;
+ }
+
+ rte_eth_dev_info_get(slave_port, &slave_info);
+ if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
+ slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
+ RTE_BOND_LOG(ERR,
+ "%s: Slave %d capabilities doesn't allow to allocate additional queues",
+ __func__, slave_port);
+ return -1;
+ }
return 0;
}
int
-bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
struct bond_dev_private *internals = (struct bond_dev_private *)
(bond_dev->data->dev_private);
- struct rte_eth_dev_info bond_info, slave_info;
- uint8_t idx;
+ struct rte_eth_dev_info bond_info;
+ uint16_t idx;
/* Verify if all slaves in bonding supports flow director and */
if (internals->slave_count > 0) {
@@ -217,9 +232,6 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
for (idx = 0; idx < internals->slave_count; idx++) {
- rte_eth_dev_info_get(internals->slaves[idx].port_id,
- &slave_info);
-
if (bond_ethdev_8023ad_flow_verify(bond_dev,
internals->slaves[idx].port_id) != 0)
return -1;
@@ -230,7 +242,7 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
}
int
-bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port) {
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
struct rte_flow_error error;
struct bond_dev_private *internals = (struct bond_dev_private *)
@@ -270,10 +282,10 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
struct bond_dev_private *internals = bd_rx_q->dev_private;
uint16_t num_rx_total = 0; /* Total number of received packets */
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
- uint8_t i, idx;
+ uint16_t i, idx;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
@@ -302,8 +314,8 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* positions in slaves, not ID */
uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
uint8_t distributing_count;
@@ -394,8 +406,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count, idx;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count, idx;
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
@@ -444,7 +456,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
/* Remove packet from array if it is slow packet or slave is not
* in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
- if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
+ if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
!collecting || (!promisc &&
!is_multicast_ether_addr(&hdr->d_addr) &&
!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
@@ -526,7 +538,7 @@ ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
#define MAX_CLIENTS_NUMBER 128
uint8_t active_clients;
struct client_stats_t {
- uint8_t port;
+ uint16_t port;
uint32_t ipv4_addr;
uint32_t ipv4_rx_packets;
uint32_t ipv4_tx_packets;
@@ -534,7 +546,7 @@ struct client_stats_t {
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
-update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
+update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
int i = 0;
@@ -592,7 +604,7 @@ update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
- uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
+ uint16_t port, uint32_t __attribute__((unused)) *burstnumber)
{
struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
@@ -673,8 +685,8 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t num_tx_total = 0, num_tx_slave;
@@ -904,7 +916,7 @@ bandwidth_cmp(const void *a, const void *b)
}
static void
-bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
+bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
struct bwg_slave *bwg_slave)
{
struct rte_eth_link link_status;
@@ -970,10 +982,10 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct rte_eth_dev *primary_port =
&rte_eth_devices[internals->primary_port];
uint16_t num_tx_total = 0;
- uint8_t i, j;
+ uint16_t i, j;
- uint8_t num_of_slaves = internals->active_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves = internals->active_slave_count;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
struct ether_hdr *ether_hdr;
struct ether_addr primary_slave_addr;
@@ -1059,7 +1071,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t num_send, num_not_send = 0;
uint16_t num_tx_total = 0;
- uint8_t slave_idx;
+ uint16_t slave_idx;
int i, j;
@@ -1178,8 +1190,8 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
@@ -1239,8 +1251,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* positions in slaves, not ID */
uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
uint8_t distributing_count;
@@ -1333,7 +1345,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
struct bond_tx_queue *bd_tx_q;
uint8_t tx_failed_flag = 0, num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t max_nb_of_tx_pkts = 0;
@@ -1861,7 +1873,7 @@ slave_add(struct bond_dev_private *internals,
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint8_t slave_port_id)
+ uint16_t slave_port_id)
{
int i;
@@ -2047,7 +2059,7 @@ bond_ethdev_close(struct rte_eth_dev *dev)
RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
while (internals->slave_count != skipped) {
- uint8_t port_id = internals->slaves[skipped].port_id;
+ uint16_t port_id = internals->slaves[skipped].port_id;
rte_eth_dev_stop(port_id);
@@ -2125,7 +2137,7 @@ static int
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
int res;
- uint8_t i;
+ uint16_t i;
struct bond_dev_private *internals = dev->data->dev_private;
/* don't do this while a slave is being added */
@@ -2137,7 +2149,7 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
for (i = 0; i < internals->slave_count; i++) {
- uint8_t port_id = internals->slaves[i].port_id;
+ uint16_t port_id = internals->slaves[i].port_id;
res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
if (res == ENOTSUP)
@@ -2277,7 +2289,7 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
static int
bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- void (*link_update)(uint8_t port_id, struct rte_eth_link *eth_link);
+ void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
struct bond_dev_private *bond_ctx;
struct rte_eth_link slave_link;
@@ -2359,7 +2371,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
}
-static void
+static int
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bond_dev_private *internals = dev->data->dev_private;
@@ -2387,6 +2399,8 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
}
+
+ return 0;
}
static void
@@ -2466,7 +2480,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
}
int
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
void *param, void *ret_param __rte_unused)
{
struct rte_eth_dev *bonded_eth_dev;
@@ -2747,8 +2761,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
}
eth_dev->dev_ops = &default_dev_ops;
- eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
- RTE_ETH_DEV_DETACHABLE;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
rte_spinlock_init(&internals->lock);
@@ -2827,6 +2840,7 @@ bond_probe(struct rte_vdev_device *dev)
struct rte_kvargs *kvlist;
uint8_t bonding_mode, socket_id/*, agg_mode*/;
int arg_count, port_id;
+ uint8_t agg_mode;
if (!dev)
return -EINVAL;
@@ -2884,6 +2898,25 @@ bond_probe(struct rte_vdev_device *dev)
internals = rte_eth_devices[port_id].data->dev_private;
internals->kvlist = kvlist;
+
+ if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
+ if (rte_kvargs_process(kvlist,
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to parse agg selection mode for bonded device %s\n",
+ name);
+ goto parse_error;
+ }
+
+ if (internals->mode == BONDING_MODE_8023AD)
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ } else {
+ rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
+ }
+
RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
"socket %u.\n", name, port_id, bonding_mode, socket_id);
return 0;
@@ -2951,7 +2984,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_kvargs *kvlist = internals->kvlist;
int arg_count;
- uint8_t port_id = dev - rte_eth_devices;
+ uint16_t port_id = dev - rte_eth_devices;
uint8_t agg_mode;
static const uint8_t default_rss_key[40] = {
@@ -3050,7 +3083,6 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
name);
}
if (internals->mode == BONDING_MODE_8023AD)
- if (agg_mode != 0)
rte_eth_bond_8023ad_agg_selection_set(port_id,
agg_mode);
}
@@ -3086,7 +3118,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
/* Parse/set primary slave port id*/
arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
if (arg_count == 1) {
- uint8_t primary_slave_port_id;
+ uint16_t primary_slave_port_id;
if (rte_kvargs_process(kvlist,
PMD_BOND_PRIMARY_SLAVE_KVARG,
@@ -3099,7 +3131,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
}
/* Set balance mode transmit policy*/
- if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
+ if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
!= 0) {
RTE_LOG(ERR, EAL,
"Failed to set primary slave port %d on bonded device %s\n",
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 1fe6ff88..1392da98 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -93,12 +93,12 @@ struct bond_tx_queue {
/** Bonded slave devices structure */
struct bond_ethdev_slave_ports {
- uint8_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
- uint8_t slave_count; /**< Number of slaves */
+ uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
+ uint16_t slave_count; /**< Number of slaves */
};
struct bond_slave_details {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t link_status_poll_enabled;
uint8_t link_status_wait_to_complete;
@@ -114,14 +114,14 @@ typedef uint16_t (*xmit_hash_t)(const struct rte_mbuf *buf, uint8_t slave_count)
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
- uint8_t port_id; /**< Port Id of Bonded Port */
+ uint16_t port_id; /**< Port Id of Bonded Port */
uint8_t mode; /**< Link Bonding Mode */
rte_spinlock_t lock;
- uint8_t primary_port; /**< Primary Slave Port */
- uint8_t current_primary_port; /**< Primary Slave Port */
- uint8_t user_defined_primary_port;
+ uint16_t primary_port; /**< Primary Slave Port */
+ uint16_t current_primary_port; /**< Primary Slave Port */
+ uint16_t user_defined_primary_port;
/**< Flag for whether primary port is user defined or not */
uint8_t balance_xmit_policy;
@@ -144,16 +144,17 @@ struct bond_dev_private {
uint16_t nb_rx_queues; /**< Total number of rx queues */
uint16_t nb_tx_queues; /**< Total number of tx queues*/
- uint8_t active_slave; /**< Next active_slave to poll */
- uint8_t active_slave_count; /**< Number of active slaves */
- uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
+ uint16_t active_slave; /**< Next active_slave to poll */
+ uint16_t active_slave_count; /**< Number of active slaves */
+ uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
- uint8_t slave_count; /**< Number of bonded slaves */
+ uint16_t slave_count; /**< Number of bonded slaves */
struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
/**< Arary of bonded slaves details */
struct mode8023ad_private mode4;
- uint8_t tlb_slaves_order[RTE_MAX_ETHPORTS]; /* TLB active slaves send order */
+ uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
+ /**< TLB active slaves send order */
struct mode_alb_private mode6;
uint32_t rx_offload_capa; /** Rx offload capability */
@@ -186,10 +187,10 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
/* Search given slave array to find position of given id.
* Return slave pos or slaves_count if not found. */
-static inline uint8_t
-find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
+static inline uint16_t
+find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) {
- uint8_t pos;
+ uint16_t pos;
for (pos = 0; pos < slaves_count; pos++) {
if (slave_id == slaves[pos])
break;
@@ -199,19 +200,19 @@ find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
}
int
-valid_port_id(uint8_t port_id);
+valid_port_id(uint16_t port_id);
int
-valid_bonded_port_id(uint8_t port_id);
+valid_bonded_port_id(uint16_t port_id);
int
-valid_slave_port_id(uint8_t port_id, uint8_t mode);
+valid_slave_port_id(uint16_t port_id, uint8_t mode);
void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
@@ -255,10 +256,10 @@ xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count);
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint8_t slave_port_id);
+ uint16_t slave_port_id);
int
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
void *param, void *ret_param);
int
diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map
index 0f4e847d..ec3374b0 100644
--- a/drivers/net/bonding/rte_eth_bond_version.map
+++ b/drivers/net/bonding/rte_pmd_bond_version.map
@@ -1,8 +1,6 @@
DPDK_2.0 {
global:
- rte_eth_bond_8023ad_conf_get;
- rte_eth_bond_8023ad_setup;
rte_eth_bond_active_slaves_get;
rte_eth_bond_create;
rte_eth_bond_link_monitoring_set;
@@ -39,8 +37,6 @@ DPDK_16.07 {
rte_eth_bond_8023ad_ext_distrib;
rte_eth_bond_8023ad_ext_distrib_get;
rte_eth_bond_8023ad_ext_slowtx;
- rte_eth_bond_8023ad_conf_get;
- rte_eth_bond_8023ad_setup;
} DPDK_16.04;
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 7cef6279..65df1425 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -62,12 +62,15 @@ endif
CFLAGS_BASE_DRIVER =
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
#
# Add extra flags for base driver files (also known as shared code)
# to disable warnings in them
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 5e5f221e..f2057af1 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -36,6 +36,7 @@
#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__
+#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index a8ccea00..282e2e62 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -40,7 +40,6 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
@@ -403,6 +402,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
t4_os_atomic_list_del(&entry, &adap->mbox_list,
&adap->mbox_lock);
t4_report_fw_error(adap);
+ free(temp);
return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
}
@@ -446,6 +446,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
&adap->mbox_list,
&adap->mbox_lock));
t4_report_fw_error(adap);
+ free(temp);
return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
}
@@ -546,6 +547,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
T4_OS_MBOX_LOCKING(
t4_os_atomic_list_del(&entry, &adap->mbox_list,
&adap->mbox_lock));
+ free(temp);
return -G_FW_CMD_RETVAL((int)res);
}
}
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 7bca4561..dc153c73 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -48,10 +48,10 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
@@ -647,7 +647,7 @@ static void cxgbe_dev_rx_queue_release(void *q)
/*
* Get port statistics.
*/
-static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
+static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *eth_stats)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -690,6 +690,7 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
eth_stats->q_obytes[i] = txq->stats.tx_bytes;
eth_stats->q_errors[i] = txq->stats.mapping_err;
}
+ return 0;
}
/*
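
As with the bonding PMD earlier in this patch, cxgbe's stats callback now returns int so failures can reach the application. A hedged caller-side sketch:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static int
dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	/* rte_eth_stats_get() can now propagate the driver's error code. */
	int ret = rte_eth_stats_get(port_id, &stats);

	if (ret == 0)
		printf("port %u: %" PRIu64 " rx packets\n",
				port_id, stats.ipackets);
	return ret;
}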
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index b709fe2b..5b828c23 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -51,7 +51,6 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 5376fc50..fc10d958 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -149,7 +149,7 @@ static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
struct rte_mbuf *m = mbuf;
for (; m; m = m->next, addr++) {
- *addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
+ *addr = m->buf_iova + rte_pktmbuf_headroom(m);
if (*addr == 0)
goto out_err;
}
@@ -423,7 +423,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
mbuf->nb_segs = 1;
mbuf->port = rxq->rspq.port_id;
- mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_physaddr +
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
mbuf->data_off,
adap->sge.fl_align);
mapping |= buf_size_idx;
@@ -1318,7 +1318,7 @@ alloc_sw_ring:
if (metadata)
*(void **)metadata = s;
- *phys = (uint64_t)tz->phys_addr;
+ *phys = (uint64_t)tz->iova;
return tz->addr;
}
@@ -1405,7 +1405,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
if (pkt->vlan_ex) {
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = ntohs(pkt->vlan);
}
rxq->stats.pkts++;
@@ -1550,7 +1550,7 @@ static int process_responses(struct sge_rspq *q, int budget,
}
if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt->ol_flags |= PKT_RX_VLAN;
pkt->vlan_tci = ntohs(cpl->vlan);
}
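
The sge.c hunks rename buf_physaddr to buf_iova in line with the 17.11 IOVA rework; the arithmetic is unchanged. A minimal sketch of the renamed field, equivalent to what map_mbuf() computes per segment above:

#include <rte_mbuf.h>

/* Bus/IO address of the start of packet data in one mbuf segment. */
static inline rte_iova_t
seg_data_iova(const struct rte_mbuf *m)
{
	return m->buf_iova + rte_pktmbuf_headroom(m);
}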
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
new file mode 100644
index 00000000..171686ec
--- /dev/null
+++ b/drivers/net/dpaa/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
new file mode 100644
index 00000000..cf5a2ecf
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -0,0 +1,1109 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
+
+#include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
+
+/* Keep track of whether QMAN and BMAN have been globally initialized */
+static int is_global_init;
+
+struct rte_dpaa_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset;
+};
+
+static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
+ {"rx_align_err",
+ offsetof(struct dpaa_if_stats, raln)},
+ {"rx_valid_pause",
+ offsetof(struct dpaa_if_stats, rxpf)},
+ {"rx_fcs_err",
+ offsetof(struct dpaa_if_stats, rfcs)},
+ {"rx_vlan_frame",
+ offsetof(struct dpaa_if_stats, rvlan)},
+ {"rx_frame_err",
+ offsetof(struct dpaa_if_stats, rerr)},
+ {"rx_drop_err",
+ offsetof(struct dpaa_if_stats, rdrp)},
+ {"rx_undersized",
+ offsetof(struct dpaa_if_stats, rund)},
+ {"rx_oversize_err",
+ offsetof(struct dpaa_if_stats, rovr)},
+ {"rx_fragment_pkt",
+ offsetof(struct dpaa_if_stats, rfrg)},
+ {"tx_valid_pause",
+ offsetof(struct dpaa_if_stats, txpf)},
+ {"tx_fcs_err",
+ offsetof(struct dpaa_if_stats, terr)},
+ {"tx_vlan_frame",
+ offsetof(struct dpaa_if_stats, tvlan)},
+	{"tx_undersized",
+		offsetof(struct dpaa_if_stats, tund)},
+};
+
+static int
+dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mtu < ETHER_MIN_MTU)
+ return -EINVAL;
+ if (mtu > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
+
+ fman_if_set_maxfrm(dpaa_intf->fif, mtu);
+
+ return 0;
+}
+
+static int
+dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ DPAA_MAX_RX_PKT_LEN)
+ return dpaa_mtu_set(dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ else
+ return -1;
+ }
+ return 0;
+}
+
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* todo: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+ return ptypes;
+ return NULL;
+}
+
+static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Change tx callback to the real one */
+ dev->tx_pkt_burst = dpaa_eth_queue_tx;
+ fman_if_enable_rx(dpaa_intf->fif);
+
+ return 0;
+}
+
+static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_disable_rx(dpaa_intf->fif);
+ dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+}
+
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+}
+
+static int
+dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
+ char *fw_version,
+ size_t fw_size)
+{
+ int ret;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (!svr_file) {
+ DPAA_PMD_ERR("Unable to open SoC device");
+ return -ENOTSUP; /* Not supported on this infra */
+ }
+
+	ret = fscanf(svr_file, "svr:%x", &svr_ver);
+	fclose(svr_file);
+	if (ret <= 0) {
+		DPAA_PMD_ERR("Unable to read SoC device");
+		return -ENOTSUP; /* Not supported on this infra */
+	}
+
+ ret = snprintf(fw_version, fw_size,
+ "svr:%x-fman-v%x",
+ svr_ver,
+ fman_ip_rev);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (uint32_t)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
+ dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
+ dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
+ dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G);
+ dev_info->rx_offload_capa =
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ dev_info->tx_offload_capa =
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+}
+
+static int dpaa_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_link *link = &dev->data->dev_link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ link->link_speed = 1000;
+ else if (dpaa_intf->fif->mac_type == fman_mac_10g)
+ link->link_speed = 10000;
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, dpaa_intf->fif->mac_type);
+
+ link->link_status = dpaa_intf->valid;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+ return 0;
+}
+
+static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_get(dpaa_intf->fif, stats);
+ return 0;
+}
+
+static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_stats_reset(dpaa_intf->fif);
+}
+
+static int
+dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+
+ if (xstats == NULL)
+ return 0;
+
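+	/* Per the ethdev xstats convention, report the required array size
+	 * when the caller's array is too small.
+	 */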
+ if (n < num)
+ return num;
+
+ fman_if_stats_get_all(dpaa_intf->fif, values,
+ sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < num; i++) {
+ xstats[i].id = i;
+ xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
+ }
+ return i;
+}
+
+static int
+dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ dpaa_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+
+ if (!ids) {
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ if (n < stat_cnt)
+ return stat_cnt;
+
+ if (!values)
+ return 0;
+
+		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
+				      sizeof(struct dpaa_if_stats) / 8);
+
+ for (i = 0; i < stat_cnt; i++)
+ values[i] =
+ values_copy[dpaa_xstats_strings[i].offset / 8];
+
+ return stat_cnt;
+ }
+
+ dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static int
+dpaa_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+
+ if (!ids)
+ return dpaa_xstats_get_names(dev, xstats_names, limit);
+
+ dpaa_xstats_get_names(dev, xstats_names_copy, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ DPAA_PMD_ERR("id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
+static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_enable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_promiscuous_disable(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_set_mcast_filter_table(dpaa_intf->fif);
+}
+
+static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+}
+
+static
+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+
+ if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+ struct fman_if_ic_params icp;
+ uint32_t fd_offset;
+ uint32_t bp_size;
+
+ if (!mp->pool_data) {
+ DPAA_PMD_ERR("Not an offloaded buffer pool!");
+ return -1;
+ }
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+ memset(&icp, 0, sizeof(icp));
+		/* set the IC transfer parameters to their default values */
+ icp.iciof = DEFAULT_ICIOF;
+ icp.iceof = DEFAULT_RX_ICEOF;
+ icp.icsz = DEFAULT_ICSZ;
+ fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
+ fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+ fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+		/* Buffer pool size should be equal to the dataroom size */
+ bp_size = rte_pktmbuf_data_room_size(mp);
+ fman_if_set_bp(dpaa_intf->fif, mp->size,
+ dpaa_intf->bp_info->bpid, bp_size);
+ dpaa_intf->valid = 1;
+ DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
+ dpaa_intf->name, fd_offset,
+ fman_if_get_fdoff(dpaa_intf->fif));
+ }
+ dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+
+ return 0;
+}
+
+static
+void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static
+int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+ dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+ return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int dpaa_link_down(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_stop(dev);
+ return 0;
+}
+
+static int dpaa_link_up(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_eth_dev_start(dev);
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ net_fc = dpaa_intf->fc_conf;
+
+ if (fc_conf->high_water < fc_conf->low_water) {
+ DPAA_PMD_ERR("Incorrect Flow Control Configuration");
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE) {
+ return 0;
+ } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
+ fc_conf->mode == RTE_FC_FULL) {
+ fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
+ fc_conf->low_water,
+ dpaa_intf->bp_info->bpid);
+ if (fc_conf->pause_time)
+ fman_if_set_fc_quanta(dpaa_intf->fif,
+ fc_conf->pause_time);
+ }
+
+ /* Save the information in dpaa device */
+ net_fc->pause_time = fc_conf->pause_time;
+ net_fc->high_water = fc_conf->high_water;
+ net_fc->low_water = fc_conf->low_water;
+ net_fc->send_xon = fc_conf->send_xon;
+ net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
+ net_fc->mode = fc_conf->mode;
+ net_fc->autoneg = fc_conf->autoneg;
+
+ return 0;
+}
+
+static int
+dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (net_fc) {
+ fc_conf->pause_time = net_fc->pause_time;
+ fc_conf->high_water = net_fc->high_water;
+ fc_conf->low_water = net_fc->low_water;
+ fc_conf->send_xon = net_fc->send_xon;
+ fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
+ fc_conf->mode = net_fc->mode;
+ fc_conf->autoneg = net_fc->autoneg;
+ return 0;
+ }
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+static int
+dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
+
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
+ " err = %d", ret);
+ return 0;
+}
+
+static void
+dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
+ uint32_t index)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fman_if_clear_mac_addr(dpaa_intf->fif, index);
+}
+
+static void
+dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ int ret;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
+ if (ret)
+ RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+}
+
+static struct eth_dev_ops dpaa_devops = {
+ .dev_configure = dpaa_eth_dev_configure,
+ .dev_start = dpaa_eth_dev_start,
+ .dev_stop = dpaa_eth_dev_stop,
+ .dev_close = dpaa_eth_dev_close,
+ .dev_infos_get = dpaa_eth_dev_info,
+ .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
+
+ .rx_queue_setup = dpaa_eth_rx_queue_setup,
+ .tx_queue_setup = dpaa_eth_tx_queue_setup,
+ .rx_queue_release = dpaa_eth_rx_queue_release,
+ .tx_queue_release = dpaa_eth_tx_queue_release,
+
+ .flow_ctrl_get = dpaa_flow_ctrl_get,
+ .flow_ctrl_set = dpaa_flow_ctrl_set,
+
+ .link_update = dpaa_eth_link_update,
+ .stats_get = dpaa_eth_stats_get,
+ .xstats_get = dpaa_dev_xstats_get,
+ .xstats_get_by_id = dpaa_xstats_get_by_id,
+ .xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
+ .xstats_get_names = dpaa_xstats_get_names,
+ .xstats_reset = dpaa_eth_stats_reset,
+ .stats_reset = dpaa_eth_stats_reset,
+ .promiscuous_enable = dpaa_eth_promiscuous_enable,
+ .promiscuous_disable = dpaa_eth_promiscuous_disable,
+ .allmulticast_enable = dpaa_eth_multicast_enable,
+ .allmulticast_disable = dpaa_eth_multicast_disable,
+ .mtu_set = dpaa_mtu_set,
+ .dev_set_link_down = dpaa_link_down,
+ .dev_set_link_up = dpaa_link_up,
+ .mac_addr_add = dpaa_dev_add_mac_addr,
+ .mac_addr_remove = dpaa_dev_remove_mac_addr,
+ .mac_addr_set = dpaa_dev_set_mac_addr,
+
+ .fw_version_get = dpaa_fw_version_get,
+};
+
+static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
+{
+ struct rte_eth_fc_conf *fc_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(dpaa_intf->fc_conf)) {
+ dpaa_intf->fc_conf = rte_zmalloc(NULL,
+ sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
+ if (!dpaa_intf->fc_conf) {
+ DPAA_PMD_ERR("unable to save flow control info");
+ return -ENOMEM;
+ }
+ }
+ fc_conf = dpaa_intf->fc_conf;
+ ret = fman_if_get_fc_threshold(dpaa_intf->fif);
+ if (ret) {
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
+ } else {
+ fc_conf->mode = RTE_FC_NONE;
+ }
+
+ return 0;
+}
+
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq,
+ uint32_t fqid)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA;
+
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
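+	/* Stash one cache line each of the annotation area and packet data
+	 * (and none of the FQ context) into the processing core's cache;
+	 * the values below are in units of 64-byte cache lines.
+	 */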
+ opts.fqd.context_a.stashing.exclusive = 0;
+ opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+
+	/* Enable tail drop */
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
+ opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
+ qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+ return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+ struct fman_if *fman_intf)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.dest.channel = fman_intf->tx_channel_id;
+ opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+ opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_b = 0;
+ /* no tx-confirmation */
+ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+ opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+ return ret;
+}
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
+static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = qman_reserve_fqid(fqid);
+ if (ret) {
+ DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return -EINVAL;
+ }
+	/* "map" this Rx FQ to one of the interface's Tx FQIDs */
+ DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
+ ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+ if (ret) {
+ DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+ }
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
+ opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
+ ret = qman_init_fq(fq, 0, &opts);
+ if (ret)
+ DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
+ fqid, ret);
+ return ret;
+}
+#endif
+
+/* Initialise a network interface */
+static int
+dpaa_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int num_cores, num_rx_fqs, fqid;
+ int loop, ret = 0;
+ int dev_id;
+ struct rte_dpaa_device *dpaa_device;
+ struct dpaa_if *dpaa_intf;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+ struct fman_if_bpool *bp, *tmp_bp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
+ dev_id = dpaa_device->id.dev_id;
+ dpaa_intf = eth_dev->data->dev_private;
+ cfg = &dpaa_netcfg->port_cfg[dev_id];
+ fman_intf = cfg->fman_if;
+
+ dpaa_intf->name = dpaa_device->name;
+
+	/* save fman_if & cfg in the interface structure */
+ dpaa_intf->fif = fman_intf;
+ dpaa_intf->ifid = dev_id;
+ dpaa_intf->cfg = cfg;
+
+ /* Initialize Rx FQ's */
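+	/* The Rx FQ count defaults to DPAA_DEFAULT_NUM_PCD_QUEUES and can
+	 * be overridden through the environment, e.g. DPAA_NUM_RX_QUEUES=4.
+	 */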
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+
+	/* Each device can have at most DPAA_PCD_FQID_MULTIPLIER Rx
+ * queues.
+ */
+ if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+ DPAA_PMD_ERR("Invalid number of RX queues\n");
+ return -EINVAL;
+ }
+
+	dpaa_intf->rx_queues = rte_zmalloc(NULL,
+		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+	if (!dpaa_intf->rx_queues)
+		return -ENOMEM;
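+	/* PCD FQID layout, e.g. for ifid 1, queue 0:
+	 * DPAA_PCD_FQID_START + 1 * DPAA_PCD_FQID_MULTIPLIER + 0
+	 * = 0x400 + 0x100 + 0 = 0x500.
+	 */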
+ for (loop = 0; loop < num_rx_fqs; loop++) {
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
+ ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+ if (ret)
+ return ret;
+ dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+ /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
+ num_cores = rte_lcore_count();
+ dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+ num_cores, MAX_CACHELINE);
+ if (!dpaa_intf->tx_queues)
+ return -ENOMEM;
+
+ for (loop = 0; loop < num_cores; loop++) {
+ ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+ fman_intf);
+ if (ret)
+ return ret;
+ dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+ }
+ dpaa_intf->nb_tx_queues = num_cores;
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
+ dpaa_debug_queue_init(&dpaa_intf->debug_queues[
+ DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+#endif
+
+ DPAA_PMD_DEBUG("All frame queues created");
+
+ /* Get the initial configuration for flow control */
+ dpaa_fc_set_default(dpaa_intf);
+
+ /* reset bpool list, initialize bpool dynamically */
+ list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+ list_del(&bp->node);
+ free(bp);
+ }
+
+ /* Populate ethdev structure */
+ eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+ eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+ rte_free(dpaa_intf->rx_queues);
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->rx_queues = NULL;
+ dpaa_intf->tx_queues = NULL;
+ dpaa_intf->nb_rx_queues = 0;
+ dpaa_intf->nb_tx_queues = 0;
+ return -ENOMEM;
+ }
+
+ /* copy the primary mac address */
+ ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dpaa_device->name,
+ fman_intf->mac_addr.addr_bytes[0],
+ fman_intf->mac_addr.addr_bytes[1],
+ fman_intf->mac_addr.addr_bytes[2],
+ fman_intf->mac_addr.addr_bytes[3],
+ fman_intf->mac_addr.addr_bytes[4],
+ fman_intf->mac_addr.addr_bytes[5]);
+
+ /* Disable RX mode */
+ fman_if_discard_rx_errors(fman_intf);
+ fman_if_disable_rx(fman_intf);
+ /* Disable promiscuous mode */
+ fman_if_promiscuous_disable(fman_intf);
+ /* Disable multicast */
+ fman_if_reset_mcast_filter_table(fman_intf);
+ /* Reset interface statistics */
+ fman_if_stats_reset(fman_intf);
+
+ return 0;
+}
+
+static int
+dpaa_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (!dpaa_intf) {
+ DPAA_PMD_WARN("Already closed or not started");
+ return -1;
+ }
+
+ dpaa_eth_dev_close(dev);
+
+ /* release configuration memory */
+ if (dpaa_intf->fc_conf)
+ rte_free(dpaa_intf->fc_conf);
+
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+
+ /* free memory for storing MAC addresses */
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ int diag;
+ int ret;
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* In case of secondary process, the device is already configured
+ * and no further action is required, except portal initialization
+ * and verifying secondary attachment to port name.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+ if (!eth_dev)
+ return -ENOMEM;
+ return 0;
+ }
+
+ if (!is_global_init) {
+ /* One time load of Qman/Bman drivers */
+ ret = qman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("QMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+ ret = bman_global_init();
+ if (ret) {
+ DPAA_PMD_ERR("BMAN initialization failed: %d",
+ ret);
+ return ret;
+ }
+
+ is_global_init = 1;
+ }
+
+ ret = rte_dpaa_portal_init((void *)1);
+ if (ret) {
+ DPAA_PMD_ERR("Unable to initialize portal");
+ return ret;
+ }
+
+ eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa_if),
+ RTE_CACHE_LINE_SIZE);
+ if (!eth_dev->data->dev_private) {
+ DPAA_PMD_ERR("Cannot allocate memzone for port data");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->device->driver = &dpaa_drv->driver;
+ dpaa_dev->eth_dev = eth_dev;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa_dev_init(eth_dev);
+ if (diag == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = dpaa_dev->eth_dev;
+ dpaa_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa_driver rte_dpaa_pmd = {
+ .drv_type = FSL_DPAA_ETH,
+ .probe = rte_dpaa_probe,
+ .remove = rte_dpaa_remove,
+};
+
+RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
new file mode 100644
index 00000000..5457d61b
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -0,0 +1,182 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DPAA_ETHDEV_H__
+#define __DPAA_ETHDEV_H__
+
+/* System headers */
+#include <stdbool.h>
+#include <rte_ethdev.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+/* DPAA SoC identifier; if this file is not present, the board can be
+ * assumed to be non-DPAA. A single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE		"/sys/devices/soc0/soc_id"
+
+#define DPAA_MBUF_HW_ANNOTATION 64
+#define DPAA_FD_PTA_SIZE 64
+
+#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA_HW_BUF_RESERVE 0
+#define DPAA_PACKET_LAYOUT_ALIGN 64
+
+/* Alignment to use for cpu-local structs to avoid coherency problems. */
+#define MAX_CACHELINE 64
+
+#define DPAA_MIN_RX_BUF_SIZE 512
+#define DPAA_MAX_RX_PKT_LEN 10240
+
+/* RX queue tail drop threshold
+ * currently considering 32 KB packets.
+ */
+#define CONG_THRESHOLD_RX_Q (32 * 1024)
+
+/* Maximum MAC filters for memac (8), including the primary MAC address */
+#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)
+
+/* Maximum number of slots available in the Tx ring */
+#define MAX_TX_RING_SLOTS	8
+
+/* PCD frame queues */
+#define DPAA_PCD_FQID_START 0x400
+#define DPAA_PCD_FQID_MULTIPLIER 0x100
+#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
+
+#define DPAA_IF_TX_PRIORITY 3
+#define DPAA_IF_RX_PRIORITY 4
+#define DPAA_IF_DEBUG_PRIORITY 7
+
+#define DPAA_IF_RX_ANNOTATION_STASH 1
+#define DPAA_IF_RX_DATA_STASH 1
+#define DPAA_IF_RX_CONTEXT_STASH 0
+
+/* Each "debug" FQ is represented by one of these */
+#define DPAA_DEBUG_FQ_RX_ERROR 0
+#define DPAA_DEBUG_FQ_TX_ERROR 1
+
+#define DPAA_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM)
+
+/* DPAA Frame descriptor macros */
+
+#define DPAA_FD_CMD_FCO 0x80000000
+/**< Frame queue Context Override */
+#define DPAA_FD_CMD_RPD 0x40000000
+/**< Read Prepended Data */
+#define DPAA_FD_CMD_UPD 0x20000000
+/**< Update Prepended Data */
+#define DPAA_FD_CMD_DTC 0x10000000
+/**< Do IP/TCP/UDP Checksum */
+#define DPAA_FD_CMD_DCL4C 0x10000000
+/**< Didn't calculate L4 Checksum */
+#define DPAA_FD_CMD_CFQ 0x00ffffff
+/**< Confirmation Frame Queue */
+
+/* Each network interface is represented by one of these */
+struct dpaa_if {
+ int valid;
+ char *name;
+ const struct fm_eth_port_cfg *cfg;
+ struct qman_fq *rx_queues;
+ struct qman_fq *tx_queues;
+ struct qman_fq debug_queues[2];
+ uint16_t nb_rx_queues;
+ uint16_t nb_tx_queues;
+ uint32_t ifid;
+ struct fman_if *fif;
+ struct dpaa_bp_info *bp_info;
+ struct rte_eth_fc_conf *fc_conf;
+};
+
+struct dpaa_if_stats {
+ /* Rx Statistics Counter */
+ uint64_t reoct; /**<Rx Eth Octets Counter */
+ uint64_t roct; /**<Rx Octet Counters */
+ uint64_t raln; /**<Rx Alignment Error Counter */
+ uint64_t rxpf; /**<Rx valid Pause Frame */
+ uint64_t rfrm; /**<Rx Frame counter */
+ uint64_t rfcs; /**<Rx frame check seq error */
+ uint64_t rvlan; /**<Rx Vlan Frame Counter */
+ uint64_t rerr; /**<Rx Frame error */
+ uint64_t ruca; /**<Rx Unicast */
+ uint64_t rmca; /**<Rx Multicast */
+ uint64_t rbca; /**<Rx Broadcast */
+ uint64_t rdrp; /**<Rx Dropped Packet */
+ uint64_t rpkt; /**<Rx packet */
+ uint64_t rund; /**<Rx undersized packets */
+ uint32_t res_x[14];
+ uint64_t rovr; /**<Rx oversized but good */
+ uint64_t rjbr; /**<Rx oversized with bad csum */
+ uint64_t rfrg; /**<Rx fragment Packet */
+ uint64_t rcnp; /**<Rx control packets (0x8808 */
+ uint64_t rdrntp; /**<Rx dropped due to FIFO overflow */
+ uint32_t res01d0[12];
+ /* Tx Statistics Counter */
+ uint64_t teoct; /**<Tx eth octets */
+ uint64_t toct; /**<Tx Octets */
+ uint32_t res0210[2];
+ uint64_t txpf; /**<Tx valid pause frame */
+ uint64_t tfrm; /**<Tx frame counter */
+ uint64_t tfcs; /**<Tx FCS error */
+ uint64_t tvlan; /**<Tx Vlan Frame */
+ uint64_t terr; /**<Tx frame error */
+ uint64_t tuca; /**<Tx Unicast */
+ uint64_t tmca; /**<Tx Multicast */
+ uint64_t tbca; /**<Tx Broadcast */
+ uint32_t res0258[2];
+ uint64_t tpkt; /**<Tx Packet */
+ uint64_t tund; /**<Tx Undersized */
+};
+
+#endif
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
new file mode 100644
index 00000000..41e57f2e
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -0,0 +1,756 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* System headers */
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "dpaa_ethdev.h"
+#include "dpaa_rxtx.h"
+#include <rte_dpaa_bus.h>
+#include <dpaa_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
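+/*
+ * Build a QMan frame descriptor from a contiguous mbuf: 'opaque' packs the
+ * FD format (bits 31-29), buffer offset (bits 28-20) and frame length
+ * (bits 19-0); 'addr' carries the buffer's IO virtual address.
+ */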
+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
+ do { \
+ (_fd)->cmd = 0; \
+ (_fd)->opaque_addr = 0; \
+ (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
+ (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
+ (_fd)->opaque |= (_mbuf)->pkt_len; \
+ (_fd)->addr = (_mbuf)->buf_iova; \
+ (_fd)->bpid = _bpid; \
+ } while (0)
+
+#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
+void dpaa_display_frame(const struct qm_fd *fd)
+{
+ int ii;
+ char *ptr;
+
+ printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
+ __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
+ fd->offset, fd->length20, fd->status);
+
+ ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
+ ptr += fd->offset;
+ printf("%02x ", *ptr);
+	for (ii = 1; ii < fd->length20; ii++) {
+		ptr++;
+		printf("%02x ", *ptr);
+		if ((ii % 16) == 0)
+			printf("\n");
+	}
+ printf("\n");
+}
+#else
+#define dpaa_display_frame(a)
+#endif
+
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+ uint64_t prs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Slow parsing");
+ /*TBD:XXX: to be implemented*/
+}
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
+ uint64_t fd_virt_addr)
+{
+ struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+ uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+ DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
+ switch (prs) {
+ case DPAA_PKT_TYPE_NONE:
+ m->packet_type = 0;
+ break;
+ case DPAA_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case DPAA_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case DPAA_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case DPAA_PKT_TYPE_IPV4_FRAG:
+ case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV6_FRAG:
+ case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
+ case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case DPAA_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_EXT_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+ break;
+ /* More switch cases can be added */
+ default:
+ dpaa_slow_parsing(m, prs);
+ }
+
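+	/* Record l2_len (the IP header offset) and l3_len (the L4 offset
+	 * minus the IP offset) in the mbuf's packed tx_offload field.
+	 */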
+ m->tx_offload = annot->parse.ip_off[0];
+ m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+ << DPAA_PKT_L3_LEN_SHIFT;
+
+ /* Set the hash values */
+ m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
+ m->ol_flags = PKT_RX_RSS_HASH;
+ /* All packets with Bad checksum are dropped by interface (and
+ * corresponding notification issued to RX error queues).
+ */
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ /* Check if Vlan is present */
+ if (prs & DPAA_PARSE_VLAN_MASK)
+ m->ol_flags |= PKT_RX_VLAN;
+ /* Packet received without stripping the vlan */
+}
+
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT)) {
+ ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr->hdr_checksum = 0;
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+ struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ tcp_hdr->cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ tcp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ tcp_hdr);
+ } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+ RTE_PTYPE_L4_UDP) {
+ struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+ mbuf->l3_len);
+ udp_hdr->dgram_cksum = 0;
+ if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+ udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+ udp_hdr);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+ udp_hdr);
+ }
+}
+
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+ struct qm_fd *fd, char *prs_buf)
+{
+ struct dpaa_eth_parse_results_t *prs;
+
+ DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
+
+ prs = GET_TX_PRS(prs_buf);
+ prs->l3r = 0;
+ prs->l4r = 0;
+ if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV4_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+ else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6) ||
+ ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+ RTE_PTYPE_L3_IPV6_EXT))
+ prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+ if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
+ else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+
+ prs->ip_off[0] = mbuf->l2_len;
+ prs->l4_off = mbuf->l3_len + mbuf->l2_len;
+ /* Enable L3 (and L4, if TCP or UDP) HW checksum*/
+ fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
+}
+
+struct rte_mbuf *
+dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
+ struct qm_sg_entry *sgt, *sg_temp;
+ void *vaddr, *sg_vaddr;
+ int i = 0;
+ uint8_t fd_offset = fd->offset;
+
+ DPAA_DP_LOG(DEBUG, "Received an SG frame");
+
+ vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
+ if (!vaddr) {
+ DPAA_PMD_ERR("unable to convert physical address");
+ return NULL;
+ }
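+	/* The frame carries an SG table at fd->offset; each entry points at
+	 * a buffer that hides an mbuf header meta_data_size bytes before it,
+	 * so the chain is rebuilt in place without copying. The buffer that
+	 * holds the table itself ('temp') is freed once the chain is built.
+	 */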
+ sgt = vaddr + fd_offset;
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
+ temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
+ sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+
+ first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ first_seg->data_off = sg_temp->offset;
+ first_seg->data_len = sg_temp->length;
+ first_seg->pkt_len = sg_temp->length;
+ rte_mbuf_refcnt_set(first_seg, 1);
+
+ first_seg->port = ifid;
+ first_seg->nb_segs = 1;
+ first_seg->ol_flags = 0;
+ prev_seg = first_seg;
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ hw_sg_to_cpu(sg_temp);
+ sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+ cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
+ bp_info->meta_data_size);
+ cur_seg->data_off = sg_temp->offset;
+ cur_seg->data_len = sg_temp->length;
+ first_seg->pkt_len += sg_temp->length;
+ first_seg->nb_segs += 1;
+ rte_mbuf_refcnt_set(cur_seg, 1);
+ prev_seg->next = cur_seg;
+ if (sg_temp->final) {
+ cur_seg->next = NULL;
+ break;
+ }
+ prev_seg = cur_seg;
+ }
+
+ dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+ rte_pktmbuf_free_seg(temp);
+
+ return first_seg;
+}
+
+static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
+ uint32_t ifid)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+ struct rte_mbuf *mbuf;
+ void *ptr;
+ uint8_t format =
+ (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
+ uint16_t offset =
+ (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+ uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ DPAA_DP_LOG(DEBUG, " FD--->MBUF");
+
+ if (unlikely(format == qm_fd_sg))
+ return dpaa_eth_sg_to_mbuf(fd, ifid);
+
+ /* Ignoring case when format != qm_fd_contig */
+ dpaa_display_frame(fd);
+ ptr = rte_dpaa_mem_ptov(fd->addr);
+	/* Ignoring case when ptr would be NULL. That is only possible in
+	 * case of a corrupted packet.
+	 */
+
+ mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+ /* Prefetch the Parse results and packet data to L1 */
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ rte_prefetch0((void *)((uint8_t *)ptr + offset));
+
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+
+ mbuf->port = ifid;
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+
+ return mbuf;
+}
+
+uint16_t dpaa_eth_queue_rx(void *q,
+ struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ struct qman_fq *fq = q;
+ struct qm_dqrr_entry *dq;
+ uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ int ret;
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+
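+	/* Issue a volatile dequeue command for up to
+	 * DPAA_MAX_DEQUEUE_NUM_FRAMES frames, then drain the portal's DQRR
+	 * ring until the command completes.
+	 */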
+ ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+ DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+ if (ret)
+ return 0;
+
+ do {
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+ bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+ return num_rx;
+}
+
+static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
+{
+ int ret;
+ uint64_t buf = 0;
+ struct bm_buffer bufs;
+
+ ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
+ if (ret <= 0) {
+ DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
+ return (void *)buf;
+ }
+
+ DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+ (uint64_t)bufs.addr, bufs.bpid);
+
+	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
+
+	return (void *)buf;
+}
+
+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
+ struct dpaa_if *dpaa_intf)
+{
+ struct rte_mbuf *dpaa_mbuf;
+
+ /* allocate pktbuffer on bpid for dpaa port */
+ dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
+ if (!dpaa_mbuf)
+ return NULL;
+
+ memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+ ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
+
+ /* Copy only the required fields */
+ dpaa_mbuf->data_off = mbuf->data_off;
+ dpaa_mbuf->pkt_len = mbuf->pkt_len;
+ dpaa_mbuf->ol_flags = mbuf->ol_flags;
+ dpaa_mbuf->packet_type = mbuf->packet_type;
+ dpaa_mbuf->tx_offload = mbuf->tx_offload;
+ rte_pktmbuf_free(mbuf);
+ return dpaa_mbuf;
+}
+
+int
+dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid)
+{
+ struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
+ struct rte_mbuf *temp, *mi;
+ struct qm_sg_entry *sg_temp, *sgt;
+ int i = 0;
+
+ DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
+
+ temp = rte_pktmbuf_alloc(bp_info->mp);
+ if (!temp) {
+ DPAA_PMD_ERR("Failure in allocation of mbuf");
+ return -1;
+ }
+	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
+				+ temp->data_off)) {
+		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
+		rte_pktmbuf_free(temp);
+		return -1;
+	}
+
+ fd->cmd = 0;
+ fd->opaque_addr = 0;
+
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+ if (temp->data_off < DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t))
+ temp->data_off = DEFAULT_TX_ICEOF
+ + sizeof(struct dpaa_eth_parse_results_t);
+ dcbz_64(temp->buf_addr);
+ dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
+ }
+
+ sgt = temp->buf_addr + temp->data_off;
+ fd->format = QM_FD_SG;
+ fd->addr = temp->buf_iova;
+ fd->offset = temp->data_off;
+ fd->bpid = bpid;
+ fd->length20 = mbuf->pkt_len;
+
+ while (i < DPAA_SGT_MAX_ENTRIES) {
+ sg_temp = &sgt[i++];
+ sg_temp->opaque = 0;
+ sg_temp->val = 0;
+ sg_temp->addr = cur_seg->buf_iova;
+ sg_temp->offset = cur_seg->data_off;
+ sg_temp->length = cur_seg->data_len;
+ if (RTE_MBUF_DIRECT(cur_seg)) {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /*If refcnt > 1, invalid bpid is set to ensure
+ * buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else {
+ sg_temp->bpid =
+ DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
+ }
+ cur_seg = cur_seg->next;
+ } else {
+ /* Get owner MBUF from indirect buffer */
+ mi = rte_mbuf_from_indirect(cur_seg);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /*If refcnt > 1, invalid bpid is set to ensure
+ * owner buffer is not freed by HW.
+ */
+ sg_temp->bpid = 0xff;
+ } else {
+ sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
+ rte_mbuf_refcnt_update(mi, 1);
+ }
+ prev_seg = cur_seg;
+ cur_seg = cur_seg->next;
+ prev_seg->next = NULL;
+ rte_pktmbuf_free(prev_seg);
+ }
+ if (cur_seg == NULL) {
+ sg_temp->final = 1;
+ cpu_to_hw_sg(sg_temp);
+ break;
+ }
+ cpu_to_hw_sg(sg_temp);
+ }
+ return 0;
+}
+
+/* Handle mbufs which are not segmented (non SG) */
+static inline void
+tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ struct rte_mbuf *mi = NULL;
+
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ if (rte_mbuf_refcnt_read(mbuf) > 1) {
+ /* In case of direct mbuf and mbuf being cloned,
+ * BMAN should _not_ release buffer.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+			/* Buffer should be released by EAL */
+ rte_mbuf_refcnt_update(mbuf, -1);
+ } else {
+ /* In case of direct mbuf and no cloning, mbuf can be
+ * released by BMAN.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+ } else {
+ /* This is data-containing core mbuf: 'mi' */
+ mi = rte_mbuf_from_indirect(mbuf);
+ if (rte_mbuf_refcnt_read(mi) > 1) {
+ /* In case of indirect mbuf, and mbuf being cloned,
+ * BMAN should _not_ release it and let EAL release
+ * it through pktmbuf_free below.
+ */
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+ } else {
+ /* In case of indirect mbuf, and no cloning, core mbuf
+ * should be released by BMAN.
+			 * Increase refcnt of core mbuf so that when
+ * pktmbuf_free is called and mbuf is released, EAL
+ * doesn't try to release core mbuf which would have
+ * been released by BMAN.
+ */
+ rte_mbuf_refcnt_update(mi, 1);
+ DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+ }
+	}
+
+	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+		if (mbuf->data_off < (DEFAULT_TX_ICEOF +
+			sizeof(struct dpaa_eth_parse_results_t))) {
+			DPAA_DP_LOG(DEBUG, "Checksum offload error: "
+				"not enough headroom for hardware checksum "
+				"offload, calculating checksum in software.");
+			dpaa_checksum(mbuf);
+		} else {
+			dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
+		}
+	}
+
+	/* For the indirect case, free the mbuf only after its checksum
+	 * fields have been read, to avoid a use-after-free.
+	 */
+	if (mi)
+		rte_pktmbuf_free(mbuf);
+}
+
+/* Handle all mbufs on dpaa BMAN managed pool */
+static inline uint16_t
+tx_on_dpaa_pool(struct rte_mbuf *mbuf,
+ struct dpaa_bp_info *bp_info,
+ struct qm_fd *fd_arr)
+{
+ DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
+
+ if (mbuf->nb_segs == 1) {
+ /* Case for non-segmented buffers */
+ tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+ } else if (mbuf->nb_segs > 1 &&
+ mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
+ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
+ DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
+ return 1;
+ }
+ } else {
+ DPAA_PMD_DEBUG("Number of Segments not supported");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Handle all mbufs on an external pool (non-dpaa) */
+static inline uint16_t
+tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
+ struct qm_fd *fd_arr)
+{
+ struct dpaa_if *dpaa_intf = txq->dpaa_intf;
+ struct rte_mbuf *dmable_mbuf;
+
+	DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
+		"Allocating an offloaded buffer");
+ dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
+ if (!dmable_mbuf) {
+ DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
+ return 1;
+ }
+
+	/* Build the FD from the newly allocated DMA-able mbuf; the original
+	 * mbuf has already been freed by dpaa_get_dmable_mbuf().
+	 */
+	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+
+ return 0;
+}
+
+uint16_t
+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct rte_mbuf *mbuf, *mi = NULL;
+ struct rte_mempool *mp;
+ struct dpaa_bp_info *bp_info;
+ struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send, loop, i = 0;
+ uint16_t state;
+ int ret;
+
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
+
+ while (nb_bufs) {
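+		/* Enqueue in bursts of at most MAX_TX_RING_SLOTS (8) frames;
+		 * nb_bufs >> 3 is non-zero only while at least 8 remain.
+		 */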
+ frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
+ for (loop = 0; loop < frames_to_send; loop++, i++) {
+ mbuf = bufs[i];
+ if (RTE_MBUF_DIRECT(mbuf)) {
+ mp = mbuf->pool;
+ } else {
+ mi = rte_mbuf_from_indirect(mbuf);
+ mp = mi->pool;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
+ state = tx_on_dpaa_pool(mbuf, bp_info,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+ /* Set frames_to_send & nb_bufs so
+ * that packets are transmitted till
+ * previous frame.
+ */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ } else {
+ state = tx_on_external_pool(q, mbuf,
+ &fd_arr[loop]);
+ if (unlikely(state)) {
+ /* Set frames_to_send & nb_bufs so
+ * that packets are transmitted till
+ * previous frame.
+ */
+ frames_to_send = loop;
+ nb_bufs = loop;
+ goto send_pkts;
+ }
+ }
+ }
+
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi(q, &fd_arr[loop],
+ frames_to_send - loop);
+ }
+ nb_bufs -= frames_to_send;
+ }
+
+ DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);
+
+ return i;
+}
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused)
+{
+ DPAA_DP_LOG(DEBUG, "Drop all packets");
+
+ /* Drop all incoming packets. No need to free packets here
+	 * because the rte_eth f/w frees up the packets through the tx_buffer
+	 * callback in case this function returns a count less than nb_bufs.
+ */
+ return 0;
+}
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
new file mode 100644
index 00000000..2ffc4ffe
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -0,0 +1,297 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPDK_RXTX_H__
+#define __DPDK_RXTX_H__
+
+/* Internal offset from where the IC is copied into the packet buffer */
+#define DEFAULT_ICIOF 32
+/* IC transfer size */
+#define DEFAULT_ICSZ 48
+
+/* IC offsets from buffer header address */
+#define DEFAULT_RX_ICEOF 16
+#define DEFAULT_TX_ICEOF 16
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define DPAA_L3_PARSE_RESULT_IPV4 0x80
+/* L3 Type field: First IP Present IPv6 */
+#define DPAA_L3_PARSE_RESULT_IPV6 0x40
+/* Values for the L4R field of the FM Parse Results
+ * See section 8.8.4.7.20 (L4 HXS - L4 Results) of the DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define DPAA_L4_PARSE_RESULT_UDP 0x40
+/* L4 Type field: TCP */
+#define DPAA_L4_PARSE_RESULT_TCP 0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+	/**< Maximum number of frames to be dequeued in a single rx call */
+
+/* FD structure masks and offset */
+#define DPAA_FD_FORMAT_MASK 0xE0000000
+#define DPAA_FD_OFFSET_MASK 0x1FF00000
+#define DPAA_FD_LENGTH_MASK 0xFFFFF
+#define DPAA_FD_FORMAT_SHIFT 29
+#define DPAA_FD_OFFSET_SHIFT 20
+
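
These masks and shifts carve three fields out of the 32-bit frame-descriptor word that packs format, offset and length together. A small illustrative decode (fd_word is a hypothetical stand-in for the real FD accessors in the qman headers):

#include <stdint.h>

/* Illustrative decode of the packed FD word using the masks above. */
static inline uint32_t fd_format(uint32_t fd_word)
{
	return (fd_word & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
}

static inline uint32_t fd_offset(uint32_t fd_word)
{
	return (fd_word & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
}

static inline uint32_t fd_length(uint32_t fd_word)
{
	return fd_word & DPAA_FD_LENGTH_MASK;
}
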
+/* Parsing mask (Little Endian) - 0x00E044ED00800000
+ * Classification Plan ID 0x00
+ * L4R 0xE0 -
+ * 0x20 - TCP
+ * 0x40 - UDP
+ * 0x80 - SCTP
+ * L3R 0xEDC4 (in Big Endian) -
+ * 0x8000 - IPv4
+ * 0x4000 - IPv6
+ * 0x8140 - IPv4 Ext + Frag
+ * 0x8040 - IPv4 Frag
+ * 0x8100 - IPv4 Ext
+ * 0x4140 - IPv6 Ext + Frag
+ * 0x4040 - IPv6 Frag
+ * 0x4100 - IPv6 Ext
+ * L2R 0x8000 (in Big Endian) -
+ * 0x8000 - Ethernet type
+ * ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK 0x00E044ED00800000
+#define DPAA_PARSE_VLAN_MASK 0x0000000000700000
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE 0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER 0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4 \
+ (0x0000008000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV6 \
+ (0x0000004000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_GRE \
+ (0x0000002000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV4_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_FRAG \
+ (0x0000400000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_EXT \
+ (0x0000000100000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_SCTP \
+ (0x0080000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_IPV4_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_TUNNEL_4_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6 \
+ (0x0000000400000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_4 \
+ (0x0000000800000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_UDP \
+ (0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP \
+ (0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_L3_LEN_SHIFT 7
+
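
The DPAA_PKT_TYPE_* constants above are composed so that masking a received 64-bit parse word with DPAA_PARSE_MASK yields exactly one of them. A hedged sketch of that classification step (prs is an illustrative parse word; only two of the many cases are shown):

#include <rte_mbuf_ptype.h>

/* Sketch: map a masked FMan parse word to an mbuf packet type. */
static inline uint32_t
dpaa_parse_to_ptype(uint64_t prs)
{
	switch (prs & DPAA_PARSE_MASK) {
	case DPAA_PKT_TYPE_IPV4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
		       RTE_PTYPE_L4_UDP;
	case DPAA_PKT_TYPE_IPV6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
		       RTE_PTYPE_L4_TCP;
	default:
		return RTE_PTYPE_UNKNOWN;
	}
}
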
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+ uint8_t lpid; /**< Logical port id */
+ uint8_t shimr; /**< Shim header result */
+ union {
+ uint16_t l2r; /**< Layer 2 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t ethernet:1;
+ uint16_t vlan:1;
+ uint16_t llc_snap:1;
+ uint16_t mpls:1;
+ uint16_t ppoe_ppp:1;
+ uint16_t unused_1:3;
+ uint16_t unknown_eth_proto:1;
+ uint16_t eth_frame_type:2;
+ uint16_t l2r_err:5;
+			/* 00-unicast, 01-multicast, 11-broadcast */
+#else
+ uint16_t l2r_err:5;
+ uint16_t eth_frame_type:2;
+ uint16_t unknown_eth_proto:1;
+ uint16_t unused_1:3;
+ uint16_t ppoe_ppp:1;
+ uint16_t mpls:1;
+ uint16_t llc_snap:1;
+ uint16_t vlan:1;
+ uint16_t ethernet:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint16_t l3r; /**< Layer 3 result */
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t first_ipv4:1;
+ uint16_t first_ipv6:1;
+ uint16_t gre:1;
+ uint16_t min_enc:1;
+ uint16_t last_ipv4:1;
+ uint16_t last_ipv6:1;
+ uint16_t first_info_err:1;/*0 info, 1 error*/
+ uint16_t first_ip_err_code:5;
+ uint16_t last_info_err:1; /*0 info, 1 error*/
+ uint16_t last_ip_err_code:3;
+#else
+ uint16_t last_ip_err_code:3;
+ uint16_t last_info_err:1; /*0 info, 1 error*/
+ uint16_t first_ip_err_code:5;
+ uint16_t first_info_err:1;/*0 info, 1 error*/
+ uint16_t last_ipv6:1;
+ uint16_t last_ipv4:1;
+ uint16_t min_enc:1;
+ uint16_t gre:1;
+ uint16_t first_ipv6:1;
+ uint16_t first_ipv4:1;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ union {
+ uint8_t l4r; /**< Layer 4 result */
+ struct{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint8_t l4_type:3;
+ uint8_t l4_info_err:1;
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+#else
+ uint8_t l4_result:4;
+ /* if type IPSec: 1 ESP, 2 AH */
+ uint8_t l4_info_err:1;
+ uint8_t l4_type:3;
+#endif
+ } __attribute__((__packed__));
+ } __attribute__((__packed__));
+ uint8_t cplan; /**< Classification plan id */
+ uint16_t nxthdr; /**< Next Header */
+ uint16_t cksum; /**< Checksum */
+ uint32_t lcv; /**< LCV */
+ uint8_t shim_off[3]; /**< Shim offset */
+ uint8_t eth_off; /**< ETH offset */
+ uint8_t llc_snap_off; /**< LLC_SNAP offset */
+ uint8_t vlan_off[2]; /**< VLAN offset */
+ uint8_t etype_off; /**< ETYPE offset */
+ uint8_t pppoe_off; /**< PPP offset */
+ uint8_t mpls_off[2]; /**< MPLS offset */
+ uint8_t ip_off[2]; /**< IP offset */
+ uint8_t gre_off; /**< GRE offset */
+ uint8_t l4_off; /**< Layer 4 offset */
+ uint8_t nxthdr_off; /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* This structure holds the data prepended to the frame, used by FMAN */
+struct annotations_t {
+ uint8_t reserved[DEFAULT_RX_ICEOF];
+	struct dpaa_eth_parse_results_t parse;	/**< Parse results */
+ uint64_t reserved1;
+ uint64_t hash; /**< Hash Result */
+};
+
+#define GET_ANNOTATIONS(_buf) \
+ (struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_RX_ICEOF)
+
+#define GET_TX_PRS(_buf) \
+ (struct dpaa_eth_parse_results_t *)((uint8_t *)(_buf) + \
+ DEFAULT_TX_ICEOF)
+
+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+ struct rte_mbuf **bufs __rte_unused,
+ uint16_t nb_bufs __rte_unused);
+
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid);
+
+int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+ struct qm_fd *fd,
+ uint32_t bpid);
+
+#endif
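
To see how the pieces of this header fit together on the Rx path: GET_RX_PRS() locates the FMan parse results inside the annotation area, and the L3R/L4R result bits drive the mbuf classification. A simplified sketch (hypothetical helper; the in-tree Rx code does a fuller job):

#include <rte_mbuf.h>

/* Sketch: derive coarse L3/L4 types from the parse results. */
static inline void
dpaa_classify_rx(void *annotation, struct rte_mbuf *m)
{
	struct dpaa_eth_parse_results_t *prs = GET_RX_PRS(annotation);

	if (prs->first_ipv4)
		m->packet_type |= RTE_PTYPE_L3_IPV4;
	else if (prs->first_ipv6)
		m->packet_type |= RTE_PTYPE_L3_IPV6;

	if (prs->l4r & DPAA_L4_PARSE_RESULT_UDP)
		m->packet_type |= RTE_PTYPE_L4_UDP;
	else if (prs->l4r & DPAA_L4_PARSE_RESULT_TCP)
		m->packet_type |= RTE_PTYPE_L4_TCP;
}
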
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 32cd819b..ee9b2cce 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -63,8 +63,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 4c82aa87..e3ab90ac 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -49,14 +49,14 @@
#include "../dpaa2_ethdev.h"
-static void
+static int
dpaa2_distset_to_dpkg_profile_cfg(
- uint32_t req_dist_set,
+ uint64_t req_dist_set,
struct dpkg_profile_cfg *kg_cfg);
int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
- uint32_t req_dist_set)
+ uint64_t req_dist_set)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
struct fsl_mc_io *dpni = priv->hw;
@@ -68,20 +68,26 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- RTE_LOG(ERR, PMD, "Memory unavaialble\n");
+ PMD_INIT_LOG(ERR, "Memory unavailable");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
- dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+ ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported",
+ req_dist_set);
+ rte_free(p_params);
+ return ret;
+ }
tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
- ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -90,9 +96,9 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
&tc_cfg);
rte_free(p_params);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Setting distribution for Rx failed with err: %d\n",
- ret);
+ PMD_INIT_LOG(ERR,
+ "Setting distribution for Rx failed with err: %d",
+ ret);
return ret;
}
@@ -113,19 +119,19 @@ int dpaa2_remove_flow_dist(
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- RTE_LOG(ERR, PMD, "Memory unavaialble\n");
+ PMD_INIT_LOG(ERR, "Memory unavailable");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
-
+ kg_cfg.num_extracts = 0;
tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
tc_cfg.dist_size = 0;
tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
- ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -133,18 +139,16 @@ int dpaa2_remove_flow_dist(
ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
&tc_cfg);
rte_free(p_params);
- if (ret) {
- RTE_LOG(ERR, PMD,
- "Setting distribution for Rx failed with err: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ PMD_INIT_LOG(ERR,
+ "Setting distribution for Rx failed with err:%d",
+ ret);
return ret;
}
-static void
+static int
dpaa2_distset_to_dpkg_profile_cfg(
- uint32_t req_dist_set,
+ uint64_t req_dist_set,
struct dpkg_profile_cfg *kg_cfg)
{
uint32_t loop = 0, i = 0, dist_field = 0;
@@ -278,14 +282,17 @@ dpaa2_distset_to_dpkg_profile_cfg(
break;
default:
- PMD_DRV_LOG(WARNING, "Bad flow distribution"
- " option %x\n", dist_field);
+ PMD_INIT_LOG(WARNING,
+ "Unsupported flow dist option %x",
+ dist_field);
+ return -EINVAL;
}
}
req_dist_set = req_dist_set >> 1;
loop++;
}
kg_cfg->num_extracts = i;
+ return 0;
}
int
@@ -337,6 +344,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
bpool_cfg.pools[0].backup_pool = 0;
bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
DPAA2_PACKET_LAYOUT_ALIGN);
+ bpool_cfg.pools[0].priority_mask = 0;
retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
if (retcode != 0) {
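
The key change in this file is that dpaa2_distset_to_dpkg_profile_cfg() now reports an unsupported rss_hf bit as -EINVAL instead of merely warning, so a bad rte_eth_rss_conf fails configuration up front. The conversion walks the 64-bit set one bit at a time; a condensed sketch of that pattern (the supported-bit test is illustrative):

#include <stdint.h>
#include <errno.h>

/* Illustrative set of rss_hf bits the hardware can hash on. */
#define SUPPORTED_DIST_BITS 0x3fULL

/* Sketch: walk rss_hf bit by bit, rejecting anything unsupported. */
static int
walk_dist_set(uint64_t req_dist_set)
{
	unsigned int loop = 0, num_extracts = 0;

	while (req_dist_set) {
		if (req_dist_set & 1) {
			uint64_t dist_field = 1ULL << loop;

			if (!(dist_field & SUPPORTED_DIST_BITS))
				return -EINVAL;
			num_extracts++;	/* one dpkg extract per bit */
		}
		req_dist_set >>= 1;
		loop++;
	}
	return (int)num_extracts;
}
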
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 429b3a08..202f84f0 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -52,8 +52,32 @@
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
+struct rte_dpaa2_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint8_t page_id; /* dpni statistics page id */
+ uint8_t stats_id; /* stats id in the given page */
+};
+
+static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
+ {"ingress_multicast_frames", 0, 2},
+ {"ingress_multicast_bytes", 0, 3},
+ {"ingress_broadcast_frames", 0, 4},
+ {"ingress_broadcast_bytes", 0, 5},
+ {"egress_multicast_frames", 1, 2},
+ {"egress_multicast_bytes", 1, 3},
+ {"egress_broadcast_frames", 1, 4},
+ {"egress_broadcast_bytes", 1, 5},
+ {"ingress_filtered_frames", 2, 0},
+ {"ingress_discarded_frames", 2, 1},
+ {"ingress_nobuffer_discards", 2, 2},
+ {"egress_discarded_frames", 2, 3},
+ {"egress_confirmed_frames", 2, 4},
+};
+
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
+static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -138,7 +162,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return ret;
}
-static void
+static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -158,6 +182,14 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
ret);
}
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ RTE_LOG(INFO, PMD,
+ "VLAN extend offload not supported\n");
+ }
+
+ return 0;
}
static int
@@ -304,8 +336,10 @@ fail:
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_dev_data *data = dev->data;
- struct rte_eth_conf *eth_conf = &data->dev_conf;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ int rx_ip_csum_offload = false;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -324,18 +358,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
- /* Check for correct configuration */
- if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
- data->nb_rx_queues > 1) {
- PMD_INIT_LOG(ERR, "Distribution is not enabled, "
- "but Rx queues more than 1\n");
- return -1;
- }
-
if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
- /* Return in case number of Rx queues is 1 */
- if (data->nb_rx_queues == 1)
- return 0;
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf);
if (ret) {
@@ -344,6 +367,41 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
return ret;
}
}
+
+ if (eth_conf->rxmode.hw_ip_checksum)
+ rx_ip_csum_offload = true;
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set RX L3 csum: Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set RX L4 csum: Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L3_CSUM, true);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set TX L3 csum: Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L4_CSUM, true);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set TX L4 csum: Error = %d\n", ret);
+ return ret;
+ }
+
+ /* update the current status */
+ dpaa2_dev_link_update(dev, 0);
+
return 0;
}
@@ -370,8 +428,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
- dev, rx_queue_id, mb_pool, rx_conf);
+ PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
bpid = mempool_to_bpid(mb_pool);
@@ -419,8 +477,9 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*enabling per rx queue congestion control */
taildrop.threshold = CONG_THRESHOLD_RX_Q;
taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
- PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
- rx_queue_id);
+ taildrop.oal = CONG_RX_OAL;
+ PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
+ rx_queue_id);
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, &taildrop);
@@ -455,8 +514,10 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* Return if queue already configured */
- if (dpaa2_q->flow_id != 0xffff)
+ if (dpaa2_q->flow_id != 0xffff) {
+ dev->data->tx_queues[tx_queue_id] = dpaa2_q;
return 0;
+ }
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
@@ -555,9 +616,87 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+/**
+ * DPAA2 link interrupt handler
+ *
+ * @param param
+ *	The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+dpaa2_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = param;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+ int irq_index = DPNI_IRQ_INDEX;
+ unsigned int status = 0, clear = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, &status);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
+ clear = 0xffffffff;
+ goto out;
+ }
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+ clear = DPNI_IRQ_EVENT_LINK_CHANGED;
+ dpaa2_dev_link_update(dev, 0);
+ /* calling all the apps registered for link status event */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
+out:
+ ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, clear);
+ if (unlikely(ret))
+ RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
+}
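
Because the handler funnels DPNI_IRQ_EVENT_LINK_CHANGED into _rte_eth_dev_callback_process(), applications consume it through the standard ethdev callback API. A minimal application-side sketch (generic DPDK code; nothing here is DPAA2-specific):

#include <stdio.h>
#include <rte_ethdev.h>

/* Application callback invoked on a link-status-change event. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type type,
	     void *param, void *ret_param)
{
	struct rte_eth_link link;

	(void)type; (void)param; (void)ret_param;
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link is %s\n", port_id,
	       link.link_status ? "up" : "down");
	return 0;
}

/* dev_conf.intr_conf.lsc must be set before rte_eth_dev_start(). */
static void
register_lsc_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_cb, NULL);
}
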
+
+static int
+dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
+{
+ int err = 0;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int irq_index = DPNI_IRQ_INDEX;
+ unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
+
+ PMD_INIT_FUNC_TRACE();
+
+ err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, mask);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
+ strerror(-err));
+ return err;
+ }
+
+ err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
+ irq_index, enable);
+ if (err < 0)
+ PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
+ strerror(-err));
+
+ return err;
+}
+
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
+ struct rte_device *rdev = dev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
struct rte_eth_dev_data *data = dev->data;
struct dpaa2_dev_priv *priv = data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
@@ -567,6 +706,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
struct dpni_queue_id qid;
struct dpaa2_queue *dpaa2_q;
int ret, i;
+ struct rte_intr_handle *intr_handle;
+
+ dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+ intr_handle = &dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -577,7 +720,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
return ret;
}
- /* Power up the phy. Needed to make the link go Up */
+ /* Power up the phy. Needed to make the link go UP */
dpaa2_dev_set_link_up(dev);
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
@@ -601,34 +744,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
dpaa2_q->fqid = qid.fqid;
}
- ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L3_CSUM, true);
- if (ret) {
- PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
- return ret;
- }
-
- ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L4_CSUM, true);
- if (ret) {
- PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
- return ret;
- }
-
- ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L3_CSUM, true);
- if (ret) {
- PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
- return ret;
- }
-
- ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L4_CSUM, true);
- if (ret) {
- PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
- return ret;
- }
-
/*checksum errors, send them to normal path and set it in annotation */
err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
@@ -643,8 +758,33 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
return ret;
}
/* VLAN Offload Settings */
- if (priv->max_vlan_filters)
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (priv->max_vlan_filters) {
+ ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ if (ret) {
+			PMD_INIT_LOG(ERR, "dpaa2_vlan_offload_set failed:"
+				    " code = %d\n", ret);
+ return ret;
+ }
+ }
+
+
+	/* if the interrupts were configured on this device */
+ if (intr_handle && (intr_handle->fd) &&
+ (dev->data->dev_conf.intr_conf.lsc != 0)) {
+ /* Registering LSC interrupt handler */
+ rte_intr_callback_register(intr_handle,
+ dpaa2_interrupt_handler,
+ (void *)dev);
+
+ /* enable vfio intr/eventfd mapping
+		 * Interrupt index 0 is required, so we cannot use
+ * rte_intr_enable.
+ */
+ rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
+
+ /* enable dpni_irqs */
+ dpaa2_eth_setup_irqs(dev, 1);
+ }
return 0;
}
@@ -660,9 +800,25 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
int ret;
struct rte_eth_link link;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
PMD_INIT_FUNC_TRACE();
+ /* reset interrupt callback */
+ if (intr_handle && (intr_handle->fd) &&
+ (dev->data->dev_conf.intr_conf.lsc != 0)) {
+		/* disable dpni irqs */
+ dpaa2_eth_setup_irqs(dev, 0);
+
+ /* disable vfio intr before callback unregister */
+ rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
+
+ /* Unregistering LSC interrupt handler */
+ rte_intr_callback_unregister(intr_handle,
+ dpaa2_interrupt_handler,
+ (void *)dev);
+ }
+
dpaa2_dev_set_link_down(dev);
ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
@@ -838,7 +994,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
PMD_DRV_LOG(ERR, "setting the max frame length failed");
return -1;
}
- PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
return 0;
}
@@ -916,7 +1072,7 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
"error: Setting the MAC ADDR failed %d\n", ret);
}
static
-void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -931,17 +1087,17 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
if (!dpni) {
RTE_LOG(ERR, PMD, "dpni is NULL\n");
- return;
+ return -EINVAL;
}
if (!stats) {
RTE_LOG(ERR, PMD, "stats is NULL\n");
- return;
+ return -EINVAL;
}
/*Get Counters from page_0*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
- page0, &value);
+ page0, 0, &value);
if (retcode)
goto err;
@@ -950,7 +1106,7 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
/*Get Counters from page_1*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
- page1, &value);
+ page1, 0, &value);
if (retcode)
goto err;
@@ -959,7 +1115,7 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
/*Get Counters from page_2*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
- page2, &value);
+ page2, 0, &value);
if (retcode)
goto err;
@@ -971,15 +1127,158 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
stats->oerrors = value.page_2.egress_discarded_frames;
stats->imissed = value.page_2.ingress_nobuffer_discards;
- return;
+ return 0;
err:
RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
- return;
+ return retcode;
};
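
With stats_get returning int, failures now propagate up to rte_eth_stats_get() instead of being swallowed. A caller-side sketch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Fetch and print the basic counters, honouring the error return. */
static void
dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret = rte_eth_stats_get(port_id, &stats);

	if (ret != 0) {
		printf("stats unavailable on port %u (err %d)\n",
		       port_id, ret);
		return;
	}
	printf("ipackets=%" PRIu64 " imissed=%" PRIu64 " ierrors=%" PRIu64 "\n",
	       stats.ipackets, stats.imissed, stats.ierrors);
}
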
-static
-void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
+static int
+dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ union dpni_statistics value[3] = {};
+ unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
+
+ if (xstats == NULL)
+ return 0;
+
+ if (n < num)
+ return num;
+
+ /* Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 0, 0, &value[0]);
+ if (retcode)
+ goto err;
+
+ /* Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 1, 0, &value[1]);
+ if (retcode)
+ goto err;
+
+ /* Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 2, 0, &value[2]);
+ if (retcode)
+ goto err;
+
+ for (i = 0; i < num; i++) {
+ xstats[i].id = i;
+ xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
+ raw.counter[dpaa2_xstats_strings[i].stats_id];
+ }
+ return i;
+err:
+ RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
+ return retcode;
+}
+
+static int
+dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+
+ if (xstats_names != NULL)
+ for (i = 0; i < stat_cnt; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s",
+ dpaa2_xstats_strings[i].name);
+
+ return stat_cnt;
+}
+
+static int
+dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ uint64_t values_copy[stat_cnt];
+
+ if (!ids) {
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ union dpni_statistics value[3] = {};
+
+ if (n < stat_cnt)
+ return stat_cnt;
+
+ if (!values)
+ return 0;
+
+ /* Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 0, 0, &value[0]);
+ if (retcode)
+ return 0;
+
+ /* Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 1, 0, &value[1]);
+ if (retcode)
+ return 0;
+
+ /* Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ 2, 0, &value[2]);
+ if (retcode)
+ return 0;
+
+ for (i = 0; i < stat_cnt; i++) {
+ values[i] = value[dpaa2_xstats_strings[i].page_id].
+ raw.counter[dpaa2_xstats_strings[i].stats_id];
+ }
+ return stat_cnt;
+ }
+
+ dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= stat_cnt) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
+static int
+dpaa2_xstats_get_names_by_id(
+ struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+
+ if (!ids)
+ return dpaa2_xstats_get_names(dev, xstats_names, limit);
+
+ dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= stat_cnt) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
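
The by-id handlers enable the usual two-phase pattern: resolve a statistic's id once by name, then poll just that counter. An application-side sketch using one of the names from the table above:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Poll one extended statistic by name via the by-id API. */
static void
poll_xstat(uint16_t port_id, const char *name)
{
	uint64_t id, value;

	if (rte_eth_xstats_get_id_by_name(port_id, name, &id) != 0) {
		printf("no xstat named %s on port %u\n", name, port_id);
		return;
	}
	if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
		printf("%s = %" PRIu64 "\n", name, value);
}

/* e.g. poll_xstat(port_id, "ingress_nobuffer_discards"); */
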
+
+static void
+dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
@@ -1014,8 +1313,6 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link, old;
struct dpni_link_state state = {0};
- PMD_INIT_FUNC_TRACE();
-
if (dpni == NULL) {
RTE_LOG(ERR, PMD, "dpni is NULL\n");
return 0;
@@ -1048,7 +1345,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
if (link.link_status)
PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
else
- PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
+ PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
return 0;
}
@@ -1063,8 +1360,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
struct dpaa2_dev_priv *priv;
struct fsl_mc_io *dpni;
int en = 0;
-
- PMD_INIT_FUNC_TRACE();
+ struct dpni_link_state state = {0};
priv = dev->data->dev_private;
dpni = (struct fsl_mc_io *)priv->hw;
@@ -1090,11 +1386,21 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
return -EINVAL;
}
}
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ return -1;
+ }
+
/* changing tx burst function to start enqueues */
dev->tx_pkt_burst = dpaa2_dev_tx;
- dev->data->dev_link.link_status = 1;
+ dev->data->dev_link.link_status = state.up;
- PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
+ if (state.up)
+ PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
+ dev->data->port_id);
+ else
+ PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
return ret;
}
@@ -1299,6 +1605,108 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
return ret;
}
+static int
+dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rss_conf->rss_hf) {
+ ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "unable to set flow dist");
+ return ret;
+ }
+ } else {
+ ret = dpaa2_remove_flow_dist(dev, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "unable to remove flow dist");
+ return ret;
+ }
+ }
+ eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+ return 0;
+}
+
+static int
+dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+	/* dpaa2 does not support rss_key, so length should be 0 */
+ rss_conf->rss_key_len = 0;
+ rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+ return 0;
+}
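
rss_hash_update makes the Rx distribution retunable at runtime; since the hardware exposes no programmable RSS key, only rss_hf is honoured (a zero rss_hf disables distribution via dpaa2_remove_flow_dist()). An application-side sketch:

#include <rte_ethdev.h>

/* Re-hash Rx traffic on IP + UDP fields at runtime. */
static int
retune_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* DPAA2 has no programmable key */
		.rss_key_len = 0,
		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}
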
+
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+ dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+ else
+ return -EINVAL;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_DPCON;
+ cfg.destination.id = dpcon_id;
+ cfg.destination.priority = queue_conf->ev.priority;
+
+ options |= DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (uint64_t)(dpaa2_ethq);
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+ struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+ uint8_t flow_id = dpaa2_ethq->flow_id;
+ struct dpni_queue cfg;
+ uint8_t options;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+ options = DPNI_QUEUE_OPT_DEST;
+ cfg.destination.type = DPNI_DEST_NONE;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+ dpaa2_ethq->tc_index, flow_id, options, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+
+ return ret;
+}
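
These hooks are intended to be driven by eventdev Rx-adapter glue rather than called by applications directly: attach points an Rx queue's destination at a DPCON channel so received frames surface as rte_event objects (parallel scheduling is the only mode accepted). A hedged caller sketch (the wiring around it is illustrative):

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch: steer Rx queue 0 of eth_dev into a DPCON-backed event flow. */
static int
attach_rxq_to_events(const struct rte_eth_dev *eth_dev, uint16_t dpcon_id,
		     uint8_t ev_queue_id)
{
	struct rte_event_eth_rx_adapter_queue_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.ev.queue_id = ev_queue_id;
	conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;	/* required */
	conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return dpaa2_eth_eventq_attach(eth_dev, 0 /* rx queue id */,
				       dpcon_id, &conf);
}
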
+
static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_configure = dpaa2_eth_dev_configure,
.dev_start = dpaa2_dev_start,
@@ -1312,7 +1720,12 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.dev_set_link_down = dpaa2_dev_set_link_down,
.link_update = dpaa2_dev_link_update,
.stats_get = dpaa2_dev_stats_get,
+ .xstats_get = dpaa2_dev_xstats_get,
+ .xstats_get_by_id = dpaa2_xstats_get_by_id,
+ .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
+ .xstats_get_names = dpaa2_xstats_get_names,
.stats_reset = dpaa2_dev_stats_reset,
+ .xstats_reset = dpaa2_dev_stats_reset,
.fw_version_get = dpaa2_fw_version_get,
.dev_infos_get = dpaa2_dev_info_get,
.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
@@ -1328,6 +1741,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.mac_addr_add = dpaa2_dev_add_mac_addr,
.mac_addr_remove = dpaa2_dev_remove_mac_addr,
.mac_addr_set = dpaa2_dev_set_mac_addr,
+ .rss_hash_update = dpaa2_dev_rss_hash_update,
+ .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};
static int
@@ -1384,22 +1799,19 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
goto init_err;
}
- priv->num_tc = attr.num_tcs;
+ priv->num_rx_tc = attr.num_rx_tcs;
- /* Resetting the "num_rx_vqueues" to equal number of queues in first TC
+ /* Resetting the "num_rx_queues" to equal number of queues in first TC
* as only one TC is supported on Rx Side. Once Multiple TCs will be
* in use for Rx processing then this will be changed or removed.
*/
priv->nb_rx_queues = attr.num_queues;
- /* TODO:Using hard coded value for number of TX queues due to dependency
- * in MC.
- */
- priv->nb_tx_queues = 8;
+ /* Using number of TX queues as number of TX TCs */
+ priv->nb_tx_queues = attr.num_tx_tcs;
- PMD_INIT_LOG(DEBUG, "num TC - RX %d", priv->num_tc);
- PMD_INIT_LOG(DEBUG, "nb_tx_queues %d", priv->nb_tx_queues);
- PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
+ PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
+ priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);
priv->hw = dpni_dev;
priv->hw_id = hw_id;
@@ -1460,11 +1872,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
eth_dev->dev_ops = &dpaa2_ethdev_ops;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
rte_fslmc_vfio_dmamap();
+ RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
return 0;
init_err:
dpaa2_dev_uninit(eth_dev);
@@ -1525,6 +1939,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+	RTE_LOG(INFO, PMD, "%s: netdev deleted\n", eth_dev->data->name);
return 0;
}
@@ -1588,7 +2003,7 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
}
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
- .drv_type = DPAA2_MC_DPNI_DEVID,
+ .drv_type = DPAA2_ETH,
.probe = rte_dpaa2_probe,
.remove = rte_dpaa2_remove,
};
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index a2902da2..b8e94aa3 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -34,6 +34,8 @@
#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H
+#include <rte_event_eth_rx_adapter.h>
+
#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>
@@ -61,6 +63,7 @@
* currently considering 32 KB packets
*/
#define CONG_THRESHOLD_RX_Q (64 * 1024)
+#define CONG_RX_OAL 128
/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256
@@ -87,20 +90,33 @@ struct dpaa2_dev_priv {
uint32_t options;
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
- uint8_t num_tc;
+ uint8_t num_rx_tc;
uint8_t flags; /*dpaa2 config flags */
};
int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
- uint32_t req_dist_set);
+ uint64_t req_dist_set);
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index);
int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ uint16_t dpcon_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id);
+
uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
+void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 3c057a39..8ecd238d 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -122,7 +122,7 @@ dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
if (BIT_ISSET_AT_POS(annotation->word3,
L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_RX_VLAN;
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -350,7 +350,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
- rte_pktmbuf_free(mbuf);
return -1;
}
m = (struct rte_mbuf *)mb;
@@ -382,8 +381,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_OFFSET(fd),
DPAA2_GET_FD_LEN(fd));
- /*free the original packet */
- rte_pktmbuf_free(mbuf);
return 0;
}
@@ -422,7 +419,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
- while (!qbman_check_command_complete(swp,
+ while (!qbman_check_command_complete(
get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
;
clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
@@ -445,7 +442,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* Also seems like the SWP is shared between the Ethernet Driver
* and the SEC driver.
*/
- while (!qbman_check_command_complete(swp, dq_storage))
+ while (!qbman_check_command_complete(dq_storage))
;
if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
clear_swp_active_dqs(q_storage->active_dpio_id);
@@ -453,7 +450,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
- while (!qbman_result_has_new_result(swp, dq_storage))
+ while (!qbman_check_new_result(dq_storage))
;
rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
/* Check whether Last Pull command is Expired and
@@ -486,7 +483,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
- while (!qbman_check_command_complete(swp,
+ while (!qbman_check_command_complete(
get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
;
clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
@@ -517,6 +514,26 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_rx;
}
+void __attribute__((hot))
+dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/
@@ -560,7 +577,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
while (nb_pkts) {
/*Check if the queue is congested*/
retry_count = 0;
- if (qbman_result_SCN_state_in_mem(dpaa2_q->cscn)) {
+ while (qbman_result_SCN_state(dpaa2_q->cscn)) {
retry_count++;
/* Retry for some time before giving up */
if (retry_count > CONG_RETRY_COUNT)
@@ -580,39 +597,35 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
mp = mi->pool;
}
/* Not a hw_pkt pool allocated frame */
- if (!mp) {
+ if (unlikely(!mp || !priv->bp_list)) {
PMD_TX_LOG(ERR, "err: no bpool attached");
- goto skip_tx;
+ goto send_n_return;
}
+
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
PMD_TX_LOG(ERR, "non hw offload bufffer ");
/* alloc should be from the default buffer pool
* attached to this interface
*/
- if (priv->bp_list) {
- bpid = priv->bp_list->buf_pool.bpid;
- } else {
- PMD_TX_LOG(ERR,
- "err: no bpool attached");
- num_tx = 0;
- goto skip_tx;
- }
+ bpid = priv->bp_list->buf_pool.bpid;
+
if (unlikely((*bufs)->nb_segs > 1)) {
PMD_TX_LOG(ERR, "S/G support not added"
" for non hw offload buffer");
- goto skip_tx;
+ goto send_n_return;
}
if (eth_copy_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid)) {
- bufs++;
- continue;
+ goto send_n_return;
}
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
} else {
bpid = mempool_to_bpid(mp);
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop], bpid))
- goto skip_tx;
+ goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid);
@@ -622,7 +635,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
loop = 0;
while (loop < frames_to_send) {
- loop += qbman_swp_send_multiple(swp, &eqdesc,
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop], frames_to_send - loop);
}
@@ -630,6 +643,20 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
dpaa2_q->tx_pkts += frames_to_send;
nb_pkts -= frames_to_send;
}
+ return num_tx;
+
+send_n_return:
+ /* send any already prepared fd */
+ if (loop) {
+ unsigned int i = 0;
+
+ while (i < loop) {
+ i += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[i], loop - i);
+ }
+ num_tx += loop;
+ dpaa2_q->tx_pkts += loop;
+ }
skip_tx:
return num_tx;
}
diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
new file mode 100644
index 00000000..3f98907f
--- /dev/null
+++ b/drivers/net/dpaa2/mc/dpkg.c
@@ -0,0 +1,107 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpkg.h>
+
+/**
+ * dpkg_prepare_key_cfg() - prepare extract parameters
+ * @cfg: defining a full Key Generation profile (rule)
+ * @key_cfg_buf: Zeroed 256-byte memory buffer, to be mapped for DMA
+ *
+ * This function has to be called before the following functions:
+ *	- dpni_set_rx_tc_dist()
+ *	- dpni_set_qos_table()
+ */
+int
+dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
+{
+ int i, j;
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ dpkg_set_field(extr->efh_type, EFH_TYPE,
+ cfg->extracts[i].extract.from_hdr.type);
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cpu_to_le32(
+ cfg->extracts[i].extract.from_hdr.field);
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
+ cfg->extracts[i].type);
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return 0;
+}
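
Usage follows the sequence already visible in dpaa2_setup_flow_dist() above: allocate and zero a DMA-able 256-byte buffer, serialize the profile into it with dpkg_prepare_key_cfg(), then hand the buffer's IOVA to dpni_set_rx_tc_dist(). A condensed sketch (error paths trimmed; constants as used elsewhere in this patch):

#include <errno.h>
#include <string.h>
#include <rte_malloc.h>

/* Sketch: serialize a key-generation profile for the MC firmware. */
static int
prepare_dist_key(const struct dpkg_profile_cfg *kg_cfg, uint8_t **buf_out)
{
	uint8_t *p_params;
	int ret;

	p_params = rte_malloc(NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (p_params == NULL)
		return -ENOMEM;
	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);

	ret = dpkg_prepare_key_cfg(kg_cfg, p_params);
	if (ret) {
		rte_free(p_params);
		return ret;
	}
	/* caller passes DPAA2_VADDR_TO_IOVA(p_params) to dpni_set_rx_tc_dist() */
	*buf_out = p_params;
	return 0;
}
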
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index c2d39691..6f671fe0 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -42,100 +42,39 @@
#include <fsl_dpni.h>
#include <fsl_dpni_cmd.h>
-int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
- uint8_t *key_cfg_buf)
-{
- int i, j;
- int offset = 0;
- int param = 1;
- uint64_t *params = (uint64_t *)key_cfg_buf;
-
- if (!key_cfg_buf || !cfg)
- return -EINVAL;
-
- params[0] |= mc_enc(0, 8, cfg->num_extracts);
- params[0] = cpu_to_le64(params[0]);
-
- if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
- return -EINVAL;
-
- for (i = 0; i < cfg->num_extracts; i++) {
- switch (cfg->extracts[i].type) {
- case DPKG_EXTRACT_FROM_HDR:
- params[param] |= mc_enc(0, 8,
- cfg->extracts[i].extract.from_hdr.prot);
- params[param] |= mc_enc(8, 4,
- cfg->extracts[i].extract.from_hdr.type);
- params[param] |= mc_enc(16, 8,
- cfg->extracts[i].extract.from_hdr.size);
- params[param] |= mc_enc(24, 8,
- cfg->extracts[i].extract.
- from_hdr.offset);
- params[param] |= mc_enc(32, 32,
- cfg->extracts[i].extract.
- from_hdr.field);
- params[param] = cpu_to_le64(params[param]);
- param++;
- params[param] |= mc_enc(0, 8,
- cfg->extracts[i].extract.
- from_hdr.hdr_index);
- break;
- case DPKG_EXTRACT_FROM_DATA:
- params[param] |= mc_enc(16, 8,
- cfg->extracts[i].extract.
- from_data.size);
- params[param] |= mc_enc(24, 8,
- cfg->extracts[i].extract.
- from_data.offset);
- params[param] = cpu_to_le64(params[param]);
- param++;
- break;
- case DPKG_EXTRACT_FROM_PARSE:
- params[param] |= mc_enc(16, 8,
- cfg->extracts[i].extract.
- from_parse.size);
- params[param] |= mc_enc(24, 8,
- cfg->extracts[i].extract.
- from_parse.offset);
- params[param] = cpu_to_le64(params[param]);
- param++;
- break;
- default:
- return -EINVAL;
- }
- params[param] |= mc_enc(
- 24, 8, cfg->extracts[i].num_of_byte_masks);
- params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
- params[param] = cpu_to_le64(params[param]);
- param++;
- for (offset = 0, j = 0;
- j < DPKG_NUM_OF_MASKS;
- offset += 16, j++) {
- params[param] |= mc_enc(
- (offset), 8, cfg->extracts[i].masks[j].mask);
- params[param] |= mc_enc(
- (offset + 8), 8,
- cfg->extracts[i].masks[j].offset);
- }
- params[param] = cpu_to_le64(params[param]);
- param++;
- }
- return 0;
-}
-
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpni_id,
uint16_t *token)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
cmd_flags,
0);
- DPNI_CMD_OPEN(cmd, dpni_id);
+ cmd_params = (struct dpni_cmd_open *)cmd.params;
+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -143,11 +82,22 @@ int dpni_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_close(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -163,12 +113,35 @@ int dpni_close(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
-int dpni_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpni_cfg *cfg,
- uint32_t *obj_id)
+/**
+ * dpni_create() - Create the DPNI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPNI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id)
{
+ struct dpni_cmd_create *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -176,7 +149,14 @@ int dpni_create(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPNI_CMD_CREATE(cmd, cfg);
+ cmd_params = (struct dpni_cmd_create *)cmd.params;
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->num_queues = cfg->num_queues;
+ cmd_params->num_tcs = cfg->num_tcs;
+ cmd_params->mac_filter_entries = cfg->mac_filter_entries;
+ cmd_params->vlan_filter_entries = cfg->vlan_filter_entries;
+ cmd_params->qos_entries = cfg->qos_entries;
+ cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -184,16 +164,32 @@ int dpni_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
-int dpni_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+/**
+ * dpni_destroy() - Destroy the DPNI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
{
+ struct dpni_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
@@ -201,28 +197,63 @@ int dpni_destroy(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
/* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ cmd_params = (struct dpni_cmd_destroy *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(object_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * Mandatory for DPNI operation.
+ * Warning: Allowed only when DPNI is disabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_pools(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const struct dpni_pools_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
cmd_flags,
token);
- DPNI_CMD_SET_POOLS(cmd, cfg);
+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
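
A hedged sketch of populating the pools configuration consumed above; the
'dpbp_id' is assumed to come from a DPBP object created elsewhere:

	struct dpni_pools_cfg pools_cfg = { 0 };

	pools_cfg.num_dpbp = 1;
	pools_cfg.pools[0].dpbp_id = dpbp_id;	/* assumed DPBP object id */
	pools_cfg.pools[0].buffer_size = 2048;	/* bytes per buffer */
	pools_cfg.pools[0].backup_pool = 0;	/* primary pool, not backup */
	err = dpni_set_pools(mc_io, 0, token, &pools_cfg);
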
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -238,6 +269,14 @@ int dpni_enable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_disable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -253,15 +292,27 @@ int dpni_disable(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
int err;
+
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+ cmd_flags,
token);
/* send command to mc*/
@@ -270,11 +321,20 @@ int dpni_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_IS_ENABLED(cmd, *en);
+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
return 0;
}
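
Typical usage of the enable/query pair, sketched under the assumption that
'token' was returned by an earlier dpni_open():

	int en = 0;

	err = dpni_enable(mc_io, 0, token);
	if (!err)
		err = dpni_is_enabled(mc_io, 0, token, &en);
	/* on success 'en' is 1 here; dpni_disable() reverses it */
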
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_reset(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -290,12 +350,256 @@ int dpni_reset(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, none of the causes
+ * can assert the interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+	int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupt status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
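
The four IRQ helpers above compose into the usual mask/enable/acknowledge
flow; a sketch with interrupt index 0 and an all-causes mask (illustrative
values only). Note that 'status' must be pre-initialized, because
dpni_get_irq_status() also sends it to the MC as an input:

	uint32_t status = 0;

	dpni_set_irq_mask(mc_io, 0, token, 0, 0xFFFFFFFF);
	dpni_set_irq_enable(mc_io, 0, token, 0, 1);
	/* ... later, when an interrupt fires ... */
	dpni_get_irq_status(mc_io, 0, token, 0, &status);
	dpni_clear_irq_status(mc_io, 0, token, 0, status);	/* W1C ack */
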
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_attributes(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_attr *attr)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
int err;
/* prepare command */
@@ -309,28 +613,65 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_ATTR(cmd, attr);
+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->num_queues = rsp_params->num_queues;
+ attr->num_rx_tcs = rsp_params->num_rx_tcs;
+ attr->num_tx_tcs = rsp_params->num_tx_tcs;
+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->qos_entries = rsp_params->qos_entries;
+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+ attr->qos_key_size = rsp_params->qos_key_size;
+ attr->fs_key_size = rsp_params->fs_key_size;
+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
return 0;
}
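
A short sketch of consuming the attributes decoded above:

	struct dpni_attr attr = { 0 };

	err = dpni_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		printf("queues=%u rx_tcs=%u tx_tcs=%u fs=%u wriop=0x%x\n",
		       attr.num_queues, attr.num_rx_tcs, attr.num_tx_tcs,
		       attr.fs_entries, attr.wriop_version);
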
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- struct dpni_error_cfg *cfg)
+ struct dpni_error_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
cmd_flags,
token);
- DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+ cmd_params->errors = cpu_to_le32(cfg->errors);
+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to retrieve configuration for
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -338,13 +679,16 @@ int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
struct dpni_buffer_layout *layout)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
cmd_flags,
token);
- DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype);
+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -352,29 +696,72 @@ int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout);
+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+ layout->data_align = le16_to_cpu(rsp_params->data_align);
+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
return 0;
}
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue this configuration applies to
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- const struct dpni_buffer_layout *layout)
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
cmd_flags,
token);
- DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout);
+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->options = cpu_to_le16(layout->options);
+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+ cmd_params->data_align = cpu_to_le16(layout->data_align);
+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
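
A hedged Rx layout configuration sketch; the DPNI_BUF_LAYOUT_OPT_* flag names
are assumed from fsl_dpni.h, and only fields whose option bit is set in
'options' take effect:

	struct dpni_buffer_layout layout = { 0 };

	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
	layout.data_head_room = 128;	/* bytes reserved before the frame */
	layout.data_align = 64;		/* buffer alignment, in bytes */
	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
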
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, non-zero value enables the offload
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
int dpni_set_offload(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -382,17 +769,32 @@ int dpni_set_offload(struct fsl_mc_io *mc_io,
uint32_t config)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
- /* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
cmd_flags,
token);
- DPNI_CMD_SET_OFFLOAD(cmd, type, config);
+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+ cmd_params->config = cpu_to_le32(config);
- /* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, a value of 1 indicates that the
+ * offload is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
int dpni_get_offload(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -400,13 +802,16 @@ int dpni_get_offload(struct fsl_mc_io *mc_io,
uint32_t *config)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
cmd_flags,
token);
- DPNI_CMD_GET_OFFLOAD(cmd, type);
+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -414,11 +819,24 @@ int dpni_get_offload(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_OFFLOAD(cmd, *config);
+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+ *config = le32_to_cpu(rsp_params->config);
return 0;
}
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to receive QDID for
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_qdid(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -426,13 +844,16 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io,
uint16_t *qdid)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
cmd_flags,
token);
- DPNI_CMD_GET_QDID(cmd, qtype);
+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+ cmd_params->qtype = qtype;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -440,34 +861,92 @@ int dpni_get_qdid(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_QDID(cmd, *qdid);
+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+ *qdid = le16_to_cpu(rsp_params->qdid);
return 0;
}
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *data_offset)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
+ return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const struct dpni_link_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
cmd_flags,
token);
- DPNI_CMD_SET_LINK_CFG(cmd, cfg);
+ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_link_state(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_link_state *state)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
int err;
/* prepare command */
@@ -481,34 +960,60 @@ int dpni_get_link_state(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_LINK_STATE(cmd, state);
+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
return 0;
}
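
A polling sketch built on the call above (the cast avoids assuming
<inttypes.h> is available):

	struct dpni_link_state state = { 0 };

	err = dpni_get_link_state(mc_io, 0, token, &state);
	if (!err && state.up)
		printf("link up at %u Mbps, options=0x%llx\n", state.rate,
		       (unsigned long long)state.options);
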
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in bytes);
+ * frame is discarded if its length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint16_t max_frame_length)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
cmd_flags,
token);
- DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in bytes);
+ * frame is discarded if its length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint16_t *max_frame_length)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
int err;
/* prepare command */
@@ -522,34 +1027,56 @@ int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
return 0;
}
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int en)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
cmd_flags,
token);
- DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en);
+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
int err;
/* prepare command */
@@ -563,34 +1090,56 @@ int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en);
+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
return 0;
}
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int en)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
cmd_flags,
token);
- DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
int *en)
{
struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
int err;
/* prepare command */
@@ -604,35 +1153,59 @@ int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
return 0;
}
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const uint8_t mac_addr[6])
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
cmd_flags,
token);
- DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t mac_addr[6])
{
struct mc_command cmd = { 0 };
- int err;
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
@@ -645,45 +1218,85 @@ int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
return 0;
}
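
Note the 'mac_addr[5 - i]' reversal in both helpers: the MC command buffer
appears to carry the address least-significant byte first, while the API
keeps conventional network byte order, so a set/get round trip is identity:

	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	uint8_t readback[6];

	dpni_set_primary_mac_addr(mc_io, 0, token, mac);
	dpni_get_primary_mac_addr(mc_io, 0, token, readback);
	/* readback[] now equals mac[]; both calls reverse the byte order */
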
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const uint8_t mac_addr[6])
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
cmd_flags,
token);
- DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr);
+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const uint8_t mac_addr[6])
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
cmd_flags,
token);
- DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr);
+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -691,24 +1304,40 @@ int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
int multicast)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
cmd_flags,
token);
- DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast);
+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_port_mac_addr() - Retrieve the MAC address associated with the
+ * physical port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t mac_addr[6])
{
struct mc_command cmd = { 0 };
- int err;
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
@@ -721,62 +1350,105 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr);
+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
return 0;
}
+/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
- int en)
+ uint16_t token,
+ int en)
{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
cmd_flags,
token);
- DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint16_t vlan_id)
{
+ struct dpni_cmd_vlan_id *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
cmd_flags,
token);
- DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint16_t vlan_id)
{
+ struct dpni_cmd_vlan_id *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
cmd_flags,
token);
- DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_clear_vlan_filters() - Clear all VLAN filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -792,6 +1464,19 @@ int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * warning: If 'dist_mode != DPNI_DIST_MODE_NONE', call dpkg_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -799,87 +1484,185 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_tc_dist_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
cmd_flags,
token);
- DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ cmd_params->tc_id = tc_id;
+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->flags,
+ DIST_MODE,
+ cfg->dist_mode);
+ dpni_set_field(cmd_params->flags,
+ MISS_ACTION,
+ cfg->fs_cfg.miss_action);
+ dpni_set_field(cmd_params->keep_hash_key,
+ KEEP_HASH_KEY,
+ cfg->fs_cfg.keep_hash_key);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
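
As the warning above notes, any mode other than DPNI_DIST_MODE_NONE needs a
DMA-able key composition prepared first. A sketch; DPNI_DIST_MODE_HASH is
assumed from the header, and the virtual-to-IOVA translation of 'key_buf' is
platform-specific and only hinted at here:

	struct dpkg_profile_cfg kg_cfg = { 0 };
	struct dpni_rx_tc_dist_cfg dist_cfg = { 0 };
	uint8_t key_buf[256];	/* must be DMA-able in a real caller */

	/* ... fill kg_cfg.extracts[] with the header fields to hash on ... */
	err = dpkg_prepare_key_cfg(&kg_cfg, key_buf);
	if (err)
		return err;
	dist_cfg.dist_size = 8;	/* spread across 8 queues */
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
	dist_cfg.key_cfg_iova = (uint64_t)(uintptr_t)key_buf; /* simplified */
	err = dpni_set_rx_tc_dist(mc_io, 0, token, 0 /* tc_id */, &dist_cfg);
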
-int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_confirmation_mode mode)
+/**
+ * dpni_set_tx_confirmation_mode() - Set Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
+ * selected at DPNI creation.
+ * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all
+ * transmit confirmation (including the private confirmation queues), regardless
+ * of previous settings. Note that in this case, Tx error frames are still
+ * enqueued to the general transmit errors queue.
+ * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
+ * Tx confirmations to a shared Tx conf queue. The 'index' field in the
+ * dpni_get_queue command is then ignored.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode)
{
+ struct dpni_tx_confirmation_mode *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE,
cmd_flags,
token);
- DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode);
+ cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params;
+ cmd_params->confirmation_mode = mode;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int dpni_set_congestion_notification(
- struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- uint8_t tc_id,
+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
const struct dpni_congestion_notification_cfg *cfg)
{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(
- DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
- cmd_flags,
- token);
- DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id, cfg);
+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+ dpni_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units,
+ CONG_UNITS,
+ cfg->units);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
+/**
+ * dpni_get_congestion_notification() - Get traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Returned congestion notification configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint8_t tc_id,
struct dpni_congestion_notification_cfg *cfg)
{
+ struct dpni_rsp_get_congestion_notification *rsp_params;
+ struct dpni_cmd_get_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(
- DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
- cmd_flags,
- token);
- DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc_id);
+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg);
+ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
+ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
+ DEST_TYPE);
return 0;
}
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpni_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
@@ -891,87 +1674,177 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
if (err)
return err;
- DPNI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
+ * configuration options are set on the queue
+ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
+ uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
- uint8_t index,
+ uint8_t tc,
+ uint8_t index,
uint8_t options,
- const struct dpni_queue *queue)
+ const struct dpni_queue *queue)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
cmd_flags,
token);
- DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue);
-
- /* send command to mc*/
+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+ cmd_params->dest_prio = queue->destination.priority;
+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+ queue->destination.hold_active);
+ cmd_params->flc = cpu_to_le64(queue->flc.value);
+ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
+ uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
- uint8_t index,
+ uint8_t tc,
+ uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
cmd_flags,
token);
- DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index);
+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
- /* send command to mc*/
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_QUEUE(cmd, queue, qid);
+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
+ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+ HOLD_ACTIVE);
+ queue->flc.value = le64_to_cpu(rsp_params->flc);
+ queue->user_context = le64_to_cpu(rsp_params->user_context);
+ qid->fqid = le32_to_cpu(rsp_params->fqid);
+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
return 0;
}
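
A sketch pairing the two queue calls: set the user context on Rx queue 0 of
TC 0, then read back the frame queue id. The DPNI_QUEUE_OPT_USER_CTX option
flag name is assumed from the header:

	struct dpni_queue queue = { 0 };
	struct dpni_queue_id qid = { 0 };

	queue.user_context = 0x12345678;
	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
			     DPNI_QUEUE_OPT_USER_CTX, &queue);
	if (!err)
		err = dpni_get_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
				     &queue, &qid);
	/* qid.fqid is the FQID to use when driving the queue directly */
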
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * @param: Custom parameter for some pages used to select
+ * a certain statistic source, for example the TC.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t page,
+ uint8_t param,
union dpni_statistics *stat)
{
struct mc_command cmd = { 0 };
- int err;
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
cmd_flags,
token);
- DPNI_CMD_GET_STATISTICS(cmd, page);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
+ cmd_params->param = param;
- /* send command to mc*/
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_STATISTICS(cmd, stat);
+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
return 0;
}
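
Reading a statistics page then reduces to indexing the raw counters; which
counter sits at which index is defined per page and assumed here:

	union dpni_statistics stats;

	err = dpni_get_statistics(mc_io, 0, token, 0 /* page */, 0, &stats);
	if (!err)
		printf("page0 counter0=%llu\n",
		       (unsigned long long)stats.raw.counter[0]);
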
+/**
+ * dpni_reset_statistics() - Clear DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_reset_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token)
@@ -987,52 +1860,117 @@ int dpni_reset_statistics(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ *
+ * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current
+ * congestion notification or early drop (WRED) configuration previously applied
+ * to the same TC.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point, DPNI_CP_QUEUE is only supported in
+ * combination with DPNI_QUEUE_RX.
+ * @qtype: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX.
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution.
+ * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_congestion_point cg_point,
- enum dpni_queue_type q_type,
+ enum dpni_queue_type qtype,
uint8_t tc,
- uint8_t q_index,
+ uint8_t index,
struct dpni_taildrop *taildrop)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
cmd_flags,
token);
- DPNI_CMD_SET_TAILDROP(cmd, cg_point, q_type, tc, q_index, taildrop);
-
- /* send command to mc*/
+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->units = taildrop->units;
+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+ dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable);
+ dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal);
+ dpni_set_field(cmd_params->oal_hi,
+ OAL_HI,
+ taildrop->oal >> DPNI_OAL_LO_SIZE);
+
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype: Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index: Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if 'cg_point' is not DPNI_CP_QUEUE.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
int dpni_get_taildrop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
- enum dpni_congestion_point cg_point,
- enum dpni_queue_type q_type,
- uint8_t tc,
- uint8_t q_index,
- struct dpni_taildrop *taildrop)
+ uint16_t token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_taildrop *taildrop)
{
struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ uint8_t oal_lo, oal_hi;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
cmd_flags,
token);
- DPNI_CMD_GET_TAILDROP(cmd, cg_point, q_type, tc, q_index);
+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
- /* send command to mc*/
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPNI_RSP_GET_TAILDROP(cmd, taildrop);
+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+ taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE);
+ taildrop->units = rsp_params->units;
+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+ oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO);
+ oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI);
+ taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo;
+
+	/* Sign-extend the 12-bit two's complement 'oal' value to 16 bits */
+ if (taildrop->oal >= 0x0800)
+ taildrop->oal |= 0xF000;
return 0;
}
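
To make the sign extension above concrete (illustrative values only):

	/* raw 12-bit oal = 0x010 -> 0x0010, an offset of +16 bytes
	 * raw 12-bit oal = 0xFFF -> 0xFFF | 0xF000 = 0xFFFF, i.e. -1
	 * the 'oal >= 0x0800' test checks bit 11, the 12-bit sign bit
	 */
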
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
index 2391e401..7f46bafb 100644
--- a/drivers/net/dpaa2/mc/fsl_dpkg.h
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -93,6 +93,15 @@ struct dpkg_mask {
uint8_t offset;
};
+/* Macros for accessing command fields smaller than 1byte */
+#define DPKG_MASK(field) \
+ GENMASK(DPKG_##field##_SHIFT + DPKG_##field##_SIZE - 1, \
+ DPKG_##field##_SHIFT)
+#define dpkg_set_field(var, field, val) \
+ ((var) |= (((val) << DPKG_##field##_SHIFT) & DPKG_MASK(field)))
+#define dpkg_get_field(var, field) \
+ (((var) & DPKG_MASK(field)) >> DPKG_##field##_SHIFT)
+
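These accessors mirror the dpni_set_field()/dpni_get_field() helpers used
throughout dpni.c; a sketch with the EFH_TYPE field defined further down in
this header (the value written is illustrative):

	uint8_t efh = 0;

	dpkg_set_field(efh, EFH_TYPE, 0x2);
	/* dpkg_get_field(efh, EFH_TYPE) == 0x2; other bits are untouched */
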
/**
* struct dpkg_extract - A structure for defining a single extraction
* @type: Determines how the union below is interpreted:
@@ -136,12 +145,12 @@ struct dpkg_extract {
*/
struct {
- enum net_prot prot;
+ enum net_prot prot;
enum dpkg_extract_from_hdr_type type;
- uint32_t field;
- uint8_t size;
- uint8_t offset;
- uint8_t hdr_index;
+ uint32_t field;
+ uint8_t size;
+ uint8_t offset;
+ uint8_t hdr_index;
} from_hdr;
/**
* struct from_data
@@ -166,8 +175,8 @@ struct dpkg_extract {
} from_parse;
} extract;
- uint8_t num_of_byte_masks;
- struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+ uint8_t num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
};
/**
@@ -181,4 +190,48 @@ struct dpkg_profile_cfg {
struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
};
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+#define DPKG_EFH_TYPE_SHIFT 0
+#define DPKG_EFH_TYPE_SIZE 4
+#define DPKG_EXTRACT_TYPE_SHIFT 0
+#define DPKG_EXTRACT_TYPE_SIZE 4
+
+struct dpni_dist_extract {
+ /* word 0 */
+ uint8_t prot;
+ /* EFH type stored in the 4 least significant bits */
+ uint8_t efh_type;
+ uint8_t size;
+ uint8_t offset;
+ uint32_t field;
+ /* word 1 */
+ uint8_t hdr_index;
+ uint8_t constant;
+ uint8_t num_of_repeats;
+ uint8_t num_of_byte_masks;
+ /* Extraction type is stored in the 4 LSBs */
+ uint8_t extract_type;
+ uint8_t pad[3];
+ /* word 2 */
+ struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+ /* extension word 0 */
+ uint8_t num_extracts;
+ uint8_t pad[7];
+ /* words 1..25 */
+ struct dpni_dist_extract extracts[10];
+};
+
+int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf);
+
#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 64db70dc..5227ea15 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -110,47 +110,19 @@ struct fsl_mc_io;
*/
#define DPNI_OPT_NO_FS 0x000020
-/**
- * dpni_open() - Open a control session for the specified object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dpni_id: DPNI unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpni_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpni_id,
- uint16_t *token);
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token);
-/**
- * dpni_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpni_cfg - Structure representing DPNI configuration
- * @mac_addr: Primary MAC address
- * @adv: Advanced parameters; default is all zeros;
+ * @mac_addr: Primary MAC address
+ * @adv: Advanced parameters; default is all zeros;
* use this structure to change default settings
*/
struct dpni_cfg {
@@ -217,141 +189,111 @@ struct dpni_cfg {
uint8_t qos_entries;
};
-/**
- * dpni_create() - Create the DPNI object
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: returned object id
- *
- * Create the DPNI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpni_cfg *cfg,
- uint32_t *obj_id);
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpni_destroy() - Destroy the DPNI object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
/**
* struct dpni_pools_cfg - Structure representing buffer pools configuration
- * @num_dpbp: Number of DPBPs
- * @pools: Array of buffer pools parameters; The number of valid entries
- * must match 'num_dpbp' value
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
*/
struct dpni_pools_cfg {
- uint8_t num_dpbp;
+ uint8_t num_dpbp;
/**
* struct pools - Buffer pools parameters
* @dpbp_id: DPBP object ID
+	 * @priority_mask: Priority mask that indicates the TCs used with this
+	 *	buffer pool. If set to 0x00, the MC will assume the value 0xff.
* @buffer_size: Buffer size
* @backup_pool: Backup pool
*/
struct {
int dpbp_id;
+ uint8_t priority_mask;
uint16_t buffer_size;
int backup_pool;
} pools[DPNI_MAX_DPBP];
};
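/*
 * A hedged sketch: attach one buffer pool and restrict it to TC0/TC1
 * through the new priority mask; 'dpbp_id', 'mc_io' and 'token' are
 * assumed to exist from earlier setup.
 */
struct dpni_pools_cfg pools_cfg = { .num_dpbp = 1 };

pools_cfg.pools[0].dpbp_id = dpbp_id;
pools_cfg.pools[0].priority_mask = 0x03;	/* TCs 0-1; 0x00 means all (0xff) */
pools_cfg.pools[0].buffer_size = 2048;
int err = dpni_set_pools(mc_io, 0, token, &pools_cfg);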
-/**
- * dpni_set_pools() - Set buffer pools configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @cfg: Buffer pools configuration
- *
- * mandatory for DPNI operation
- * warning:Allowed only when DPNI is disabled
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_pools(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- const struct dpni_pools_cfg *cfg);
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg);
-/**
- * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
- * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * Return: '0' on Success; Error code otherwise.
+ * DPNI IRQ Index and Events
*/
-int dpni_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
/**
- * dpni_is_enabled() - Check if the DPNI is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
+ * IRQ index
*/
-int dpni_is_enabled(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
-
+#define DPNI_IRQ_INDEX 0
/**
- * dpni_reset() - Reset the DPNI, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * Return: '0' on Success; Error code otherwise.
+ * IRQ event - indicates a change in link state
*/
-int dpni_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en);
+
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en);
+
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask);
+
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status);
+
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status);
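/*
 * A hedged usage sketch: arm the link-state-change interrupt on a DPNI.
 * 'mc_io' and 'token' are assumed to come from a prior dpni_open();
 * error handling is elided.
 */
int err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
			    DPNI_IRQ_EVENT_LINK_CHANGED);
if (!err)
	err = dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);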
/**
* struct dpni_attr - Structure representing DPNI attributes
@@ -363,7 +305,8 @@ int dpni_reset(struct fsl_mc_io *mc_io,
* DPNI_OPT_HAS_KEY_MASKING
* DPNI_OPT_NO_FS
* @num_queues: Number of Tx and Rx queues used for traffic distribution.
- * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI.
+ * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI.
* @mac_filter_entries: Number of entries in the MAC address filtering
* table.
* @vlan_filter_entries: Number of entries in the VLAN address filtering
@@ -390,7 +333,8 @@ int dpni_reset(struct fsl_mc_io *mc_io,
struct dpni_attr {
uint32_t options;
uint8_t num_queues;
- uint8_t num_tcs;
+ uint8_t num_rx_tcs;
+ uint8_t num_tx_tcs;
uint8_t mac_filter_entries;
uint8_t vlan_filter_entries;
uint8_t qos_entries;
@@ -400,19 +344,10 @@ struct dpni_attr {
uint16_t wriop_version;
};
-/**
- * dpni_get_attributes() - Retrieve DPNI attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @attr: Object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_attr *attr);
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr);
/**
* DPNI errors
@@ -457,33 +392,22 @@ enum dpni_error_action {
/**
* struct dpni_error_cfg - Structure representing DPNI errors treatment
- * @errors: Errors mask; use 'DPNI_ERROR__<X>
- * @error_action: The desired action for the errors mask
- * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
- * status (FAS); relevant only for the non-discard action
+ * @errors:			Errors mask; use the 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame
+ * annotation status (FAS); relevant only
+ * for the non-discard action
*/
struct dpni_error_cfg {
- uint32_t errors;
- enum dpni_error_action error_action;
- int set_frame_annotation;
+ uint32_t errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
};
-/**
- * dpni_set_errors_behavior() - Set errors behavior
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @cfg: Errors configuration
- *
- * this function may be called numerous times with different
- * error masks
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_error_cfg *cfg);
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg);
/**
* DPNI buffer layout modification options
@@ -520,25 +444,26 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
/**
* struct dpni_buffer_layout - Structure representing DPNI buffer layout
- * @options: Flags representing the suggested modifications to the buffer
- * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
- * @pass_timestamp: Pass timestamp value
- * @pass_parser_result: Pass parser results
- * @pass_frame_status: Pass frame status
- * @private_data_size: Size kept for private data (in bytes)
- * @data_align: Data alignment
- * @data_head_room: Data head room
- * @data_tail_room: Data tail room
+ * @options: Flags representing the suggested modifications to the
+ * buffer layout;
+ * Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
*/
struct dpni_buffer_layout {
- uint32_t options;
- int pass_timestamp;
- int pass_parser_result;
- int pass_frame_status;
- uint16_t private_data_size;
- uint16_t data_align;
- uint16_t data_head_room;
- uint16_t data_tail_room;
+ uint32_t options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t data_head_room;
+ uint16_t data_tail_room;
};
/**
@@ -547,45 +472,24 @@ struct dpni_buffer_layout {
* @DPNI_QUEUE_TX: Tx queue
* @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
* @DPNI_QUEUE_RX_ERR: Rx error queue
- */enum dpni_queue_type {
+ */
+enum dpni_queue_type {
DPNI_QUEUE_RX,
DPNI_QUEUE_TX,
DPNI_QUEUE_TX_CONFIRM,
DPNI_QUEUE_RX_ERR,
};
-/**
- * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue to get the layout from
- * @layout: Returns buffer layout attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- struct dpni_buffer_layout *layout);
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
-/**
- * dpni_set_buffer_layout() - Set buffer layout configuration.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue to set layout on
- * @layout: Buffer layout configuration
- *
- * Return: '0' on Success; Error code otherwise.
- *
- * @warning Allowed only when DPNI is disabled
- */
-int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
const struct dpni_buffer_layout *layout);
/**
@@ -594,72 +498,39 @@ int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
* @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
* @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
* @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ * @DPNI_FLCTYPE_HASH: Flow context will be generated by WRIOP for AIOP or
+ * for CPU
*/
enum dpni_offload {
DPNI_OFF_RX_L3_CSUM,
DPNI_OFF_RX_L4_CSUM,
DPNI_OFF_TX_L3_CSUM,
DPNI_OFF_TX_L4_CSUM,
+ DPNI_FLCTYPE_HASH,
};
-/**
- * dpni_set_offload() - Set DPNI offload configuration.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @type: Type of DPNI offload
- * @config: Offload configuration.
- * For checksum offloads, non-zero value enables
- * the offload.
- *
- * Return: '0' on Success; Error code otherwise.
- *
- * @warning Allowed only when DPNI is disabled
- */
int dpni_set_offload(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_offload type,
uint32_t config);
-/**
- * dpni_get_offload() - Get DPNI offload configuration.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @type: Type of DPNI offload
- * @config: Offload configuration.
- * For checksum offloads, a value of 1 indicates that the
- * offload is enabled.
- *
- * Return: '0' on Success; Error code otherwise.
- *
- * @warning Allowed only when DPNI is disabled
- */
int dpni_get_offload(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_offload type,
uint32_t *config);
-/**
- * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
- * for enqueue operations
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue to get QDID for. For applications lookig to
- * transmit traffic this should be set to DPNI_QUEUE_TX
- * @qdid: Returned virtual QDID value that should be used as an argument
- * in all enqueue operations
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_qdid(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- uint16_t *qdid);
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *data_offset);
#define DPNI_STATISTICS_CNT 7
@@ -715,6 +586,23 @@ union dpni_statistics {
uint64_t egress_confirmed_frames;
} page_2;
/**
+ * struct page_3 - Page_3 statistics structure with values for the
+ * selected TC
+ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
+ * dequeued
+ * @ceetm_dequeue_frames: Cumulative count of the number of frames
+ * dequeued
+ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
+ * frames whose enqueue was rejected
+ * @ceetm_reject_frames: Cumulative count of all frame enqueues rejected
+ */
+ struct {
+ uint64_t ceetm_dequeue_bytes;
+ uint64_t ceetm_dequeue_frames;
+ uint64_t ceetm_reject_bytes;
+ uint64_t ceetm_reject_frames;
+ } page_3;
+ /**
* struct raw - raw statistics structure, used to index counters
*/
struct {
@@ -738,6 +626,10 @@ union dpni_statistics {
 * Enable asymmetric pause frames
*/
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/**
* struct - Structure representing DPNI link configuration
@@ -749,278 +641,117 @@ struct dpni_link_cfg {
uint64_t options;
};
-/**
- * dpni_set_link_cfg() - set the link configuration.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @cfg: Link configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- const struct dpni_link_cfg *cfg);
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_link_cfg *cfg);
/**
* struct dpni_link_state - Structure representing DPNI link state
- * @rate: Rate
- * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
- * @up: Link state; '0' for down, '1' for up
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
*/
struct dpni_link_state {
- uint32_t rate;
- uint64_t options;
- int up;
+ uint32_t rate;
+ uint64_t options;
+ int up;
};
-/**
- * dpni_get_link_state() - Return the link state (either up or down)
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @state: Returned link state;
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_link_state(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_link_state *state);
-
-/**
- * dpni_set_max_frame_length() - Set the maximum received frame length.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @max_frame_length: Maximum received frame length (in
- * bytes); frame is discarded if its
- * length exceeds this value
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint16_t max_frame_length);
-
-/**
- * dpni_get_max_frame_length() - Get the maximum received frame length.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @max_frame_length: Maximum received frame length (in
- * bytes); frame is discarded if its
- * length exceeds this value
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint16_t *max_frame_length);
-
-/**
- * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Set to '1' to enable; '0' to disable
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int en);
-
-/**
- * dpni_get_multicast_promisc() - Get multicast promiscuous mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Returns '1' if enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
-
-/**
- * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Set to '1' to enable; '0' to disable
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int en);
-
-/**
- * dpni_get_unicast_promisc() - Get unicast promiscuous mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Returns '1' if enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int *en);
-
-/**
- * dpni_set_primary_mac_addr() - Set the primary MAC address
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mac_addr: MAC address to set as primary address
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- const uint8_t mac_addr[6]);
-
-/**
- * dpni_get_primary_mac_addr() - Get the primary MAC address
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mac_addr: Returned MAC address
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t mac_addr[6]);
-
-/**
- * dpni_add_mac_addr() - Add MAC address filter
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mac_addr: MAC address to add
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- const uint8_t mac_addr[6]);
-
-/**
- * dpni_remove_mac_addr() - Remove MAC address filter
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mac_addr: MAC address to remove
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- const uint8_t mac_addr[6]);
-
-/**
- * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @unicast: Set to '1' to clear unicast addresses
- * @multicast: Set to '1' to clear multicast addresses
- *
- * The primary MAC address is not cleared by this operation.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int unicast,
- int multicast);
-
-/**
- * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
- * port the DPNI is attached to
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mac_addr: MAC address of the physical port, if any, otherwise 0
- *
- * The primary MAC address is not modified by this operation.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t mac_addr[6]);
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state);
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length);
+
+int dpni_set_mtu(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t mtu);
+
+int dpni_get_mtu(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *mtu);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
-/**
- * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @en: Set to '1' to enable; '0' to disable
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- int en);
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int unicast,
+ int multicast);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
-/**
- * dpni_add_vlan_id() - Add VLAN ID filter
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @vlan_id: VLAN ID to add
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_add_vlan_id(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint16_t vlan_id);
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t vlan_id);
-/**
- * dpni_remove_vlan_id() - Remove VLAN ID filter
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @vlan_id: VLAN ID to remove
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_remove_vlan_id(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint16_t vlan_id);
-
-/**
- * dpni_clear_vlan_filters() - Clear all VLAN filters
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* enum dpni_dist_mode - DPNI distribution mode
@@ -1050,27 +781,16 @@ enum dpni_fs_miss_action {
/**
* struct dpni_fs_tbl_cfg - Flow Steering table configuration
- * @miss_action: Miss action selection
- * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
*/
struct dpni_fs_tbl_cfg {
- enum dpni_fs_miss_action miss_action;
- uint16_t default_flow_id;
+ enum dpni_fs_miss_action miss_action;
+ uint16_t default_flow_id;
+ char keep_hash_key;
};
/**
- * dpni_prepare_key_cfg() - function prepare extract parameters
- * @cfg: defining a full Key Generation profile (rule)
- * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
- *
- * This function has to be called before the following functions:
- * - dpni_set_rx_tc_dist()
- * - dpni_set_qos_table()
- */
-int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
- uint8_t *key_cfg_buf);
-
-/**
* struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
* @dist_size: Set the distribution size;
* supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
@@ -1078,36 +798,24 @@ int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
* @dist_mode: Distribution mode
* @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
* the extractions to be used for the distribution key by calling
- * dpni_prepare_key_cfg() relevant only when
+ * dpkg_prepare_key_cfg(); relevant only when
* 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
* @fs_cfg: Flow Steering table configuration; only relevant if
* 'dist_mode = DPNI_DIST_MODE_FS'
*/
struct dpni_rx_tc_dist_cfg {
- uint16_t dist_size;
- enum dpni_dist_mode dist_mode;
- uint64_t key_cfg_iova;
- struct dpni_fs_tbl_cfg fs_cfg;
+ uint16_t dist_size;
+ enum dpni_dist_mode dist_mode;
+ uint64_t key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
};
-/**
- * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @tc_id: Traffic class selection (0-7)
- * @cfg: Traffic class distribution configuration
- *
- * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
- * first to prepare the key_cfg_iova parameter
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t tc_id,
- const struct dpni_rx_tc_dist_cfg *cfg);
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
/**
* enum dpni_congestion_unit - DPNI congestion units
* @DPNI_CONGESTION_UNIT_BYTES: bytes units
@@ -1147,9 +855,9 @@ enum dpni_dest {
* channel; not relevant for 'DPNI_DEST_NONE' option
*/
struct dpni_dest_cfg {
- enum dpni_dest dest_type;
- int dest_id;
- uint8_t priority;
+ enum dpni_dest dest_type;
+ int dest_id;
+ uint8_t priority;
};
/* DPNI congestion options */
@@ -1186,6 +894,11 @@ struct dpni_dest_cfg {
* sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
*/
#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
+/**
+ * When set, this congestion point will trigger flow control or priority
+ * flow control. This takes effect only if flow control is enabled with
+ * dpni_set_link_cfg()
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
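/*
 * A hedged sketch: for DPNI_CONG_OPT_FLOW_CONTROL to take effect, pause
 * must first be enabled through the link configuration, assuming
 * DPNI_LINK_OPT_PAUSE is the plain pause option defined earlier in this
 * header:
 */
struct dpni_link_cfg link_cfg = {
	.rate = 1000,	/* Mbps, illustrative */
	.options = DPNI_LINK_OPT_PAUSE | DPNI_LINK_OPT_PFC_PAUSE,
};
int err = dpni_set_link_cfg(mc_io, 0, token, &link_cfg);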
/**
* struct dpni_congestion_notification_cfg - congestion notification
@@ -1203,54 +916,35 @@ struct dpni_dest_cfg {
*/
struct dpni_congestion_notification_cfg {
- enum dpni_congestion_unit units;
- uint32_t threshold_entry;
- uint32_t threshold_exit;
- uint64_t message_ctx;
- uint64_t message_iova;
- struct dpni_dest_cfg dest_cfg;
- uint16_t notification_mode;
+ enum dpni_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ uint16_t notification_mode;
};
-/**
- * dpni_set_congestion_notification() - Set traffic class congestion
- * notification configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id: Traffic class selection (0-7)
- * @cfg: congestion notification configuration
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_set_congestion_notification(
- struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- uint8_t tc_id,
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
const struct dpni_congestion_notification_cfg *cfg);
-/**
- * dpni_get_congestion_notification() - Get traffic class congestion
- * notification configuration
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id: Traffic class selection (0-7)
- * @cfg: congestion notification configuration
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_queue_type qtype,
- uint8_t tc_id,
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc_id,
struct dpni_congestion_notification_cfg *cfg);
+/* DPNI FLC stash options */
+
+/**
+ * Stashes the whole annotation area (up to 192 bytes)
+ */
+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
/**
* struct dpni_queue - Queue structure
@@ -1291,9 +985,25 @@ struct dpni_queue {
uint64_t user_context;
/**
* struct flc - FD FLow Context structure
- * @value: FLC value to set
- * @stash_control: Boolean, indicates whether the 6 lowest
- * significant bits are used for stash control.
+ * @value: Default FLC value for traffic dequeued from
+ * this queue. Please check description of FD
+ * structure for more information.
+ * Note that FLC values set using dpni_add_fs_entry,
+ * if any, take precedence over values per queue.
+	 * @stash_control: Boolean, indicates whether the 6 lowest
+	 *	significant bits are used for stash control. If set, the 6
+ * least significant bits in value are interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context
+ * that are stashed. FLC value is interpreted as a memory address
+ * in this case, excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame
+ * annotation to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame
+ * data to be stashed. Frame data is placed at FD[ADDR] +
+ * FD[OFFSET].
+ * For more details check the Frame Descriptor section in the
+ * hardware documentation.
*/
struct {
uint64_t value;
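/*
 * A hedged sketch of composing an FLC value under the bit layout just
 * described; 'ctx_iova' is a hypothetical 64-byte-aligned address.
 */
uint64_t flc = ctx_iova & ~0x3FULL;	/* stash address, 6 LSBs cleared */
flc |= 1 << 0;	/* bits 0-1: one 64 B unit of flow context */
flc |= 1 << 2;	/* bits 2-3: one 64 B unit of frame annotation */
flc |= 2 << 4;	/* bits 4-5: two 64 B units of frame data */
/* then: queue.flc.value = flc; queue.flc.stash_control = 1; */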
@@ -1331,41 +1041,16 @@ enum dpni_confirmation_mode {
DPNI_CONF_DISABLE,
};
-/**
- * dpni_set_tx_confirmation_mode() - Tx confirmation mode
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @mode: Tx confirmation mode
- *
- * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
- * selected at DPNI creation.
- * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all
- * transmit confirmation (including the private confirmation queues), regardless
- * of previous settings; Note that in this case, Tx error frames are still
- * enqueued to the general transmit errors queue.
- * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
- * Tx confirmations to a shared Tx conf queue. The ID of the queue when
- * calling dpni_set/get_queue is -1.
- * Tx confirmation mode can only be changed while the DPNI is disabled.
- * Executing this command while the DPNI is enabled will return an error.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_confirmation_mode mode);
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode);
+
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode *mode);
-/**
- * dpni_get_api_version() - Get Data Path Network Interface API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path network interface API
- * @minor_ver: Minor version of data path network interface API
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
@@ -1396,23 +1081,6 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
*/
#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
-/**
- * dpni_set_queue() - Set queue parameters
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue - all queue types are supported, although
- * the command is ignored for Tx
- * @tc: Traffic class, in range 0 to NUM_TCS - 1
- * @index: Selects the specific queue out of the set
- * allocated for the same TC.Value must be in
- * range 0 to NUM_QUEUES - 1
- * @options: A combination of DPNI_QUEUE_OPT_ values that control
- * what configuration options are set on the queue
- * @queue: Queue configuration structure
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -1422,31 +1090,6 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint8_t options,
const struct dpni_queue *queue);
-/**
- * dpni_get_queue() - Get queue parameters
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @qtype: Type of queue - all queue types are supported
- * @tc: Traffic class, in range 0 to NUM_TCS - 1
- * @index: Selects the specific queue out of the set allocated
- * for the same TC. Value must be in range 0 to
- * NUM_QUEUES - 1
- * @queue: Queue configuration structure
- * @qid: Queue identification
- *
- * This function returns current queue configuration which can be changed by
- * calling dpni_set_queue, and queue identification information.
- * Returned qid.fqid and/or qid.qdbin values can be used to:
- * - enqueue traffic for Tx queues,
- * - perform volatile dequeue for Rx and, if applicable, Tx confirmation
- * clean-up,
- * - retrieve queue state.
- *
- * All these operations are supported through the DPIO run-time API.
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -1456,32 +1099,13 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
struct dpni_queue *queue,
struct dpni_queue_id *qid);
-/**
- * dpni_get_statistics() - Get DPNI statistics
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @page: Selects the statistics page to retrieve, see
- * DPNI_GET_STATISTICS output.
- * Pages are numbered 0 to 2.
- * @stat: Structure containing the statistics
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_get_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
uint8_t page,
+ uint8_t param,
union dpni_statistics *stat);
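/*
 * Hedged note: the new 'param' argument is assumed to select the
 * statistics source for pages that need one, e.g. the traffic class for
 * the CEETM counters of page_3 above; pages 0-2 are expected to ignore
 * it. Illustrative call:
 *
 *	err = dpni_get_statistics(mc_io, 0, token, 3, tc_id, &stat);
 */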
-/**
- * dpni_reset_statistics() - Clears DPNI statistics
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_reset_statistics(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token);
@@ -1505,40 +1129,25 @@ enum dpni_congestion_point {
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
* @units: Indicates the unit of THRESHOLD. Queue taildrop only
- * supports byte units, this field is ignored and
- * assumed = 0 if CONGESTION_POINT is 0.
+ * supports byte units, this field is ignored and
+ * assumed = 0 if CONGESTION_POINT is 0.
* @threshold: Threshold value, in units identified by UNITS field. Value 0
- * cannot be used as a valid taildrop threshold,
- * THRESHOLD must be > 0 if the taildrop is
- * enabled.
+ * cannot be used as a valid taildrop threshold,
+ * THRESHOLD must be > 0 if the taildrop is
+ * enabled.
+ * @oal:	Overhead Accounting Length, a 12-bit, 2's complement value
+ * with range (-2048 to +2047) representing a fixed per-frame
+ * overhead to be added to the actual length of a frame when
+ * performing WRED and tail drop calculations and threshold
+ * comparisons.
*/
struct dpni_taildrop {
char enable;
enum dpni_congestion_unit units;
uint32_t threshold;
+ int16_t oal;
};
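/*
 * A hedged sketch of round-tripping a signed OAL value through the
 * 12-bit field, mirroring the sign extension done in dpni_get_taildrop():
 */
int16_t oal = -64;				/* valid range: -2048..+2047 */
uint16_t enc = (uint16_t)oal & 0x0FFF;		/* 12-bit 2's complement */
int16_t dec = (enc & 0x0800) ? (int16_t)(enc | 0xF000) : (int16_t)enc;
/* dec == oal holds across the whole valid range */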
-/**
- * dpni_set_taildrop() - Set taildrop per queue or TC
- *
- * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current
- * congestion notification or early drop (WRED) configuration previously applied
- * to the same TC.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @cg_point: Congestion point. DPNI_CP_QUEUE is only supported in
- * combination with DPNI_QUEUE_RX.
- * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX.
- * @tc: Traffic class to apply this taildrop to
- * @q_index: Index of the queue if the DPNI supports multiple queues for
- * traffic distribution.
- * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE.
- * @taildrop: Taildrop structure
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
@@ -1548,21 +1157,6 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint8_t q_index,
struct dpni_taildrop *taildrop);
-/**
- * dpni_get_taildrop() - Get taildrop information
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @cg_point: Congestion point
- * @q_type:
- * @tc: Traffic class to apply this taildrop to
- * @q_index: Index of the queue if the DPNI supports multiple queues for
- * traffic distribution. Ignored if CONGESTION_POINT
- * is not 0.
- * @taildrop: Taildrop structure
- *
- * Return: '0' on Success; Error code otherwise.
- */
int dpni_get_taildrop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 2ac397cd..1a483329 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,435 +42,597 @@
/* DPNI Version */
#define DPNI_VER_MAJOR 7
-#define DPNI_VER_MINOR 0
+#define DPNI_VER_MINOR 3
+
+#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_VERSION_2 2
+#define DPNI_CMD_ID_OFFSET 4
+
+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2)
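/*
 * Worked example: the new encoding reproduces the old literals, e.g. for
 * the OPEN command below:
 *
 *	DPNI_CMD(0x801) == ((0x801) << 4) | 1 == 0x8011
 *
 * which equals the previous ((0x801 << 4) | (0x1)); DPNI_CMD_V2() only
 * changes the low version nibble to 2.
 */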
/* Command IDs */
-#define DPNI_CMDID_OPEN ((0x801 << 4) | (0x1))
-#define DPNI_CMDID_CLOSE ((0x800 << 4) | (0x1))
-#define DPNI_CMDID_CREATE ((0x901 << 4) | (0x1))
-#define DPNI_CMDID_DESTROY ((0x981 << 4) | (0x1))
-#define DPNI_CMDID_GET_API_VERSION ((0xa01 << 4) | (0x1))
-
-#define DPNI_CMDID_ENABLE ((0x002 << 4) | (0x1))
-#define DPNI_CMDID_DISABLE ((0x003 << 4) | (0x1))
-#define DPNI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
-#define DPNI_CMDID_RESET ((0x005 << 4) | (0x1))
-#define DPNI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
-
-#define DPNI_CMDID_SET_POOLS ((0x200 << 4) | (0x1))
-#define DPNI_CMDID_SET_ERRORS_BEHAVIOR ((0x20B << 4) | (0x1))
-
-#define DPNI_CMDID_GET_QDID ((0x210 << 4) | (0x1))
-#define DPNI_CMDID_GET_LINK_STATE ((0x215 << 4) | (0x1))
-#define DPNI_CMDID_SET_MAX_FRAME_LENGTH ((0x216 << 4) | (0x1))
-#define DPNI_CMDID_GET_MAX_FRAME_LENGTH ((0x217 << 4) | (0x1))
-#define DPNI_CMDID_SET_LINK_CFG ((0x21a << 4) | (0x1))
-
-#define DPNI_CMDID_SET_MCAST_PROMISC ((0x220 << 4) | (0x1))
-#define DPNI_CMDID_GET_MCAST_PROMISC ((0x221 << 4) | (0x1))
-#define DPNI_CMDID_SET_UNICAST_PROMISC ((0x222 << 4) | (0x1))
-#define DPNI_CMDID_GET_UNICAST_PROMISC ((0x223 << 4) | (0x1))
-#define DPNI_CMDID_SET_PRIM_MAC ((0x224 << 4) | (0x1))
-#define DPNI_CMDID_GET_PRIM_MAC ((0x225 << 4) | (0x1))
-#define DPNI_CMDID_ADD_MAC_ADDR ((0x226 << 4) | (0x1))
-#define DPNI_CMDID_REMOVE_MAC_ADDR ((0x227 << 4) | (0x1))
-#define DPNI_CMDID_CLR_MAC_FILTERS ((0x228 << 4) | (0x1))
-
-#define DPNI_CMDID_ENABLE_VLAN_FILTER ((0x230 << 4) | (0x1))
-#define DPNI_CMDID_ADD_VLAN_ID ((0x231 << 4) | (0x1))
-#define DPNI_CMDID_REMOVE_VLAN_ID ((0x232 << 4) | (0x1))
-#define DPNI_CMDID_CLR_VLAN_FILTERS ((0x233 << 4) | (0x1))
-
-#define DPNI_CMDID_SET_RX_TC_DIST ((0x235 << 4) | (0x1))
-
-#define DPNI_CMDID_GET_STATISTICS ((0x25D << 4) | (0x1))
-#define DPNI_CMDID_RESET_STATISTICS ((0x25E << 4) | (0x1))
-#define DPNI_CMDID_GET_QUEUE ((0x25F << 4) | (0x1))
-#define DPNI_CMDID_SET_QUEUE ((0x260 << 4) | (0x1))
-#define DPNI_CMDID_GET_TAILDROP ((0x261 << 4) | (0x1))
-#define DPNI_CMDID_SET_TAILDROP ((0x262 << 4) | (0x1))
-
-#define DPNI_CMDID_GET_PORT_MAC_ADDR ((0x263 << 4) | (0x1))
-
-#define DPNI_CMDID_GET_BUFFER_LAYOUT ((0x264 << 4) | (0x1))
-#define DPNI_CMDID_SET_BUFFER_LAYOUT ((0x265 << 4) | (0x1))
-
-#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION ((0x267 << 4) | (0x1))
-#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION ((0x268 << 4) | (0x1))
-#define DPNI_CMDID_GET_OFFLOAD ((0x26B << 4) | (0x1))
-#define DPNI_CMDID_SET_OFFLOAD ((0x26C << 4) | (0x1))
-#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE ((0x266 << 4) | (0x1))
-#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE ((0x26D << 4) | (0x1))
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_OPEN(cmd, dpni_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, (cfg)->options); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, (cfg)->num_queues); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, (cfg)->num_tcs); \
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, (cfg)->mac_filter_entries); \
- MC_CMD_OP(cmd, 1, 0, 8, uint8_t, (cfg)->vlan_filter_entries); \
- MC_CMD_OP(cmd, 1, 16, 8, uint8_t, (cfg)->qos_entries); \
- MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->fs_entries); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_POOLS(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
- MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
- MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
- MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
- MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
- MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
- MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
- MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
- MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
- MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
- MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
- MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
- MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
- MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
- MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
- MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
- MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
- MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
- MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
- MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
- MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
- MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
- MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
- MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
- MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* DPNI_CMD_GET_ATTR is not used, no input parameters */
-
-#define DPNI_RSP_GET_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, (attr)->options); \
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, (attr)->num_queues); \
- MC_RSP_OP(cmd, 0, 40, 8, uint8_t, (attr)->num_tcs); \
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->mac_filter_entries); \
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, (attr)->vlan_filter_entries); \
- MC_RSP_OP(cmd, 1, 16, 8, uint8_t, (attr)->qos_entries); \
- MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (attr)->fs_entries); \
- MC_RSP_OP(cmd, 2, 0, 8, uint8_t, (attr)->qos_key_size); \
- MC_RSP_OP(cmd, 2, 8, 8, uint8_t, (attr)->fs_key_size); \
- MC_RSP_OP(cmd, 2, 16, 16, uint16_t, (attr)->wriop_version); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
- MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
- MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
-} while (0)
-
-#define DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype) \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
-
-#define DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout) \
-do { \
- MC_RSP_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
- MC_RSP_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
- MC_RSP_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
- MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
- MC_RSP_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
- MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
- MC_RSP_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
-} while (0)
-
-#define DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
- MC_CMD_OP(cmd, 0, 32, 16, uint16_t, (layout)->options); \
- MC_CMD_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
- MC_CMD_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
- MC_CMD_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
- MC_CMD_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
- MC_CMD_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
- MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
- MC_CMD_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
-} while (0)
-
-#define DPNI_CMD_SET_OFFLOAD(cmd, type, config) \
-do { \
- MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type); \
- MC_CMD_OP(cmd, 0, 32, 32, uint32_t, config); \
-} while (0)
-
-#define DPNI_CMD_GET_OFFLOAD(cmd, type) \
- MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type)
-
-#define DPNI_RSP_GET_OFFLOAD(cmd, config) \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, config)
-
-#define DPNI_CMD_GET_QDID(cmd, qtype) \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_QDID(cmd, qdid) \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
-
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_GET_STATISTICS(cmd, page) \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, page)
-
-#define DPNI_RSP_GET_STATISTICS(cmd, stat) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 64, uint64_t, (stat)->raw.counter[0]); \
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, (stat)->raw.counter[1]); \
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (stat)->raw.counter[2]); \
- MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (stat)->raw.counter[3]); \
- MC_RSP_OP(cmd, 4, 0, 64, uint64_t, (stat)->raw.counter[4]); \
- MC_RSP_OP(cmd, 5, 0, 64, uint64_t, (stat)->raw.counter[5]); \
- MC_RSP_OP(cmd, 6, 0, 64, uint64_t, (stat)->raw.counter[6]); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\
- MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
-do { \
- MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
- MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
- MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \
- MC_CMD_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
- MC_CMD_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
-do { \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
- MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
-do { \
- MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
- MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
- MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
- MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-} while (0)
-
-#define DPNI_RSP_GET_PORT_MAC_ADDR(cmd, mac_addr) \
-do { \
- MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
- MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
- MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
- MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
- MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \
-do { \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
- MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \
-do { \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
- MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \
- MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_ENABLE_VLAN_FILTER(cmd, en) \
- MC_CMD_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \
- MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \
- MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id)
-
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
- MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
- MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
- cfg->fs_cfg.miss_action); \
- MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
- MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
-} while (0)
-
-#define DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
-} while (0)
-
-#define DPNI_RSP_GET_QUEUE(cmd, queue, queue_id) \
-do { \
- MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
- MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
- MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
- MC_RSP_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
- MC_RSP_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
- MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
- MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (queue_id)->fqid); \
- MC_RSP_OP(cmd, 4, 32, 16, uint16_t, (queue_id)->qdbin); \
-} while (0)
-
-#define DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, options); \
- MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
- MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
- MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
- MC_CMD_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
- MC_CMD_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
- MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
- MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPNI_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
-
-#define DPNI_CMD_GET_TAILDROP(cmd, cp, q_type, tc, q_index) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
- MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
-} while (0)
-
-#define DPNI_RSP_GET_TAILDROP(cmd, taildrop) \
-do { \
- MC_RSP_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
- MC_RSP_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
- (taildrop)->units); \
- MC_RSP_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
-} while (0)
-
-#define DPNI_CMD_SET_TAILDROP(cmd, cp, q_type, tc, q_index, taildrop) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_congestion_point, cp); \
- MC_CMD_OP(cmd, 0, 8, 8, enum dpni_queue_type, q_type); \
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc); \
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, q_index); \
- MC_CMD_OP(cmd, 1, 0, 1, char, (taildrop)->enable); \
- MC_CMD_OP(cmd, 1, 16, 8, enum dpni_congestion_unit, \
- (taildrop)->units); \
- MC_CMD_OP(cmd, 1, 32, 32, uint32_t, (taildrop)->threshold); \
-} while (0)
-
-#define DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode) \
- MC_CMD_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
-
-#define DPNI_RSP_GET_TX_CONFIRMATION_MODE(cmd, mode) \
- MC_RSP_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
-
-#define DPNI_CMD_SET_CONGESTION_NOTIFICATION(cmd, qtype, tc, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
- MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
- MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->notification_mode); \
- MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
- MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
- MC_CMD_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
- MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
- MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
- MC_CMD_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
- MC_CMD_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
-} while (0)
-
-#define DPNI_CMD_GET_CONGESTION_NOTIFICATION(cmd, qtype, tc) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
-} while (0)
-
-#define DPNI_RSP_GET_CONGESTION_NOTIFICATION(cmd, cfg) \
-do { \
- MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (cfg)->dest_cfg.dest_id); \
- MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (cfg)->notification_mode); \
- MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (cfg)->dest_cfg.priority); \
- MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (cfg)->dest_cfg.dest_type); \
- MC_RSP_OP(cmd, 1, 60, 2, enum dpni_congestion_unit, (cfg)->units); \
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (cfg)->message_iova); \
- MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (cfg)->message_ctx); \
- MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (cfg)->threshold_entry); \
- MC_RSP_OP(cmd, 4, 32, 32, uint32_t, (cfg)->threshold_exit); \
-} while (0)
+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD_V2(0x004)
+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233)
+
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V2(0x235)
+
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
+#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+
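/* Editor's note: a minimal, hypothetical sketch of how these command IDs
 * are consumed, assuming the mc_encode_cmd_header() and mc_send_command()
 * helpers from the MC command layer; mc_io, cmd_flags and token are
 * hypothetical locals, and error handling is elided.
 */
struct mc_command cmd = { 0 };
int err;

cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
				  cmd_flags, token);
err = mc_send_command(mc_io, &cmd);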
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+ DPNI_##field##_SHIFT)
+#define dpni_set_field(var, field, val) \
+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field) \
+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
+
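/* Editor's note: a minimal sketch of the sub-byte helpers above, using the
 * DPNI_ENABLE_* shift/size constants defined later in this header; "flags"
 * and "enabled" are hypothetical locals.
 */
uint8_t flags = 0;
int enabled;

dpni_set_field(flags, ENABLE, 1);		/* sets bit 0 via DPNI_MASK(ENABLE) */
enabled = dpni_get_field(flags, ENABLE);	/* reads the same bit back: 1 */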
+#pragma pack(push, 1)
+struct dpni_cmd_open {
+ uint32_t dpni_id;
+};
+
+struct dpni_cmd_create {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t pad1;
+ uint8_t vlan_filter_entries;
+ uint8_t pad2;
+ uint8_t qos_entries;
+ uint8_t pad3;
+ uint16_t fs_entries;
+};
+
+struct dpni_cmd_destroy {
+	/* sic: carries the DPNI object ID, despite the dpsw_id name */
+	uint32_t dpsw_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+
+struct dpni_cmd_pool {
+ uint16_t dpbp_id;
+ uint8_t priority_mask;
+ uint8_t pad;
+};
+
+struct dpni_cmd_set_pools {
+ uint8_t num_dpbp;
+ uint8_t backup_pool_mask;
+ uint16_t pad;
+ struct dpni_cmd_pool pool[8];
+ uint16_t buffer_size[8];
+};
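/* Editor's note: a hedged sketch of filling this command, marking pool 1
 * as a backup pool with DPNI_BACKUP_POOL(); "cmd" and "dpbp_id0" are
 * hypothetical locals and little-endian conversions are omitted.
 */
struct dpni_cmd_set_pools *cmd_params =
	(struct dpni_cmd_set_pools *)cmd.params;

cmd_params->num_dpbp = 2;
cmd_params->backup_pool_mask |= DPNI_BACKUP_POOL(1, 1);	/* pool index 1 */
cmd_params->pool[0].dpbp_id = dpbp_id0;
cmd_params->buffer_size[0] = 2048;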
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT 0
+#define DPNI_ENABLE_SIZE 1
+
+struct dpni_rsp_is_enabled {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_irq_enable {
+ uint8_t enable;
+ uint8_t pad[3];
+ uint8_t irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+ uint32_t pad;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+ uint32_t mask;
+ uint8_t irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+ uint32_t pad;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+ uint32_t mask;
+};
+
+struct dpni_cmd_get_irq_status {
+ uint32_t status;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+ uint32_t status;
+};
+
+struct dpni_cmd_clear_irq_status {
+ uint32_t status;
+ uint8_t irq_index;
+};
+
+struct dpni_rsp_get_attr {
+ /* response word 0 */
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_rx_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t num_tx_tcs;
+ /* response word 1 */
+ uint8_t vlan_filter_entries;
+ uint8_t pad1;
+ uint8_t qos_entries;
+ uint8_t pad2;
+ uint16_t fs_entries;
+ uint16_t pad3;
+ /* response word 2 */
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT 0
+#define DPNI_ERROR_ACTION_SIZE 4
+#define DPNI_FRAME_ANN_SHIFT 4
+#define DPNI_FRAME_ANN_SIZE 1
+
+struct dpni_cmd_set_errors_behavior {
+ uint32_t errors;
+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
+ uint8_t flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of the functions changes, the structure below needs to be split.
+ */
+#define DPNI_PASS_TS_SHIFT 0
+#define DPNI_PASS_TS_SIZE 1
+#define DPNI_PASS_PR_SHIFT 1
+#define DPNI_PASS_PR_SIZE 1
+#define DPNI_PASS_FS_SHIFT 2
+#define DPNI_PASS_FS_SIZE 1
+
+struct dpni_cmd_get_buffer_layout {
+ uint8_t qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+ /* response word 0 */
+ uint8_t pad0[6];
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ uint8_t flags;
+ uint8_t pad1;
+ /* response word 1 */
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t head_room;
+ uint16_t tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+ /* cmd word 0 */
+ uint8_t qtype;
+ uint8_t pad0[3];
+ uint16_t options;
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ uint8_t flags;
+ uint8_t pad1;
+ /* cmd word 1 */
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t head_room;
+ uint16_t tail_room;
+};
+
+struct dpni_cmd_set_offload {
+ uint8_t pad[3];
+ uint8_t dpni_offload;
+ uint32_t config;
+};
+
+struct dpni_cmd_get_offload {
+ uint8_t pad[3];
+ uint8_t dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+ uint32_t pad;
+ uint32_t config;
+};
+
+struct dpni_cmd_get_qdid {
+ uint8_t qtype;
+};
+
+struct dpni_rsp_get_qdid {
+ uint16_t qdid;
+};
+
+struct dpni_rsp_get_sp_info {
+ uint16_t spids[2];
+};
+
+struct dpni_rsp_get_tx_data_offset {
+ uint16_t data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+ uint8_t page_number;
+ uint8_t param;
+};
+
+struct dpni_rsp_get_statistics {
+ uint64_t counter[7];
+};
+
+struct dpni_cmd_set_link_cfg {
+ uint64_t pad0;
+ uint32_t rate;
+ uint32_t pad1;
+ uint64_t options;
+};
+
+#define DPNI_LINK_STATE_SHIFT 0
+#define DPNI_LINK_STATE_SIZE 1
+
+struct dpni_rsp_get_link_state {
+ uint32_t pad0;
+ /* from LSB: up:1 */
+ uint8_t flags;
+ uint8_t pad1[3];
+ uint32_t rate;
+ uint32_t pad2;
+ uint64_t options;
+};
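/* Editor's note: a minimal sketch of decoding this response; the single
 * "up" bit is extracted with dpni_get_field() and the wider fields are
 * byte-swapped from little endian, assuming the le*_to_cpu helpers from
 * the MC layer. "cmd" and "state" are hypothetical locals.
 */
struct dpni_rsp_get_link_state *rsp_params =
	(struct dpni_rsp_get_link_state *)cmd.params;

state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
state->rate = le32_to_cpu(rsp_params->rate);
state->options = le64_to_cpu(rsp_params->options);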
+
+struct dpni_cmd_set_max_frame_length {
+ uint16_t max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+ uint16_t max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+ uint8_t enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+ uint8_t enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+ uint8_t enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+ uint16_t pad;
+ uint8_t mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT 0
+#define DPNI_UNICAST_FILTERS_SIZE 1
+#define DPNI_MULTICAST_FILTERS_SHIFT 1
+#define DPNI_MULTICAST_FILTERS_SIZE 1
+
+struct dpni_cmd_clear_mac_filters {
+ /* from LSB: unicast:1, multicast:1 */
+ uint8_t flags;
+};
+
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ uint8_t en;
+};
+
+struct dpni_cmd_vlan_id {
+ uint32_t pad;
+ uint16_t vlan_id;
+};
+
+#define DPNI_SEPARATE_GRP_SHIFT 0
+#define DPNI_SEPARATE_GRP_SIZE 1
+#define DPNI_MODE_1_SHIFT 0
+#define DPNI_MODE_1_SIZE 4
+#define DPNI_MODE_2_SHIFT 4
+#define DPNI_MODE_2_SIZE 4
+
+struct dpni_cmd_set_tx_priorities {
+ uint16_t flags;
+ uint8_t prio_group_A;
+ uint8_t prio_group_B;
+ uint32_t pad0;
+ uint8_t modes[4];
+ uint32_t pad1;
+ uint64_t pad2;
+ uint16_t delta_bandwidth[8];
+};
+
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
+#define DPNI_MISS_ACTION_SIZE 4
+#define DPNI_KEEP_HASH_KEY_SHIFT 7
+#define DPNI_KEEP_HASH_KEY_SIZE 1
+
+struct dpni_cmd_set_rx_tc_dist {
+ uint16_t dist_size;
+ uint8_t tc_id;
+ /* from LSB: dist_mode:4, miss_action:4 */
+ uint8_t flags;
+ uint8_t pad0;
+ /* only the LSB */
+ uint8_t keep_hash_key;
+ uint16_t default_flow_id;
+ uint64_t pad1[5];
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_get_queue {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_STASH_CTRL_SHIFT 6
+#define DPNI_STASH_CTRL_SIZE 1
+#define DPNI_HOLD_ACTIVE_SHIFT 7
+#define DPNI_HOLD_ACTIVE_SIZE 1
+
+struct dpni_rsp_get_queue {
+ /* response word 0 */
+ uint64_t pad0;
+ /* response word 1 */
+ uint32_t dest_id;
+ uint16_t pad1;
+ uint8_t dest_prio;
+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+ uint8_t flags;
+ /* response word 2 */
+ uint64_t flc;
+ /* response word 3 */
+ uint64_t user_context;
+ /* response word 4 */
+ uint32_t fqid;
+ uint16_t qdbin;
+};
+
+struct dpni_cmd_set_queue {
+ /* cmd word 0 */
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+ uint8_t options;
+ uint32_t pad0;
+ /* cmd word 1 */
+ uint32_t dest_id;
+ uint16_t pad1;
+ uint8_t dest_prio;
+ uint8_t flags;
+ /* cmd word 2 */
+ uint64_t flc;
+ /* cmd word 3 */
+ uint64_t user_context;
+};
+
+#define DPNI_DROP_ENABLE_SHIFT 0
+#define DPNI_DROP_ENABLE_SIZE 1
+#define DPNI_DROP_UNITS_SHIFT 2
+#define DPNI_DROP_UNITS_SIZE 2
+
+struct dpni_early_drop {
+ /* from LSB: enable:1, pad:1, units:2 */
+ uint8_t flags;
+ uint8_t pad0[3];
+ uint32_t pad1;
+ uint8_t green_drop_probability;
+ uint8_t pad2[7];
+ uint64_t green_max_threshold;
+ uint64_t green_min_threshold;
+ uint64_t pad3;
+ uint8_t yellow_drop_probability;
+ uint8_t pad4[7];
+ uint64_t yellow_max_threshold;
+ uint64_t yellow_min_threshold;
+ uint64_t pad5;
+ uint8_t red_drop_probability;
+ uint8_t pad6[7];
+ uint64_t red_max_threshold;
+ uint64_t red_min_threshold;
+};
+
+struct dpni_cmd_early_drop {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t pad[6];
+ uint64_t early_drop_iova;
+};
+
+struct dpni_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+struct dpni_cmd_get_taildrop {
+ uint8_t congestion_point;
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+};
+
+struct dpni_rsp_get_taildrop {
+ /* cmd word 0 */
+ uint64_t pad0;
+ /* cmd word 1 */
+ /* from LSB: enable:1, oal_lo:7 */
+ uint8_t enable_oal_lo;
+ /* from LSB: oal_hi:5 */
+ uint8_t oal_hi;
+ uint8_t units;
+ uint8_t pad2;
+ uint32_t threshold;
+};
+
+#define DPNI_OAL_LO_SHIFT 1
+#define DPNI_OAL_LO_SIZE 7
+#define DPNI_OAL_HI_SHIFT 0
+#define DPNI_OAL_HI_SIZE 5
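/* Editor's note: a hedged sketch of splitting a 12-bit overhead accounting
 * length (OAL) across the two bytes above; the low 7 bits share a byte with
 * the enable flag and the high 5 bits follow. "taildrop_oal" and
 * "cmd_params" are hypothetical locals.
 */
uint16_t oal = taildrop_oal & 0x0FFF;	/* 12 significant bits */

dpni_set_field(cmd_params->enable_oal_lo, ENABLE, 1);
dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, oal);	/* low 7 bits */
dpni_set_field(cmd_params->oal_hi, OAL_HI, oal >> DPNI_OAL_LO_SIZE);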
+
+struct dpni_cmd_set_taildrop {
+ /* cmd word 0 */
+ uint8_t congestion_point;
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t index;
+ uint32_t pad0;
+ /* cmd word 1 */
+ /* from LSB: enable:1, oal_lo:7 */
+ uint8_t enable_oal_lo;
+ /* from LSB: oal_hi:5 */
+ uint8_t oal_hi;
+ uint8_t units;
+ uint8_t pad2;
+ uint32_t threshold;
+};
+
+struct dpni_tx_confirmation_mode {
+ uint32_t pad;
+ uint8_t confirmation_mode;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ uint8_t qtype;
+ uint8_t tc;
+ uint8_t pad[6];
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type:4, units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+struct dpni_cmd_get_congestion_notification {
+ uint8_t qtype;
+ uint8_t tc;
+};
+
+struct dpni_rsp_get_congestion_notification {
+ uint64_t pad;
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+ /* from LSB: dest_type:4, units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
index ef7e4dac..dbec306a 100644
--- a/drivers/net/dpaa2/mc/fsl_net.h
+++ b/drivers/net/dpaa2/mc/fsl_net.h
@@ -213,7 +213,7 @@
#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-#define NH_FLD_SCTP_CHUNK_DATA_BEGGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
index 8591cc0b..09f4364b 100644
--- a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
+++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -2,3 +2,11 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_17.11 {
+ global:
+
+ dpaa2_eth_eventq_attach;
+ dpaa2_eth_eventq_detach;
+
+} DPDK_17.05;
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ffdf36d3..3f0344b4 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -38,6 +38,9 @@ LIB = librte_pmd_e1000.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_e1000_version.map
@@ -68,7 +71,7 @@ endif
# Add extra flags for base driver files (also known as shared code)
# to disable warnings in them
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 3d4ab936..a0c3b4dc 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -43,11 +43,11 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -72,7 +72,7 @@ static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void eth_em_stats_get(struct rte_eth_dev *dev,
+static int eth_em_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static void eth_em_stats_reset(struct rte_eth_dev *dev);
static void eth_em_infos_get(struct rte_eth_dev *dev,
@@ -99,7 +99,7 @@ static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
@@ -341,7 +341,6 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
@@ -432,7 +431,8 @@ static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_em_pmd = {
.id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_em_pci_probe,
.remove = eth_em_pci_remove,
};
@@ -668,7 +668,12 @@ eth_em_start(struct rte_eth_dev *dev)
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
- eth_em_vlan_offload_set(dev, mask);
+ ret = eth_em_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to update vlan offload");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
/* Set Interrupt Throttling Rate to maximum allowed value. */
E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
@@ -906,7 +911,7 @@ em_hardware_init(struct e1000_hw *hw)
}
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
-static void
+static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1006,7 +1011,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
}
if (rte_stats == NULL)
- return;
+ return -EINVAL;
/* Rx Errors */
rte_stats->imissed = stats->mpc;
@@ -1021,6 +1026,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->opackets = stats->gptc;
rte_stats->ibytes = stats->gorc;
rte_stats->obytes = stats->gotc;
+ return 0;
}
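/* Editor's note: with this 17.11 change the stats callback returns int, so
 * rte_eth_stats_get() can propagate errors such as the -EINVAL above; a
 * minimal caller-side sketch ("port_id" is a hypothetical local):
 */
struct rte_eth_stats stats;
int ret = rte_eth_stats_get(port_id, &stats);

if (ret != 0)
	RTE_LOG(ERR, PMD, "stats unavailable on port %u: %d\n", port_id, ret);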
static void
@@ -1447,7 +1453,7 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
-static void
+static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if(mask & ETH_VLAN_STRIP_MASK){
@@ -1463,6 +1469,8 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
em_vlan_hw_filter_disable(dev);
}
+
+ return 0;
}
/*
@@ -1624,7 +1632,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
rte_em_dev_atomic_read_link_status(dev, &link);
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
- dev->data->port_id, (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 31819c5b..1d8f0794 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -119,7 +119,7 @@ struct em_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
@@ -186,7 +186,7 @@ struct em_tx_queue {
/** Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
@@ -577,7 +577,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
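/* Editor's note: rte_mbuf_data_iova() is the 17.11 rename of the old
 * rte_mbuf_data_dma_addr(); the address arithmetic is unchanged. A minimal
 * sketch, with "m" a hypothetical mbuf pointer:
 */
rte_iova_t cur = rte_mbuf_data_iova(m);		/* buf_iova + data_off */
rte_iova_t def = rte_mbuf_data_iova_default(m);	/* buf_iova + headroom */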
@@ -675,7 +675,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
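/* Editor's note: PKT_RX_VLAN_PKT becomes PKT_RX_VLAN in 17.11; a minimal
 * consumer-side sketch ("m" is a hypothetical mbuf pointer and
 * process_vlan() a hypothetical handler):
 */
if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
	process_vlan(m->vlan_tci);	/* vlan_tci valid only with the flag set */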
@@ -799,7 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
@@ -830,7 +830,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->ol_flags = rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
@@ -979,7 +979,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
@@ -1056,7 +1056,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
@@ -1289,7 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1416,7 +1416,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1652,7 +1652,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index e4f7a9fa..fdc139f3 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -43,11 +43,11 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -112,7 +112,7 @@ static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void eth_igb_stats_get(struct rte_eth_dev *dev,
+static int eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -157,7 +157,7 @@ static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid_id);
-static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
@@ -188,7 +188,7 @@ static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
-static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
+static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -334,6 +334,8 @@ static const struct rte_pci_id pci_id_igb_map[] = {
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
@@ -816,7 +818,6 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
@@ -1051,7 +1052,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1166,7 +1166,8 @@ static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_igb_pmd = {
.id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_igb_pci_probe,
.remove = eth_igb_pci_remove,
};
@@ -1189,7 +1190,7 @@ static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
*/
static struct rte_pci_driver rte_igbvf_pmd = {
.id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_igbvf_pci_probe,
.remove = eth_igbvf_pci_remove,
};
@@ -1400,7 +1401,12 @@ eth_igb_start(struct rte_eth_dev *dev)
*/
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
- eth_igb_vlan_offload_set(dev, mask);
+ ret = eth_igb_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set vlan offload");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
/* Enable VLAN filter since VMDq always use VLAN filter */
@@ -1828,7 +1834,7 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
}
-static void
+static int
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1838,7 +1844,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
igb_read_stats_registers(hw, stats);
if (rte_stats == NULL)
- return;
+ return -EINVAL;
/* Rx Errors */
rte_stats->imissed = stats->mpc;
@@ -1853,6 +1859,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->opackets = stats->gptc;
rte_stats->ibytes = stats->gorc;
rte_stats->obytes = stats->gotc;
+ return 0;
}
static void
@@ -2093,7 +2100,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return IGBVF_NB_XSTATS;
}
-static void
+static int
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2103,12 +2110,13 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
igbvf_read_stats_registers(hw, hw_stats);
if (rte_stats == NULL)
- return;
+ return -EINVAL;
rte_stats->ipackets = hw_stats->gprc;
rte_stats->ibytes = hw_stats->gorc;
rte_stats->opackets = hw_stats->gptc;
rte_stats->obytes = hw_stats->gotc;
+ return 0;
}
static void
@@ -2715,7 +2723,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2 * VLAN_TAG_SIZE);
}
-static void
+static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if(mask & ETH_VLAN_STRIP_MASK){
@@ -2738,6 +2746,8 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
igb_vlan_hw_extend_disable(dev);
}
+
+ return 0;
}
@@ -4094,7 +4104,7 @@ eth_igb_get_flex_filter(struct rte_eth_dev *dev,
flex_filter.filter_info.priority = filter->priority;
memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
memcpy(flex_filter.filter_info.mask, filter->mask,
- RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
+ RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
&flex_filter.filter_info);
@@ -5382,7 +5392,14 @@ eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t mask = 1 << queue_id;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
E1000_WRITE_REG(hw, E1000_EIMC, mask);
E1000_WRITE_FLUSH(hw);
@@ -5397,7 +5414,12 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask = 1 << queue_id;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
uint32_t regval;
regval = E1000_READ_REG(hw, E1000_EIMS);
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index ed2ecc40..22bad265 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -47,7 +47,6 @@
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -1346,7 +1345,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
sizeof(struct igb_ntuple_filter_ele), 0);
- (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
@@ -1368,7 +1367,7 @@ igb_flow_create(struct rte_eth_dev *dev,
ethertype_filter_ptr = rte_zmalloc(
"igb_ethertype_filter",
sizeof(struct igb_ethertype_filter_ele), 0);
- (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
@@ -1388,7 +1387,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
syn_filter_ptr = rte_zmalloc("igb_syn_filter",
sizeof(struct igb_eth_syn_filter_ele), 0);
- (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&igb_filter_syn_list,
@@ -1409,7 +1408,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
flex_filter_ptr = rte_zmalloc("igb_flex_filter",
sizeof(struct igb_flex_filter_ele), 0);
- (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
sizeof(struct rte_eth_flex_filter));
TAILQ_INSERT_TAIL(&igb_filter_flex_list,
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 6809d30c..cd6ae2fb 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -39,6 +39,7 @@
#include <stdarg.h>
#include <inttypes.h>
+#include <rte_bus_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 1c80a2a1..4ee12e9e 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -105,6 +105,13 @@ struct igb_tx_entry {
};
/**
+ * rx queue flags
+ */
+enum igb_rxq_flags {
+ IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
+};
+
+/**
* Structure associated with each RX queue.
*/
struct igb_rx_queue {
@@ -122,12 +129,13 @@ struct igb_rx_queue {
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint32_t flags; /**< RX flags. */
};
/**
@@ -191,7 +199,7 @@ struct igb_tx_queue {
/**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
@@ -589,7 +597,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
@@ -785,7 +793,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
@@ -917,7 +925,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -946,9 +954,17 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
+ */
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
@@ -1103,7 +1119,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
@@ -1180,10 +1196,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
*/
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ first_seg->vlan_tci =
+ rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ first_seg->vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
@@ -1530,7 +1553,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -1667,7 +1690,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
@@ -2180,7 +2203,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
@@ -2278,6 +2301,17 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
+ rxq->flags = 0;
+ /*
+ * i350 and i354 loopback (LB) vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
if (ret)
@@ -2557,6 +2591,17 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
+ rxq->flags = 0;
+ /*
+ * i350VF LB vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_vfadapt_i350) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
if (ret)
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index bf1f5da0..f9bfe053 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -52,5 +52,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c
SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
CFLAGS += $(INCLUDES)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 71a8c1e2..accecf51 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -191,7 +191,7 @@ typedef uint64_t dma_addr_t;
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
- phys = mz->phys_addr; \
+ phys = mz->iova; \
handle = mz; \
} while (0)
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
@@ -209,7 +209,7 @@ typedef uint64_t dma_addr_t;
mz = rte_memzone_reserve(z_name, size, node, 0); \
memset(mz->addr, 0, size); \
virt = mz->addr; \
- phys = mz->phys_addr; \
+ phys = mz->iova; \
} while (0)
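/* Editor's note: mz->iova is the 17.11 replacement for mz->phys_addr; a
 * minimal sketch of reserving a zone and reading both addresses ("len" is
 * a hypothetical size):
 */
const struct rte_memzone *mz =
	rte_memzone_reserve("demo_zone", len, SOCKET_ID_ANY, 0);

if (mz != NULL) {
	void *va = mz->addr;		/* virtual address */
	rte_iova_t io = mz->iova;	/* IO address, formerly phys_addr */
}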
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 80ce1f35..22db8951 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -205,7 +205,7 @@ static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
-static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
@@ -811,7 +811,7 @@ static void ena_stats_restart(struct rte_eth_dev *dev)
rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}
-static void ena_stats_get(struct rte_eth_dev *dev,
+static int ena_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
struct ena_admin_basic_stats ena_stats;
@@ -821,13 +821,13 @@ static void ena_stats_get(struct rte_eth_dev *dev,
int rc;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -ENOTSUP;
memset(&ena_stats, 0, sizeof(ena_stats));
rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
if (unlikely(rc)) {
RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
- return;
+ return rc;
}
/* Set of basic statistics from ENA */
@@ -846,6 +846,7 @@ static void ena_stats_get(struct rte_eth_dev *dev,
stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+ return 0;
}
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
@@ -1166,7 +1167,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
/* prepare physical address for DMA transaction */
- ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
@@ -1725,7 +1726,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* consideration pushed header
*/
if (mbuf->data_len > ena_tx_ctx.header_len) {
- ebuf->paddr = mbuf->buf_physaddr +
+ ebuf->paddr = mbuf->buf_iova +
mbuf->data_off +
ena_tx_ctx.header_len;
ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
@@ -1734,7 +1735,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
while ((mbuf = mbuf->next) != NULL) {
- ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+ ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
ebuf->len = mbuf->data_len;
ebuf++;
tx_info->num_of_bufs++;
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index dc3080ff..be8bc9fa 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -35,6 +35,7 @@
#define _ENA_ETHDEV_H_
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include "ena_com.h"
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index db48ff2d..5191db54 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -45,6 +45,9 @@ CFLAGS += -I$(SRCDIR)/base/
CFLAGS += -I$(SRCDIR)
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -Wno-strict-aliasing
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
VPATH += $(SRCDIR)/src
diff --git a/drivers/net/enic/base/vnic_cq.c b/drivers/net/enic/base/vnic_cq.c
index 2f65f357..3549fece 100644
--- a/drivers/net/enic/base/vnic_cq.c
+++ b/drivers/net/enic/base/vnic_cq.c
@@ -35,16 +35,6 @@
#include "vnic_dev.h"
#include "vnic_cq.h"
-int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
- unsigned int desc_size)
-{
- int mem_size;
-
- mem_size = vnic_dev_desc_ring_size(&cq->ring, desc_count, desc_size);
-
- return mem_size;
-}
-
void vnic_cq_free(struct vnic_cq *cq)
{
vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
@@ -65,11 +55,11 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- pr_err("Failed to hook CQ[%d] resource\n", index);
+ pr_err("Failed to hook CQ[%u] resource\n", index);
return -EINVAL;
}
- snprintf(res_name, sizeof(res_name), "%d-cq-%d", instance++, index);
+ snprintf(res_name, sizeof(res_name), "%d-cq-%u", instance++, index);
err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size,
socket_id, res_name);
if (err)
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 49b36555..9b25d219 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -272,7 +272,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
__attribute__((unused)) unsigned int socket_id,
char *z_name)
{
- void *alloc_addr = NULL;
+ void *alloc_addr;
dma_addr_t alloc_pa = 0;
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
@@ -443,24 +443,6 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
return err;
}
-void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
-{
- vdev->proxy = PROXY_BY_INDEX;
- vdev->proxy_index = index;
-}
-
-void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
-{
- vdev->proxy = PROXY_BY_BDF;
- vdev->proxy_index = bdf;
-}
-
-void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
-{
- vdev->proxy = PROXY_NONE;
- vdev->proxy_index = 0;
-}
-
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
@@ -650,7 +632,7 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
if (!vdev->stats) {
snprintf((char *)name, sizeof(name),
- "vnic_stats-%d", instance++);
+ "vnic_stats-%u", instance++);
vdev->stats = vdev->alloc_consistent(vdev->priv,
sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
if (!vdev->stats)
@@ -672,15 +654,6 @@ int vnic_dev_close(struct vnic_dev *vdev)
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
-/** Deprecated. @see vnic_dev_enable_wait */
-int vnic_dev_enable(struct vnic_dev *vdev)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
-
- return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
-}
-
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
@@ -725,31 +698,6 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
return 0;
}
-int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
-{
- u64 a0 = (u32)arg, a1 = 0;
- int wait = 1000;
-
- return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
-}
-
-int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
- int err;
-
- *done = 0;
-
- err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
- if (err)
- return err;
-
- *done = (a0 == 0);
-
- return 0;
-}
-
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0 = 0, a1 = 0;
@@ -840,19 +788,6 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
return 0;
}
-int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
-{
- u64 a0 = intr, a1 = 0;
- int wait = 1000;
- int err;
-
- err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
- if (err)
- pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
-
- return err;
-}
-
void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
vdev->in_reset = state;
@@ -900,7 +835,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
}
if (!vnic_dev_in_reset(vdev)) {
snprintf((char *)name, sizeof(name),
- "vnic_notify-%d", instance++);
+ "vnic_notify-%u", instance++);
notify_addr = vdev->alloc_consistent(vdev->priv,
sizeof(struct vnic_devcmd_notify),
&notify_pa, (u8 *)name);
@@ -985,14 +920,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return r;
}
-int vnic_dev_deinit(struct vnic_dev *vdev)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
-
- return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
-}
-
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
/* Default: hardware intr coal timer is in units of 1.5 usecs */
@@ -1018,18 +945,6 @@ u32 vnic_dev_port_speed(struct vnic_dev *vdev)
return vdev->notify_copy.port_speed;
}
-void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
- enum vnic_dev_intr_mode intr_mode)
-{
- vdev->intr_mode = intr_mode;
-}
-
-enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
- struct vnic_dev *vdev)
-{
- return vdev->intr_mode;
-}
-
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
return (usec * vdev->intr_coal_timer_info.mul) /
@@ -1063,7 +978,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->free_consistent(vdev->priv,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
- kfree(vdev);
+ rte_free(vdev);
}
}
@@ -1072,7 +987,13 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
unsigned int num_bars)
{
if (!vdev) {
- vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ char name[NAME_MAX];
+ snprintf((char *)name, sizeof(name), "%s-vnic",
+ pdev->device.name);
+ vdev = (struct vnic_dev *)rte_zmalloc_socket(name,
+ sizeof(struct vnic_dev),
+ RTE_CACHE_LINE_SIZE,
+ pdev->device.numa_node);
if (!vdev)
return NULL;
}
@@ -1094,23 +1015,6 @@ err_out:
return NULL;
}
-struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev)
-{
- return vdev->pdev;
-}
-
-int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
-{
- u64 a0, a1 = 0;
- int wait = 1000;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = mac_addr[i];
-
- return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
-}
-
/*
* vnic_dev_classifier: Add/Delete classifier entries
* @vdev: vdev of the device
@@ -1150,7 +1054,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
tlv_size = filter_size + action_size +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
- "vnic_clsf_%d", unique_id++);
+ "vnic_clsf_%u", unique_id++);
tlv_va = vdev->alloc_consistent(vdev->priv,
tlv_size, &tlv_pa, (u8 *)z_name);
if (!tlv_va)
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 9a9e6917..c9ca25b3 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -35,8 +35,10 @@
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
#include "enic_compat.h"
-#include "rte_pci.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index 10a40c1b..ea297eef 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -58,13 +58,13 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- pr_err("Failed to hook RQ[%d] resource\n", index);
+ pr_err("Failed to hook RQ[%u] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
- snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
+ snprintf(res_name, sizeof(res_name), "%d-rq-%u", instance++, index);
rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
rq->socket_id, res_name);
return rc;
@@ -118,11 +118,6 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
rq->pkt_last_seg = NULL;
}
-void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
-{
- iowrite32(error, &rq->ctrl->error_status);
-}
-
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
diff --git a/drivers/net/enic/base/vnic_rss.c b/drivers/net/enic/base/vnic_rss.c
index 1cf055b0..87d40c0d 100644
--- a/drivers/net/enic/base/vnic_rss.c
+++ b/drivers/net/enic/base/vnic_rss.c
@@ -50,35 +50,3 @@ void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key)
}
}
-void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu)
-{
- u32 i;
- u32 *p = (u32 *)cpu;
-
- for (i = 0; i < 32; ++i)
- iowrite32(*p++, &rss_cpu->cpu[i].b[0]);
-}
-
-void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key)
-{
- u32 i;
- u32 *p;
- u16 *q;
-
- for (i = 0; i < 4; ++i) {
- p = (u32 *)(key + (10 * i));
- *p++ = ioread32(&rss_key->key[i].b[0]);
- *p++ = ioread32(&rss_key->key[i].b[4]);
- q = (u16 *)p;
- *q = (u16)ioread32(&rss_key->key[i].b[8]);
- }
-}
-
-void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu)
-{
- u32 i;
- u32 *p = (u32 *)cpu;
-
- for (i = 0; i < 32; ++i)
- *p++ = ioread32(&rss_cpu->cpu[i].b[0]);
-}
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index 7c4119c3..0a1247f4 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -52,7 +52,7 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
char res_name[NAME_MAX];
static int instance;
- snprintf(res_name, sizeof(res_name), "%d-wq-%d", instance++, wq->index);
+ snprintf(res_name, sizeof(res_name), "%d-wq-%u", instance++, wq->index);
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size,
wq->socket_id, res_name);
}
@@ -145,11 +145,6 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
wq->last_completed_index = 0;
}
-void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
-{
- iowrite32(error, &wq->ctrl->error_status);
-}
-
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e28f2235..e36ec385 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -227,11 +227,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
return enic->rq_count + wq;
}
-static inline unsigned int enic_msix_err_intr(__rte_unused struct enic *enic)
-{
- return 0;
-}
-
static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
return (struct enic *)eth_dev->data->dev_private;
@@ -287,7 +282,7 @@ extern int enic_enable(struct enic *enic);
extern int enic_disable(struct enic *enic);
extern void enic_remove(struct enic *enic);
extern int enic_get_link_status(struct enic *enic);
-extern void enic_dev_stats_get(struct enic *enic,
+extern int enic_dev_stats_get(struct enic *enic,
struct rte_eth_stats *r_stats);
extern void enic_dev_stats_clear(struct enic *enic);
extern void enic_add_packet_filter(struct enic *enic);
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index fc58bb41..1cb5686f 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -99,11 +99,6 @@ static inline uint32_t ioread32(volatile void *addr)
return rte_read32(addr);
}
-static inline uint16_t ioread16(volatile void *addr)
-{
- return rte_read16(addr);
-}
-
static inline uint8_t ioread8(volatile void *addr)
{
return rte_read8(addr);
@@ -119,26 +114,11 @@ static inline void iowrite32_relaxed(uint32_t val, volatile void *addr)
rte_write32_relaxed(val, addr);
}
-static inline void iowrite16(uint16_t val, volatile void *addr)
-{
- rte_write16(val, addr);
-}
-
-static inline void iowrite8(uint8_t val, volatile void *addr)
-{
- rte_write8(val, addr);
-}
-
static inline unsigned int readl(volatile void __iomem *addr)
{
return rte_read32(addr);
}
-static inline unsigned int readl_relaxed(volatile void __iomem *addr)
-{
- return rte_read32_relaxed(addr);
-}
-
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
rte_write32(val, addr);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index da8fec2d..669dbf33 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -37,6 +37,7 @@
#include <rte_dev.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
@@ -142,6 +143,10 @@ enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
static void enicpmd_dev_tx_queue_release(void *txq)
{
ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
enic_free_wq(txq);
}
@@ -196,6 +201,9 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
if (queue_idx >= ENIC_WQ_MAX) {
dev_err(enic,
@@ -272,6 +280,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
static void enicpmd_dev_rx_queue_release(void *rxq)
{
ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
enic_free_rq(rxq);
}
@@ -310,6 +322,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
/* With Rx scatter support, two RQs are now used on VIC per RQ used
* by the application.
*/
@@ -347,7 +363,7 @@ static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
return err;
}
-static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct enic *enic = pmd_priv(eth_dev);
@@ -371,6 +387,8 @@ static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
dev_warning(enic,
"Configuration of extended VLAN is not supported\n");
}
+
+ return 0;
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
@@ -378,6 +396,9 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
ret = enic_set_vnic_res(enic);
if (ret) {
@@ -392,9 +413,10 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_conf.rxmode.split_hdr_size);
}
- enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
- return 0;
+ ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
+
+ return ret;
}
/* Start the device.
@@ -404,6 +426,9 @@ static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
return enic_enable(enic);
}
@@ -416,6 +441,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
struct rte_eth_link link;
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic_disable(enic);
memset(&link, 0, sizeof(link));
@@ -444,13 +472,13 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
return enic_link_update(enic);
}
-static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *stats)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_dev_stats_get(enic, stats);
+ return enic_dev_stats_get(enic, stats);
}
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
@@ -513,7 +541,11 @@ static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
+
enic->promisc = 1;
enic_add_packet_filter(enic);
}
@@ -522,6 +554,9 @@ static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->promisc = 0;
enic_add_packet_filter(enic);
@@ -531,6 +566,9 @@ static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->allmulti = 1;
enic_add_packet_filter(enic);
@@ -540,6 +578,9 @@ static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic->allmulti = 0;
enic_add_packet_filter(enic);
@@ -551,6 +592,9 @@ static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
ENICPMD_FUNC_TRACE();
return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
@@ -559,6 +603,9 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct enic *enic = pmd_priv(eth_dev);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
ENICPMD_FUNC_TRACE();
enic_del_mac_address(enic, index);
}
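
Several enic ops above gain the same guard: control-path work is refused when the caller is not the primary process, since only the primary owns the hardware in DPDK's multi-process model. A generic sketch of the guard, with a hypothetical op name:

    #include <rte_eal.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>

    /* hypothetical configure op: only the primary may touch the NIC */
    static int
    dummy_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
    {
            if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                    return -E_RTE_SECONDARY;
            /* ... program the hardware here ... */
            return 0;
    }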
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 40dbec7f..8af0ccd3 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -40,6 +40,7 @@
#include <libgen.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -156,16 +157,17 @@ void enic_dev_stats_clear(struct enic *enic)
enic_clear_soft_stats(enic);
}
-void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
struct vnic_stats *stats;
struct enic_soft_stats *soft_stats = &enic->soft_stats;
int64_t rx_truncated;
uint64_t rx_packet_errors;
+ int ret = vnic_dev_stats_dump(enic->vdev, &stats);
- if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+ if (ret) {
dev_err(enic, "Error in getting stats\n");
- return;
+ return ret;
}
/* The number of truncated packets can only be calculated by
@@ -191,6 +193,7 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
+ return 0;
}
void enic_del_mac_address(struct enic *enic, int mac_index)
@@ -224,7 +227,7 @@ enic_free_rq_buf(struct rte_mbuf **mbuf)
return;
rte_pktmbuf_free(*mbuf);
- mbuf = NULL;
+ *mbuf = NULL;
}
void enic_init_vnic_resources(struct enic *enic)
@@ -280,7 +283,7 @@ void enic_init_vnic_resources(struct enic *enic)
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->phys_addr);
+ (u64)enic->wq[index].cqmsg_rz->iova);
}
vnic_intr_init(&enic->intr,
@@ -313,7 +316,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
}
mb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(mb->buf_physaddr
+ dma_addr = (dma_addr_t)(mb->buf_iova
+ RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
@@ -359,7 +362,7 @@ enic_alloc_consistent(void *priv, size_t size,
}
vaddr = rz->addr;
- *dma_handle = (dma_addr_t)rz->phys_addr;
+ *dma_handle = (dma_addr_t)rz->iova;
mze = rte_malloc("enic memzone entry",
sizeof(struct enic_memzone_entry), 0);
@@ -368,6 +371,7 @@ enic_alloc_consistent(void *priv, size_t size,
pr_err("%s : Failed to allocate memory for memzone list\n",
__func__);
rte_memzone_free(rz);
+ return NULL;
}
mze->rz = rz;
@@ -391,7 +395,7 @@ enic_free_consistent(void *priv,
rte_spinlock_lock(&enic->memzone_list_lock);
LIST_FOREACH(mze, &enic->memzone_list, entries) {
if (mze->rz->addr == vaddr &&
- mze->rz->phys_addr == dma_handle)
+ mze->rz->iova == dma_handle)
break;
}
if (mze == NULL) {
@@ -1116,11 +1120,12 @@ static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
struct vnic_rq *sop_rq, *data_rq;
- unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ unsigned int cq_idx;
int rc = 0;
sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+ cq_idx = rq_idx;
vnic_cq_clean(&enic->cq[cq_idx]);
vnic_cq_init(&enic->cq[cq_idx],
@@ -1180,6 +1185,9 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1331,6 +1339,10 @@ int enic_probe(struct enic *enic)
dev_debug(enic, " Initializing ENIC PMD\n");
+	/* if this is a secondary process, the hardware is already initialized */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
enic->bar0.len = pdev->mem_resource[0].len;
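
enic_alloc_consistent() above now reads the memzone's iova field rather than phys_addr, matching the 17.11 IOVA rework visible throughout this patch. A sketch of reserving DMA-able memory and recovering the bus address to hand to the NIC; the zone name is illustrative:

    #include <rte_memzone.h>
    #include <rte_memory.h>

    static const struct rte_memzone *
    dma_ring_alloc(size_t size, int socket, rte_iova_t *iova)
    {
            const struct rte_memzone *rz;

            rz = rte_memzone_reserve_aligned("dummy_ring", size, socket,
                            0 /* flags */, RTE_CACHE_LINE_SIZE);
            if (rz == NULL)
                    return NULL;
            *iova = rz->iova;       /* bus address for the device */
            return rz;
    }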
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index a39172f1..a3663d51 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -243,7 +243,7 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
/* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
- pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
if (vlan_tci != 0)
@@ -386,7 +386,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Push descriptor for newly allocated mbuf */
nmb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(nmb->buf_physaddr +
+ dma_addr = (dma_addr_t)(nmb->buf_iova +
RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd_ptr, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
@@ -546,12 +546,15 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t bus_addr;
uint8_t offload_mode;
uint16_t header_len;
+ uint64_t tso;
+ rte_atomic64_t *tx_oversized;
enic_cleanup_wq(enic, wq);
wq_desc_avail = vnic_wq_desc_avail(wq);
head_idx = wq->head_idx;
desc_count = wq->ring.desc_count;
ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+ tx_oversized = &enic->soft_stats.tx_oversized;
nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
@@ -561,10 +564,12 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
data_len = tx_pkt->data_len;
ol_flags = tx_pkt->ol_flags;
nb_segs = tx_pkt->nb_segs;
+ tso = ol_flags & PKT_TX_TCP_SEG;
- if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+ /* drop packet if it's too big to send */
+ if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
rte_pktmbuf_free(tx_pkt);
- rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ rte_atomic64_inc(tx_oversized);
continue;
}
@@ -578,7 +583,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
vlan_id = 0;
vlan_tag_insert = 0;
bus_addr = (dma_addr_t)
- (tx_pkt->buf_physaddr + tx_pkt->data_off);
+ (tx_pkt->buf_iova + tx_pkt->data_off);
descs = (struct wq_enet_desc *)wq->ring.descs;
desc_p = descs + head_idx;
@@ -587,13 +592,21 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
header_len = 0;
- if (tx_pkt->tso_segsz) {
+ if (tso) {
header_len = tso_header_len(tx_pkt);
- if (header_len) {
- offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
- mss = tx_pkt->tso_segsz;
+
+ /* Drop if non-TCP packet or TSO seg size is too big */
+ if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
+ header_len) > ENIC_TX_MAX_PKT_SIZE))) {
+ rte_pktmbuf_free(tx_pkt);
+ rte_atomic64_inc(tx_oversized);
+ continue;
}
+
+ offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+ mss = tx_pkt->tso_segsz;
}
+
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
if (ol_flags & PKT_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
@@ -630,7 +643,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (tx_pkt->next == NULL)
eop = 1;
desc_p = descs + head_idx;
- bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+ bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
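
The TX hunks above let an oversized frame through only when TSO will segment it, and drop TSO requests whose header cannot be parsed or whose header plus segment size would still exceed the limit. A condensed sketch of that decision; MAX_PKT and the helper name are hypothetical:

    #include <stdbool.h>
    #include <rte_mbuf.h>

    #define MAX_PKT 9208    /* hypothetical hardware frame limit */

    /* return true when the mbuf must be dropped instead of queued */
    static bool
    tx_pkt_reject(const struct rte_mbuf *m, uint16_t hdr_len)
    {
            bool tso = (m->ol_flags & PKT_TX_TCP_SEG) != 0;

            if (!tso)
                    return m->pkt_len > MAX_PKT;    /* plain frame too big */
            /* TSO: need a parsable header, and hdr + MSS must fit */
            return hdr_len == 0 || hdr_len + m->tso_segsz > MAX_PKT;
    }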
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
index d516d362..ea2a8fe4 100644
--- a/drivers/net/failsafe/Makefile
+++ b/drivers/net/failsafe/Makefile
@@ -58,5 +58,8 @@ CFLAGS += -D_XOPEN_SOURCE=700
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
CFLAGS += -pedantic
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 6006bef8..6bc5abac 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -37,7 +37,7 @@
#include <rte_ethdev_vdev.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include "failsafe_private.h"
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 1f22416f..cfc83e36 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -115,8 +115,7 @@ fs_execute_cmd(struct sub_device *sdev, char *cmdline)
/* store possible newline as well */
char output[DEVARGS_MAXLEN + 1];
size_t len;
- int old_err;
- int ret, pclose_ret;
+ int ret;
RTE_ASSERT(cmdline != NULL || sdev->cmdline != NULL);
if (sdev->cmdline == NULL) {
@@ -135,12 +134,10 @@ fs_execute_cmd(struct sub_device *sdev, char *cmdline)
sdev->cmdline[i] = ' ';
}
DEBUG("'%s'", sdev->cmdline);
- old_err = errno;
fp = popen(sdev->cmdline, "r");
if (fp == NULL) {
- ret = errno;
+ ret = -errno;
ERROR("popen: %s", strerror(errno));
- errno = old_err;
return ret;
}
/* We only read one line */
@@ -155,18 +152,11 @@ fs_execute_cmd(struct sub_device *sdev, char *cmdline)
goto ret_pclose;
}
ret = fs_parse_device(sdev, output);
- if (ret) {
+ if (ret)
ERROR("Parsing device '%s' failed", output);
- goto ret_pclose;
- }
ret_pclose:
- pclose_ret = pclose(fp);
- if (pclose_ret) {
- pclose_ret = errno;
+ if (pclose(fp) == -1)
ERROR("pclose: %s", strerror(errno));
- errno = old_err;
- return pclose_ret;
- }
return ret;
}
@@ -286,10 +276,17 @@ fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
ERROR("Invalid parameter");
return -EINVAL;
}
- if (params[b] == ',' || params[b] == '\0')
- i += snprintf(&buffer[i], b - a + 1, "%s", &params[a]);
- if (params[b] == '(') {
+ if (params[b] == ',' || params[b] == '\0') {
+ size_t len = b - a;
+
+ if (i > 0)
+ len += 1;
+ snprintf(&buffer[i], len + 1, "%s%s",
+ i ? "," : "", &params[a]);
+ i += len;
+ } else if (params[b] == '(') {
size_t start = b;
+
b += closing_paren(&params[b]);
if (b == start)
return -EINVAL;
@@ -393,6 +390,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
&dev->data->mac_addrs[0]);
if (ret < 0)
goto free_kvlist;
+
mac_from_arg = 1;
}
}
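
fs_execute_cmd() above now returns -errno directly from a failed popen() and stops saving and restoring errno around the call; a failed pclose() is only logged. A sketch of running a one-line command under that convention; the function name is illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int
    run_one_line(const char *cmd, char *out, size_t len)
    {
            FILE *fp = popen(cmd, "r");
            int ret = 0;

            if (fp == NULL)
                    return -errno;          /* propagate the cause */
            if (fgets(out, (int)len, fp) == NULL)
                    ret = errno ? -errno : -EINVAL;
            if (pclose(fp) == -1)           /* log and keep ret, as above */
                    fprintf(stderr, "pclose: %s\n", strerror(errno));
            return ret;
    }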
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index c8f4318e..19d26f53 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -41,6 +41,7 @@ fs_bus_init(struct rte_eth_dev *dev)
struct sub_device *sdev;
struct rte_devargs *da;
uint8_t i;
+ uint16_t j;
int ret;
FOREACH_SUBDEV(sdev, i, dev) {
@@ -57,7 +58,13 @@ fs_bus_init(struct rte_eth_dev *dev)
rte_errno ? ")" : "");
continue;
}
- ETH(sdev) = rte_eth_dev_allocated(da->name);
+ RTE_ETH_FOREACH_DEV(j) {
+ if (strcmp(rte_eth_devices[j].device->name,
+ da->name) == 0) {
+ ETH(sdev) = &rte_eth_devices[j];
+ break;
+ }
+ }
if (ETH(sdev) == NULL) {
ERROR("sub_device %d init went wrong", i);
return -ENODEV;
@@ -90,19 +97,20 @@ fs_bus_uninit(struct rte_eth_dev *dev)
{
struct sub_device *sdev = NULL;
uint8_t i;
- int ret;
+ int sdev_ret;
+ int ret = 0;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
- ret = rte_eal_hotplug_remove(sdev->bus->name,
- sdev->dev->name);
- if (ret) {
- ERROR("Failed to remove requested device %s",
- sdev->dev->name);
+ sdev_ret = rte_eal_hotplug_remove(sdev->bus->name,
+ sdev->dev->name);
+ if (sdev_ret) {
+ ERROR("Failed to remove requested device %s (err: %d)",
+ sdev->dev->name, sdev_ret);
continue;
}
sdev->state = DEV_PROBED - 1;
}
- return 0;
+ return ret;
}
int
@@ -111,8 +119,6 @@ failsafe_eal_uninit(struct rte_eth_dev *dev)
int ret;
ret = fs_bus_uninit(dev);
- if (ret)
- return ret;
PRIV(dev)->state = DEV_PROBED - 1;
- return 0;
+ return ret;
}
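
fs_bus_init() above stops calling rte_eth_dev_allocated() on the devargs name and instead walks the valid ports, matching on the underlying rte_device name. The same lookup as a standalone helper, assuming 17.11's RTE_ETH_FOREACH_DEV (the null check on device is an added precaution):

    #include <string.h>
    #include <rte_ethdev.h>

    /* find the ethdev whose bus device name matches, or NULL */
    static struct rte_eth_dev *
    ethdev_by_device_name(const char *name)
    {
            uint16_t pid;

            RTE_ETH_FOREACH_DEV(pid) {
                    if (rte_eth_devices[pid].device != NULL &&
                        strcmp(rte_eth_devices[pid].device->name,
                               name) == 0)
                            return &rte_eth_devices[pid];
            }
            return NULL;
    }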
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index a3a8cce9..21392e5a 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -35,6 +35,7 @@
#include <rte_flow.h>
#include <rte_flow_driver.h>
+#include <rte_cycles.h>
#include "failsafe_private.h"
@@ -203,6 +204,7 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
ERROR("Adding MAC address %s failed", ea_fmt);
+ return ret;
}
}
/* VLAN filter */
@@ -308,6 +310,28 @@ fs_dev_remove(struct sub_device *sdev)
failsafe_hotplug_alarm_install(sdev->fs_dev);
}
+static void
+fs_dev_stats_save(struct sub_device *sdev)
+{
+ struct rte_eth_stats stats;
+ int err;
+
+ /* Attempt to read current stats. */
+ err = rte_eth_stats_get(PORT_ID(sdev), &stats);
+ if (err) {
+ uint64_t timestamp = sdev->stats_snapshot.timestamp;
+
+ WARN("Could not access latest statistics from sub-device %d.\n",
+ SUB_ID(sdev));
+ if (timestamp != 0)
+ WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
+ (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
+ }
+ failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator,
+ err ? &sdev->stats_snapshot.stats : &stats);
+ memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
+}
+
static inline int
fs_rxtx_clean(struct sub_device *sdev)
{
@@ -329,8 +353,10 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
uint8_t i;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- if (sdev->remove && fs_rxtx_clean(sdev))
+ if (sdev->remove && fs_rxtx_clean(sdev)) {
+ fs_dev_stats_save(sdev);
fs_dev_remove(sdev);
+ }
}
int
@@ -399,8 +425,31 @@ err_remove:
return ret;
}
+void
+failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from)
+{
+ uint32_t i;
+
+ RTE_ASSERT(to != NULL && from != NULL);
+ to->ipackets += from->ipackets;
+ to->opackets += from->opackets;
+ to->ibytes += from->ibytes;
+ to->obytes += from->obytes;
+ to->imissed += from->imissed;
+ to->ierrors += from->ierrors;
+ to->oerrors += from->oerrors;
+ to->rx_nombuf += from->rx_nombuf;
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ to->q_ipackets[i] += from->q_ipackets[i];
+ to->q_opackets[i] += from->q_opackets[i];
+ to->q_ibytes[i] += from->q_ibytes[i];
+ to->q_obytes[i] += from->q_obytes[i];
+ to->q_errors[i] += from->q_errors[i];
+ }
+}
+
int
-failsafe_eth_rmv_event_callback(uint8_t port_id __rte_unused,
+failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type event __rte_unused,
void *cb_arg, void *out __rte_unused)
{
@@ -419,7 +468,7 @@ failsafe_eth_rmv_event_callback(uint8_t port_id __rte_unused,
}
int
-failsafe_eth_lsc_event_callback(uint8_t port_id __rte_unused,
+failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type event __rte_unused,
void *cb_arg, void *out __rte_unused)
{
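
fs_dev_stats_save() above folds a vanishing sub-device's counters into a running accumulator, falling back to the last good snapshot when the final read fails, with failsafe_stats_increment() doing the summing. A reduced sketch of the accumulate step over the rte_eth_stats fields (per-queue counters omitted for brevity):

    #include <rte_ethdev.h>

    /* add `from` into `to`; a subset of failsafe_stats_increment() */
    static void
    stats_add(struct rte_eth_stats *to, const struct rte_eth_stats *from)
    {
            to->ipackets  += from->ipackets;
            to->opackets  += from->opackets;
            to->ibytes    += from->ibytes;
            to->obytes    += from->obytes;
            to->imissed   += from->imissed;
            to->ierrors   += from->ierrors;
            to->oerrors   += from->oerrors;
            to->rx_nombuf += from->rx_nombuf;
    }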
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index ff9ad155..e16a5903 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -38,6 +38,7 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
+#include <rte_cycles.h>
#include "failsafe_private.h"
@@ -79,132 +80,14 @@ static struct rte_eth_dev_info default_infos = {
.flow_type_rss_offloads = 0x0,
};
-/**
- * Check whether a specific offloading capability
- * is supported by a sub_device.
- *
- * @return
- * 0: all requested capabilities are supported by the sub_device
- * positive value: This flag at least is not supported by the sub_device
- */
-static int
-fs_port_offload_validate(struct rte_eth_dev *dev,
- struct sub_device *sdev)
-{
- struct rte_eth_dev_info infos = {0};
- struct rte_eth_conf *cf;
- uint32_t cap;
-
- cf = &dev->data->dev_conf;
- SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
- /* RX capabilities */
- cap = infos.rx_offload_capa;
- if (cf->rxmode.hw_vlan_strip &&
- ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
- WARN("VLAN stripping offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_VLAN_STRIP;
- }
- if (cf->rxmode.hw_ip_checksum &&
- ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM)) !=
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))) {
- WARN("IP checksum offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- }
- if (cf->rxmode.enable_lro &&
- ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
- WARN("TCP LRO offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_TCP_LRO;
- }
- if (cf->rxmode.hw_vlan_extend &&
- ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
- WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
- SUB_ID(sdev));
- return DEV_RX_OFFLOAD_QINQ_STRIP;
- }
- /* TX capabilities */
- /* Nothing to do, no tx capa supported */
- return 0;
-}
-
-/*
- * Disable the dev_conf flag related to an offload capability flag
- * within an ethdev configuration.
- */
-static int
-fs_port_disable_offload(struct rte_eth_conf *cf,
- uint32_t ol_cap)
-{
- switch (ol_cap) {
- case DEV_RX_OFFLOAD_VLAN_STRIP:
- INFO("Disabling VLAN stripping offload");
- cf->rxmode.hw_vlan_strip = 0;
- break;
- case DEV_RX_OFFLOAD_IPV4_CKSUM:
- case DEV_RX_OFFLOAD_UDP_CKSUM:
- case DEV_RX_OFFLOAD_TCP_CKSUM:
- case (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM):
- INFO("Disabling IP checksum offload");
- cf->rxmode.hw_ip_checksum = 0;
- break;
- case DEV_RX_OFFLOAD_TCP_LRO:
- INFO("Disabling TCP LRO offload");
- cf->rxmode.enable_lro = 0;
- break;
- case DEV_RX_OFFLOAD_QINQ_STRIP:
- INFO("Disabling stacked VLAN stripping offload");
- cf->rxmode.hw_vlan_extend = 0;
- break;
- default:
- DEBUG("Unable to disable offload capability: %" PRIx32,
- ol_cap);
- return -1;
- }
- return 0;
-}
-
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
uint8_t i;
- int capa_flag;
int ret;
FOREACH_SUBDEV(sdev, i, dev) {
- if (sdev->state != DEV_PROBED)
- continue;
- DEBUG("Checking capabilities for sub_device %d", i);
- while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
- /*
- * Refuse to change configuration if multiple devices
- * are present and we already have configured at least
- * some of them.
- */
- if (PRIV(dev)->state >= DEV_ACTIVE &&
- PRIV(dev)->subs_tail > 1) {
- ERROR("device already configured, cannot fix live configuration");
- return -1;
- }
- ret = fs_port_disable_offload(&dev->data->dev_conf,
- capa_flag);
- if (ret) {
- ERROR("Unable to disable offload capability");
- return ret;
- }
- }
- }
- FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
int lsc_enabled;
@@ -582,13 +465,30 @@ fs_link_update(struct rte_eth_dev *dev,
return -1;
}
-static void
+static int
fs_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
- if (TX_SUBDEV(dev) == NULL)
- return;
- rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
+ uint64_t *timestamp = &sdev->stats_snapshot.timestamp;
+
+ ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
+ if (ret) {
+ ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
+ i, ret);
+ *timestamp = 0;
+ return ret;
+ }
+ *timestamp = rte_rdtsc();
+ failsafe_stats_increment(stats, snapshot);
+ }
+ return 0;
}
static void
@@ -597,8 +497,11 @@ fs_stats_reset(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
rte_eth_stats_reset(PORT_ID(sdev));
+ memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
+ }
+ memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 0361cf43..d81cc3ca 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -93,6 +93,11 @@ enum dev_state {
DEV_STARTED,
};
+struct fs_stats {
+ struct rte_eth_stats stats;
+ uint64_t timestamp;
+};
+
struct sub_device {
/* Exhaustive DPDK device description */
struct rte_devargs devargs;
@@ -102,6 +107,8 @@ struct sub_device {
uint8_t sid;
/* Device state machine */
enum dev_state state;
+ /* Last stats snapshot passed to user */
+ struct fs_stats stats_snapshot;
/* Some device are defined as a command line */
char *cmdline;
/* fail-safe device backreference */
@@ -140,6 +147,7 @@ struct fs_priv {
* synchronized state.
*/
enum dev_state state;
+ struct rte_eth_stats stats_accumulator;
unsigned int pending_alarm:1; /* An alarm is pending */
/* flow isolation state */
int flow_isolated:1;
@@ -180,10 +188,12 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev);
int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
-int failsafe_eth_rmv_event_callback(uint8_t port_id,
+void failsafe_stats_increment(struct rte_eth_stats *to,
+ struct rte_eth_stats *from);
+int failsafe_eth_rmv_event_callback(uint16_t port_id,
enum rte_eth_event_type type,
void *arg, void *out);
-int failsafe_eth_lsc_event_callback(uint8_t port_id,
+int failsafe_eth_lsc_event_callback(uint16_t port_id,
enum rte_eth_event_type event,
void *cb_arg, void *out);
@@ -220,10 +230,10 @@ extern int mac_from_arg;
* dev: (struct rte_eth_dev *), fail-safe ethdev
* state: (enum dev_state), minimum acceptable device state
*/
-#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
- for (i = fs_find_next((dev), 0, state); \
- i < PRIV(dev)->subs_tail && (s = &PRIV(dev)->subs[i]); \
- i = fs_find_next((dev), i + 1, state))
+#define FOREACH_SUBDEV_STATE(s, i, dev, state) \
+ for (s = fs_find_next((dev), 0, state, &i); \
+ s != NULL; \
+ s = fs_find_next((dev), i + 1, state, &i))
/**
* Iterator construct over fail-safe sub-devices:
@@ -294,18 +304,26 @@ extern int mac_from_arg;
/* inlined functions */
-static inline uint8_t
-fs_find_next(struct rte_eth_dev *dev, uint8_t sid,
- enum dev_state min_state)
+static inline struct sub_device *
+fs_find_next(struct rte_eth_dev *dev,
+ uint8_t sid,
+ enum dev_state min_state,
+ uint8_t *sid_out)
{
- while (sid < PRIV(dev)->subs_tail) {
- if (PRIV(dev)->subs[sid].state >= min_state)
+ struct sub_device *subs;
+ uint8_t tail;
+
+ subs = PRIV(dev)->subs;
+ tail = PRIV(dev)->subs_tail;
+ while (sid < tail) {
+ if (subs[sid].state >= min_state)
break;
sid++;
}
- if (sid >= PRIV(dev)->subs_tail)
- return PRIV(dev)->subs_tail;
- return sid;
+ *sid_out = sid;
+ if (sid >= tail)
+ return NULL;
+ return &subs[sid];
}
/*
@@ -334,7 +352,7 @@ fs_switch_dev(struct rte_eth_dev *dev,
} else if ((txd && txd->state < req_state) ||
txd == NULL ||
txd == banned) {
- struct sub_device *sdev;
+ struct sub_device *sdev = NULL;
uint8_t i;
/* Using acceptable device */
@@ -346,9 +364,10 @@ fs_switch_dev(struct rte_eth_dev *dev,
PRIV(dev)->subs_tx = i;
break;
}
- } else if (txd && txd->state < req_state) {
- DEBUG("No device ready, deactivating tx_dev");
- PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+ if (i >= PRIV(dev)->subs_tail || sdev == NULL) {
+ DEBUG("No device ready, deactivating tx_dev");
+ PRIV(dev)->subs_tx = PRIV(dev)->subs_tail;
+ }
} else {
return;
}
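
The reworked FOREACH_SUBDEV_STATE above makes the helper return the sub-device pointer and report its index through an out-parameter, so the loop condition no longer indexes past the tail. The same shape on a generic array, purely as a sketch (all names hypothetical):

    #include <stddef.h>

    struct item { int state; };

    /* next item with state >= min, or NULL; *idx reports the position */
    static struct item *
    find_next(struct item *arr, size_t n, size_t from, int min, size_t *idx)
    {
            while (from < n && arr[from].state < min)
                    from++;
            *idx = from;
            return from < n ? &arr[from] : NULL;
    }

    #define FOREACH_READY(it, i, arr, n) \
            for ((it) = find_next(arr, n, 0, 1, &(i)); \
                 (it) != NULL; \
                 (it) = find_next(arr, n, (i) + 1, 1, &(i)))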
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 73114215..70157c82 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -43,7 +43,8 @@ fs_rx_unsafe(struct sub_device *sdev)
{
return (ETH(sdev) == NULL) ||
(ETH(sdev)->rx_pkt_burst == NULL) ||
- (sdev->state != DEV_STARTED);
+ (sdev->state != DEV_STARTED) ||
+ (sdev->remove != 0);
}
static inline int
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index e0024f05..1129596f 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -76,11 +76,14 @@ CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
endif
endif
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
#
# Add extra flags for base driver source files to disable warnings in them
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 8e1a9506..1273aa86 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -155,6 +155,7 @@ struct fm10k_dev_info {
struct fm10k_macvlan_filter_info macvlan;
/* Flag to indicate if RX vector conditions satisfied */
bool rx_vec_allowed;
+ bool sm_down;
};
/*
@@ -204,7 +205,7 @@ struct fm10k_rx_queue {
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
uint16_t rxrearm_start; /* the idx we start the re-arming from */
uint16_t rx_using_sse; /* indicates that vector RX is in use */
- uint8_t port_id;
+ uint16_t port_id;
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
@@ -241,7 +242,7 @@ struct fm10k_tx_queue {
volatile uint32_t *tail_ptr;
uint32_t txq_flags; /* Holds flags for this TXq */
uint16_t nb_desc;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
uint16_t queue_id;
uint16_t tx_ftag_en; /* indicates FTAG TX supported */
@@ -252,11 +253,11 @@ struct fm10k_txq_ops {
};
#define MBUF_DMA_ADDR(mb) \
- ((uint64_t) ((mb)->buf_physaddr + (mb)->data_off))
+ ((uint64_t) ((mb)->buf_iova + (mb)->data_off))
/* enforce 512B alignment on default Rx DMA addresses */
#define MBUF_DMA_ADDR_DEFAULT(mb) \
- ((uint64_t) RTE_ALIGN(((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM),\
+ ((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\
FM10K_RX_DATABUF_ALIGN))
static inline void fifo_reset(struct fifo *fifo, uint32_t len)
@@ -289,7 +290,7 @@ static inline uint16_t fifo_remove(struct fifo *fifo)
}
static inline void
-fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint8_t in_port)
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)
{
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
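
The fm10k DMA macros above switch from buf_physaddr to buf_iova while keeping the 512 B alignment on default Rx addresses. A worked sketch of the computation, with RX_ALIGN standing in for FM10K_RX_DATABUF_ALIGN and assuming the default 128 B headroom:

    #include <rte_common.h>
    #include <rte_mbuf.h>

    #define RX_ALIGN 512    /* stand-in for FM10K_RX_DATABUF_ALIGN */

    static inline uint64_t
    rx_dma_addr_default(const struct rte_mbuf *mb)
    {
            /* round iova + headroom up to the next 512 B boundary */
            return RTE_ALIGN(mb->buf_iova + RTE_PKTMBUF_HEADROOM, RX_ALIGN);
    }
    /* e.g. iova 0x1000 + headroom 128 = 0x1080, aligned up -> 0x1200 */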
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index e60d3a36..2d05a466 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1256,14 +1256,17 @@ static int
fm10k_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- /* The host-interface link is always up. The speed is ~50Gbps per Gen3
- * x8 PCIe interface. For now, we leave the speed undefined since there
- * is no 50Gbps Ethernet. */
+ /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
+ * leave the speed undefined since there is no 50Gbps Ethernet.
+ */
dev->data->dev_link.link_speed = 0;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status =
+ dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
return 0;
}
@@ -1346,7 +1349,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return FM10K_NB_XSTATS;
}
-static void
+static int
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint64_t ipackets, opackets, ibytes, obytes;
@@ -1376,6 +1379,7 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->opackets = opackets;
stats->ibytes = ibytes;
stats->obytes = obytes;
+ return 0;
}
static void
@@ -1590,7 +1594,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return 0;
}
-static void
+static int
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
@@ -1609,6 +1613,8 @@ fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
+
+ return 0;
}
/* Add/Remove a MAC address, and update filters to main VSI */
@@ -1887,7 +1893,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = mz->iova;
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
@@ -2047,7 +2053,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = mz->iova;
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
@@ -2552,6 +2558,10 @@ fm10k_dev_interrupt_handler_pf(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t cause, status;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ int status_mbx;
+ s32 err;
if (hw->mac.type != fm10k_mac_pf)
return;
@@ -2568,14 +2578,69 @@ fm10k_dev_interrupt_handler_pf(void *param)
if (cause & FM10K_EICR_SWITCHNOTREADY)
PMD_INIT_LOG(ERR, "INT: Switch is not ready");
- if (cause & FM10K_EICR_SWITCHREADY)
+ if (cause & FM10K_EICR_SWITCHREADY) {
PMD_INIT_LOG(INFO, "INT: Switch is ready");
+ if (dev_info->sm_down == 1) {
+ fm10k_mbx_lock(hw);
+
+ /* For recreating logical ports */
+ status_mbx = hw->mac.ops.update_lport_state(hw,
+ hw->mac.dglort_map, MAX_LPORT_NUM, 1);
+ if (status_mbx == FM10K_SUCCESS)
+ PMD_INIT_LOG(INFO,
+ "INT: Recreated Logical port");
+ else
+ PMD_INIT_LOG(INFO,
+ "INT: Logical ports weren't recreated");
+
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ false);
+
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+ /*
+ * Add default mac address and vlan for the logical
+			 * ports that have been created; leave it to the
+ * application to fully recover Rx filtering.
+ */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ if (!(dev->data->dev_conf.rxmode.mq_mode &
+ ETH_MQ_RX_VMDQ_FLAG))
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid,
+ true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
+ }
/* Handle mailbox message */
fm10k_mbx_lock(hw);
- hw->mbx.ops.process(hw, &hw->mbx);
+ err = hw->mbx.ops.process(hw, &hw->mbx);
fm10k_mbx_unlock(hw);
+ if (err == FM10K_ERR_RESET_REQUESTED) {
+ PMD_INIT_LOG(INFO, "INT: Switch is down");
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
+
/* Handle SRAM error */
if (cause & FM10K_EICR_SRAMERROR) {
PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
@@ -2616,6 +2681,11 @@ fm10k_dev_interrupt_handler_vf(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct fm10k_mbx_info *mbx = &hw->mbx;
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
+ const enum fm10k_mbx_state state = mbx->state;
+ int status_mbx;
if (hw->mac.type != fm10k_mac_vf)
return;
@@ -2625,6 +2695,49 @@ fm10k_dev_interrupt_handler_vf(void *param)
hw->mbx.ops.process(hw, &hw->mbx);
fm10k_mbx_unlock(hw);
+ if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone down");
+
+ fm10k_mbx_lock(hw);
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+ fm10k_mbx_unlock(hw);
+
+ /* Setting reset flag */
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
+
+ if (dev_info->sm_down == 1 &&
+ hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
+ PMD_INIT_LOG(INFO, "INT: Switch has gone up");
+ fm10k_mbx_lock(hw);
+ status_mbx = hw->mac.ops.update_xcast_mode(hw,
+ hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
+ if (status_mbx != FM10K_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
+ fm10k_mbx_unlock(hw);
+
+ /* first clear the internal SW recording structure */
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
+ fm10k_MAC_filter_set(dev, hw->mac.addr, false,
+ MAIN_VSI_POOL_NUMBER);
+
+ /*
+ * Add default mac address and vlan for the logical ports that
+	 * have been created; leave it to the application to fully recover
+ * Rx filtering.
+ */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+ fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+
+ dev_info->sm_down = 0;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+ }
+
/* Re-enable interrupt from device side */
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
@@ -2908,7 +3021,6 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
}
rte_eth_copy_pci_info(dev, pdev);
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
@@ -3142,7 +3254,8 @@ static const struct rte_pci_id pci_id_fm10k_map[] = {
static struct rte_pci_driver rte_pmd_fm10k = {
.id_table = pci_id_fm10k_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_fm10k_pci_probe,
.remove = eth_fm10k_pci_remove,
};
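
The fm10k interrupt handlers above raise RTE_ETH_EVENT_INTR_LSC whenever the switch manager goes down or comes back, and fm10k_link_update() now folds sm_down into link_status. A sketch of consuming that event from an application, assuming the 17.11 callback signature (the handler body is illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    on_lsc(uint16_t port_id, enum rte_eth_event_type ev __rte_unused,
           void *cb_arg __rte_unused, void *ret_param __rte_unused)
    {
            struct rte_eth_link link;

            rte_eth_link_get_nowait(port_id, &link);
            printf("port %u link is %s\n", port_id,
                   link.link_status ? "up" : "down");
            return 0;
    }

    /* after configuring the port:
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
     *                               on_lsc, NULL);
     */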
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index c9bb04a0..d6081e48 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -158,10 +158,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Packets in fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
- * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci
+ * So, always PKT_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -198,7 +198,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
q->alloc_thresh);
if (unlikely(ret != 0)) {
- uint8_t port = q->port_id;
+ uint16_t port = q->port_id;
PMD_RX_LOG(ERR, "Failed to alloc mbuf");
/*
* Need to restore next_dd if we cannot allocate new
@@ -319,10 +319,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Packets in fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
- * So, always PKT_RX_VLAN_PKT flag is set and vlan_tci
+ * So, always PKT_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
- first_seg->ol_flags |= PKT_RX_VLAN_PKT;
+ first_seg->ol_flags |= PKT_RX_VLAN;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -356,7 +356,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
q->alloc_thresh);
if (unlikely(ret != 0)) {
- uint8_t port = q->port_id;
+ uint16_t port = q->port_id;
PMD_RX_LOG(ERR, "Failed to alloc mbuf");
/*
* Need to restore next_dd if we cannot allocate new
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index d23bfe9b..ce042d3d 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -81,8 +81,8 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+ PKT_RX_VLAN, PKT_RX_VLAN,
+ PKT_RX_VLAN, PKT_RX_VLAN);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@@ -330,8 +330,8 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
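
The vector rearm code above fills buf_addr and buf_iova with a single 128-bit load, so it pins the field layout at compile time via RTE_BUILD_BUG_ON. A sketch of guarding such an assumption on a hypothetical struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_common.h>

    struct desc_pair {
            void    *vaddr;     /* low 64 bits of the paired load */
            uint64_t iova;      /* must sit immediately after vaddr */
    };

    static inline void
    check_layout(void)
    {
            /* compilation fails if the fields ever drift apart */
            RTE_BUILD_BUG_ON(offsetof(struct desc_pair, iova) !=
                             offsetof(struct desc_pair, vaddr) + 8);
    }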
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 55c79a60..9ab8c84d 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -39,10 +39,13 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
CFLAGS += -DX722_A0_SUPPORT
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_i40e_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# Add extra flags for base driver files (also known as shared code)
@@ -78,7 +81,7 @@ endif
CFLAGS_i40e_lan_hmc.o += -Wno-error
endif
-OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index c57ecded..8e5c593c 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -35,6 +35,7 @@
#include <string.h>
#include <stdint.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdarg.h>
@@ -57,7 +58,6 @@ typedef uint16_t u16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
-typedef int bool;
typedef enum i40e_status_code i40e_status;
#define __iomem
@@ -99,7 +99,6 @@ typedef enum i40e_status_code i40e_status;
#define max(a,b) RTE_MAX(a,b)
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
#define DEBUGOUT1(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5f26e24a..811cc9ff 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -43,6 +43,7 @@
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
@@ -65,6 +66,7 @@
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
+#include "rte_pmd_i40e.h"
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
@@ -86,12 +88,6 @@
/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
-/* Flow control default high water */
-#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
-
-/* Flow control default low water */
-#define I40E_DEFAULT_LOW_WATER (0x1A40/1024)
-
/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL 0x00000001
@@ -101,6 +97,12 @@
/* Kilobytes shift */
#define I40E_KILOSHIFT 10
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT)
+
/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128
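
For scale: with I40E_KILOSHIFT == 10, the new defaults above evaluate to 0xF2000 >> 10 = 968, i.e. 968 KB for both the high and the low watermark, replacing the old 0x1C40/1024 = 7 and 0x1A40/1024 = 6 (KB) values removed just above.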
@@ -137,10 +139,6 @@
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
#define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
-#define I40E_MAX_PERCENT 100
-#define I40E_DEFAULT_DCB_APP_NUM 1
-#define I40E_DEFAULT_DCB_APP_PRIO 3
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -250,13 +248,14 @@ static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
+static int i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
-static void i40e_dev_stats_get(struct rte_eth_dev *dev,
+static int i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -278,7 +277,7 @@ static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid);
-static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue,
int on);
@@ -308,7 +307,6 @@ static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
-static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
@@ -360,6 +358,12 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
+static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+ uint16_t seid,
+ uint16_t rule_type,
+ uint16_t *entries,
+ uint16_t count,
+ uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t sw_id, uint8_t on);
@@ -449,6 +453,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.dev_start = i40e_dev_start,
.dev_stop = i40e_dev_stop,
.dev_close = i40e_dev_close,
+ .dev_reset = i40e_dev_reset,
.promiscuous_enable = i40e_dev_promiscuous_enable,
.promiscuous_disable = i40e_dev_promiscuous_disable,
.allmulticast_enable = i40e_dev_allmulticast_enable,
@@ -645,7 +650,8 @@ static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_i40e_pmd = {
.id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_i40e_pci_probe,
.remove = eth_i40e_pci_remove,
};
@@ -695,23 +701,22 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
/*
- * Initialize registers for flexible payload, which should be set by NVM.
- * This should be removed from code once it is fixed in NVM.
+ * Force global configuration for flexible payload
+	 * to the first 16 bytes of the corresponding L2/L3/L4 payload.
+ * This should be removed from code once proper
+ * configuration API is added to avoid configuration conflicts
+ * between ports of the same device.
*/
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
- /* Initialize registers for parsing packet type of QinQ */
+ /*
+ * Initialize registers for parsing packet type of QinQ
+ * This should be removed from code once proper
+ * configuration API is added to avoid configuration conflicts
+ * between ports of the same device.
+ */
I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
@@ -1034,6 +1039,35 @@ err_fdir_hash_map_alloc:
return ret;
}
+static void
+i40e_init_customized_info(struct i40e_pf *pf)
+{
+ int i;
+
+ /* Initialize customized pctype */
+ for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
+ pf->customized_pctype[i].index = i;
+ pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
+ pf->customized_pctype[i].valid = false;
+ }
+
+ pf->gtp_support = false;
+}
+
+void
+i40e_init_queue_region_conf(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i;
+
+ for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
+
+ memset(info, 0, sizeof(struct i40e_queue_regions));
+}
+
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
@@ -1062,11 +1096,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return 0;
}
i40e_set_default_ptype_table(dev);
+ i40e_set_default_pctype_table(dev);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
@@ -1299,6 +1333,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
+ /* Initialize customized information */
+ i40e_init_customized_info(pf);
+
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
@@ -1309,6 +1346,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
if (ret < 0)
goto err_init_fdir_filter_list;
+ /* initialize queue region configuration */
+ i40e_init_queue_region_conf(dev);
+
return 0;
err_init_fdir_filter_list:
@@ -1594,7 +1634,8 @@ i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
- int base_queue, int nb_queue)
+ int base_queue, int nb_queue,
+ uint16_t itr_idx)
{
int i;
uint32_t val;
@@ -1603,7 +1644,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Bind all RX queues to allocated MSIX interrupt */
for (i = 0; i < nb_queue; i++) {
val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- I40E_QINT_RQCTL_ITR_INDX_MASK |
+ itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
((base_queue + i + 1) <<
I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
@@ -1666,7 +1707,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
}
void
-i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
+i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1694,7 +1735,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
__vsi_queues_bind_intr(vsi, msix_vect,
- vsi->base_queue, vsi->nb_qps);
+ vsi->base_queue, vsi->nb_qps,
+ itr_idx);
return;
}
@@ -1720,7 +1762,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
/* not enough msix_vect, map all to one */
__vsi_queues_bind_intr(vsi, msix_vect,
vsi->base_queue + i,
- vsi->nb_used_qps - i);
+ vsi->nb_used_qps - i,
+ itr_idx);
for (; !!record && i < vsi->nb_used_qps; i++)
intr_handle->intr_vec[queue_idx + i] =
msix_vect;
@@ -1728,7 +1771,8 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
}
/* 1:1 queue/msix_vect mapping */
__vsi_queues_bind_intr(vsi, msix_vect,
- vsi->base_queue + i, 1);
+ vsi->base_queue + i, 1,
+ itr_idx);
if (!!record)
intr_handle->intr_vec[queue_idx + i] = msix_vect;
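The itr_idx argument threaded through this hunk selects which hardware ITR timer throttles a queue's interrupt. A minimal sketch of the resulting QINT_RQCTL encoding, reusing the shift names from the hunk above (the CAUSE_ENA mask is assumed from the shared register header):

static uint32_t
qint_rqctl_value(uint16_t msix_vect, uint16_t itr_idx, int next_queue)
{
	/* MSI-X vector, ITR index and next-queue chain packed into one
	 * QINT_RQCTL register value, mirroring the expression above */
	return (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	       (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	       ((next_queue + 1) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	       (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	       I40E_QINT_RQCTL_CAUSE_ENA_MASK;
}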
@@ -1918,8 +1962,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
hw->adapter_stopped = 0;
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, autonegotiation disabled",
+ dev->data->port_id);
return -EINVAL;
}
@@ -1957,19 +2002,21 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* Map queues with MSIX interrupt */
main_vsi->nb_used_qps = dev->data->nb_rx_queues -
pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(main_vsi);
+ i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+ i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+ I40E_ITR_INDEX_DEFAULT);
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
/* enable FDIR MSIX interrupt */
if (pf->fdir.fdir_vsi) {
- i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+ i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
+ I40E_ITR_INDEX_NONE);
i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
}
@@ -2063,7 +2110,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
- struct i40e_mirror_rule *p_mirror;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
@@ -2092,13 +2138,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
/* Set link down */
i40e_dev_set_link_down(dev);
- /* Remove all mirror rules */
- while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
- TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
- rte_free(p_mirror);
- }
- pf->nb_mirror_rule = 0;
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -2115,6 +2154,9 @@ i40e_dev_stop(struct rte_eth_dev *dev)
/* reset hierarchy commit */
pf->tm_conf.committed = false;
+ /* Remove all the queue region configuration */
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
hw->adapter_stopped = 1;
}
@@ -2125,12 +2167,34 @@ i40e_dev_close(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct i40e_mirror_rule *p_mirror;
uint32_t reg;
int i;
+ int ret;
PMD_INIT_FUNC_TRACE();
i40e_dev_stop(dev);
+
+ /* Remove all mirror rules */
+ while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
+ ret = i40e_aq_del_mirror_rule(hw,
+ pf->main_vsi->veb->seid,
+ p_mirror->rule_type,
+ p_mirror->entries,
+ p_mirror->num_entries,
+ p_mirror->id);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
+ "status = %d, aq_err = %d.", ret,
+ hw->aq.asq_last_status);
+
+ /* remove mirror software resource anyway */
+ TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
+ rte_free(p_mirror);
+ pf->nb_mirror_rule--;
+ }
+
i40e_dev_free_queues(dev);
/* Disable interrupt */
@@ -2165,6 +2229,32 @@ i40e_dev_close(struct rte_eth_dev *dev)
I40E_WRITE_FLUSH(hw);
}
+/*
+ * Reset PF device only to re-initialize resources in PMD layer
+ */
+static int
+i40e_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ /* When a DPDK PMD PF begins to reset a PF port, it should notify all
+ * of its VFs so they stay aligned with it. The detailed notification
+ * mechanism is PMD specific, and for the i40e PF it is rather complex.
+ * To avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
+ * active is currently not supported. It might be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_i40e_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_i40e_dev_init(dev);
+
+ return ret;
+}
+
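A hypothetical application-side use of the new reset entry point; rte_eth_dev_reset() routes into i40e_dev_reset() above, and queue reconfiguration is the caller's job afterwards:

#include <rte_ethdev.h>

/* Sketch: recover a port through the reset path. Returns -ENOTSUP
 * while SR-IOV is active, as implemented above. */
static int
recover_port(uint16_t port_id, const struct rte_eth_conf *conf)
{
	int ret;

	rte_eth_dev_stop(port_id);
	ret = rte_eth_dev_reset(port_id);
	if (ret != 0)
		return ret;
	/* the device comes back uninitialized: reconfigure and restart */
	ret = rte_eth_dev_configure(port_id, 1, 1, conf);
	if (ret != 0)
		return ret;
	/* rx/tx queue setup omitted for brevity */
	return rte_eth_dev_start(port_id);
}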
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
@@ -2653,7 +2743,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
}
/* Get all statistics of a port */
-static void
+static int
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -2664,13 +2754,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* call read registers - updates values, now write them to struct */
i40e_read_stats_registers(pf, hw);
- stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
- pf->main_vsi->eth_stats.rx_multicast +
- pf->main_vsi->eth_stats.rx_broadcast -
+ stats->ipackets = ns->eth.rx_unicast +
+ ns->eth.rx_multicast +
+ ns->eth.rx_broadcast -
+ ns->eth.rx_discards -
pf->main_vsi->eth_stats.rx_discards;
- stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
- pf->main_vsi->eth_stats.tx_multicast +
- pf->main_vsi->eth_stats.tx_broadcast;
+ stats->opackets = ns->eth.tx_unicast +
+ ns->eth.tx_multicast +
+ ns->eth.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
@@ -2752,6 +2843,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ns->checksum_error);
PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
+ return 0;
}
/* Reset the statistics */
@@ -2965,7 +3057,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
- dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3129,7 +3221,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
return ret;
}
-static void
+static int
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -3162,6 +3254,8 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
+
+ return 0;
}
static void
@@ -3225,6 +3319,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
fc_conf->pause_time = pf->fc_conf.pause_time;
+
+ /* Read out from the registers, in case they were modified by another port */
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
+ I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
+
fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
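A short sketch of the unit handling assumed here: the GLRPB watermark registers hold byte counts, and shifting by I40E_KILOSHIFT converts them to the kilobyte units carried by struct rte_eth_fc_conf:

uint32_t ghw_bytes = I40E_READ_REG(hw, I40E_GLRPB_GHW);
uint32_t high_water_kb = ghw_bytes >> I40E_KILOSHIFT;	/* bytes -> KB */
/* the reverse shift applies when writing the watermark back */
uint32_t ghw_reg = high_water_kb << I40E_KILOSHIFT;	/* KB -> bytes */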
@@ -3400,7 +3501,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
return -EINVAL;
}
- (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
+ rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
@@ -3505,10 +3606,10 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,
}
if (add) {
- (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
- (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
+ rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
+ rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
ETHER_ADDR_LEN);
- (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
+ rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
ETHER_ADDR_LEN);
mac_filter.filter_type = filter->filter_type;
@@ -3519,7 +3620,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,
}
ether_addr_copy(new_mac, &pf->dev_addr);
} else {
- (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+ rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
ETHER_ADDR_LEN);
ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
if (ret != I40E_SUCCESS) {
@@ -3741,7 +3842,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->size = size;
mem->va = mz->addr;
- mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->pa = mz->iova;
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
@@ -4311,7 +4412,7 @@ i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
vsi->info.valid_sections =
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
@@ -4350,7 +4451,7 @@ i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
return ret;
}
- (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
+ rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
sizeof(vsi->info.qs_handle));
return I40E_SUCCESS;
}
@@ -4607,7 +4708,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
if (vsi->type != I40E_VSI_MAIN)
return I40E_ERR_CONFIG;
memset(&def_filter, 0, sizeof(def_filter));
- (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
+ rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
ETH_ADDR_LEN);
def_filter.vlan_tag = 0;
def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
@@ -4626,7 +4727,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return I40E_ERR_NO_MEMORY;
}
mac = &f->mac_info.mac_addr;
- (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
+ rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
ETH_ADDR_LEN);
f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
@@ -4634,7 +4735,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return ret;
}
- (void)rte_memcpy(&filter.mac_addr,
+ rte_memcpy(&filter.mac_addr,
(struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
return i40e_vsi_add_mac(vsi, &filter);
@@ -4895,7 +4996,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
PMD_DRV_LOG(ERR, "Failed to get VSI params");
goto fail_msix_alloc;
}
- (void)rte_memcpy(&vsi->info, &ctxt.info,
+ rte_memcpy(&vsi->info, &ctxt.info,
sizeof(struct i40e_aqc_vsi_properties_data));
vsi->vsi_id = ctxt.vsi_number;
vsi->info.valid_sections = 0;
@@ -4913,7 +5014,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- (void)rte_memcpy(&ctxt.info, &vsi->info,
+ rte_memcpy(&ctxt.info, &vsi->info,
sizeof(struct i40e_aqc_vsi_properties_data));
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
@@ -4934,15 +5035,15 @@ i40e_vsi_setup(struct i40e_pf *pf,
goto fail_msix_alloc;
}
- (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
sizeof(vsi->info.tc_mapping));
- (void)rte_memcpy(&vsi->info.queue_mapping,
+ rte_memcpy(&vsi->info.queue_mapping,
&ctxt.info.queue_mapping,
sizeof(vsi->info.queue_mapping));
vsi->info.mapping_flags = ctxt.info.mapping_flags;
vsi->info.valid_sections = 0;
- (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
+ rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
ETH_ADDR_LEN);
/**
@@ -5085,7 +5186,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
}
/* MAC/VLAN configuration */
- (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
@@ -5197,7 +5298,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
vsi->info.port_vlan_flags |= vlan_flags;
ctxt.seid = vsi->seid;
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
@@ -5215,7 +5316,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
/* Apply vlan offload setting */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
- i40e_vlan_offload_set(dev, mask);
+ ret = i40e_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Failed to update vlan offload");
+ return ret;
+ }
/* Apply double-vlan setting, not implemented yet */
@@ -5991,7 +6096,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
memset(req_list, 0, ele_buff_size);
for (i = 0; i < actual_num; i++) {
- (void)rte_memcpy(req_list[i].mac_addr,
+ rte_memcpy(req_list[i].mac_addr,
&filter[num + i].macaddr, ETH_ADDR_LEN);
req_list[i].vlan_tag =
rte_cpu_to_le_16(filter[num + i].vlan_id);
@@ -6066,7 +6171,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
memset(req_list, 0, ele_buff_size);
for (i = 0; i < actual_num; i++) {
- (void)rte_memcpy(req_list[i].mac_addr,
+ rte_memcpy(req_list[i].mac_addr,
&filter[num + i].macaddr, ETH_ADDR_LEN);
req_list[i].vlan_tag =
rte_cpu_to_le_16(filter[num + i].vlan_id);
@@ -6217,7 +6322,7 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
"vlan number doesn't match");
return I40E_ERR_PARAM;
}
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
addr, ETH_ADDR_LEN);
mv_f[i].vlan_id =
j * I40E_UINT32_BIT_SIZE + k;
@@ -6246,7 +6351,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
PMD_DRV_LOG(ERR, "buffer number not match");
return I40E_ERR_PARAM;
}
- (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
ETH_ADDR_LEN);
mv_f[i].vlan_id = vlan;
mv_f[i].filter_type = f->mac_info.filter_type;
@@ -6282,7 +6387,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
i = 0;
if (vsi->vlan_num == 0) {
TAILQ_FOREACH(f, &vsi->mac_list, next) {
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr, ETH_ADDR_LEN);
mv_f[i].filter_type = f->mac_info.filter_type;
mv_f[i].vlan_id = 0;
@@ -6452,7 +6557,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = mac_filter->filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
+ rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
ETH_ADDR_LEN);
}
@@ -6475,7 +6580,7 @@ i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
ret = I40E_ERR_NO_MEMORY;
goto DONE;
}
- (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
+ rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
ETH_ADDR_LEN);
f->mac_info.filter_type = mac_filter->filter_type;
TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
@@ -6522,7 +6627,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
+ rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
@@ -6549,104 +6654,36 @@ DONE:
/* Configure hash enable flags for RSS */
uint64_t
-i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
+i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
{
uint64_t hena = 0;
+ int i;
if (!flags)
return hena;
- if (flags & ETH_RSS_FRAG_IPV4)
- hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
- if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
- if (type == I40E_MAC_X722) {
- hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
- } else
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- }
- if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
- if (type == I40E_MAC_X722) {
- hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
- } else
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- }
- if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
- if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- if (flags & ETH_RSS_FRAG_IPV6)
- hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
- if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
- if (type == I40E_MAC_X722) {
- hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
- } else
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- }
- if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
- if (type == I40E_MAC_X722) {
- hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
- } else
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (flags & (1ULL << i))
+ hena |= adapter->pctypes_tbl[i];
}
- if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
- if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- if (flags & ETH_RSS_L2_PAYLOAD)
- hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
return hena;
}
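The rewrite replaces the long per-flag chain with a lookup into the adapter's pctypes_tbl, so one RSS flag expands to every pctype bit registered for that flow type (including the X722 unicast/multicast variants) in a single iteration. A toy use, assuming the table was filled by i40e_set_default_pctype_table():

uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
uint64_t hena = i40e_config_hena(pf->adapter, rss_hf);
/* hena == pf->adapter->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] */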
/* Parse the hash enable flags */
uint64_t
-i40e_parse_hena(uint64_t flags)
+i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
{
uint64_t rss_hf = 0;
if (!flags)
return rss_hf;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
- rss_hf |= ETH_RSS_FRAG_IPV4;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
- rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
- rss_hf |= ETH_RSS_FRAG_IPV6;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
- rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
- if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
- rss_hf |= ETH_RSS_L2_PAYLOAD;
+ int i;
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (flags & adapter->pctypes_tbl[i])
+ rss_hf |= (1ULL << i);
+ }
return rss_hf;
}
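Since i40e_parse_hena() walks the same table in the opposite direction, translating flags to HENA bits and back is lossless for flow types within the adapter's flow_types_mask; a sketch:

const struct i40e_adapter *adapter = pf->adapter;
uint64_t flags = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_FRAG_IPV6;
uint64_t hena = i40e_config_hena(adapter, flags);

RTE_ASSERT(i40e_parse_hena(adapter, hena) == flags);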
@@ -6655,16 +6692,9 @@ static void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint64_t hena;
- hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
- hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- if (hw->mac.type == I40E_MAC_X722)
- hena &= ~I40E_RSS_HENA_ALL_X722;
- else
- hena &= ~I40E_RSS_HENA_ALL;
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
I40E_WRITE_FLUSH(hw);
}
@@ -6736,7 +6766,6 @@ static int
i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint64_t rss_hf;
uint64_t hena;
int ret;
@@ -6745,14 +6774,7 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
if (ret)
return ret;
- rss_hf = rss_conf->rss_hf;
- hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
- hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- if (hw->mac.type == I40E_MAC_X722)
- hena &= ~I40E_RSS_HENA_ALL_X722;
- else
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf, hw->mac.type);
+ hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
@@ -6766,14 +6788,13 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
uint64_t hena;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- if (!(hena & ((hw->mac.type == I40E_MAC_X722)
- ? I40E_RSS_HENA_ALL_X722
- : I40E_RSS_HENA_ALL))) { /* RSS disabled */
+
+ if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0; /* Nothing to do */
@@ -6798,7 +6819,7 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- rss_conf->rss_hf = i40e_parse_hena(hena);
+ rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
return 0;
}
@@ -7071,7 +7092,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
/* create L1 filter */
filter_replace.old_filter_type =
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
- filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace.tr_bit = 0;
/* Prepare the buffer, 3 entries */
@@ -7119,12 +7140,12 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
@@ -7142,12 +7163,131 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
I40E_AQC_MIRROR_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
filter_replace.new_filter_type =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+static enum i40e_status_code
+i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* For GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[6] = 0xFF;
+ filter_replace_buf.data[7] = 0xFF;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+static enum
+i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* for GTP-C */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* for GTP-U */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
@@ -7238,7 +7378,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x40;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
break;
case I40E_TUNNEL_TYPE_MPLSoGRE:
if (!pf->mpls_replace_flag) {
@@ -7254,7 +7394,37 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
0x0;
big_buffer = 1;
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
+ break;
+ case I40E_TUNNEL_TYPE_GTPC:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ break;
+ case I40E_TUNNEL_TYPE_GTPU:
+ if (!pf->gtp_replace_flag) {
+ i40e_replace_gtp_l1_filter(pf);
+ i40e_replace_gtp_cloud_filter(pf);
+ pf->gtp_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
+ (teid_le >> 16) & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
+ teid_le & 0xFFFF;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
+ 0x0;
+ big_buffer = 1;
break;
case I40E_TUNNEL_TYPE_QINQ:
if (!pf->qinq_replace_flag) {
@@ -7282,13 +7452,19 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
pfilter->element.flags =
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X11;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_0X12;
else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
pfilter->element.flags |=
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ I40E_AQC_ADD_CLOUD_FILTER_0X10;
else {
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
&pfilter->element.flags);
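For reference, a small sketch of how the GTP cases above split a 32-bit TEID across two 16-bit general_fields words (value illustrative):

uint32_t teid_le = rte_cpu_to_le_32(0x12345678);
uint16_t word0 = (teid_le >> 16) & 0xFFFF;	/* -> ..._WORD0 */
uint16_t word1 = teid_le & 0xFFFF;		/* -> ..._WORD1 */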
@@ -7573,7 +7749,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
}
rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
i40e_pf_disable_rss(pf);
return 0;
}
@@ -7794,9 +7970,9 @@ static int
i40e_get_hash_filter_global_config(struct i40e_hw *hw,
struct rte_eth_hash_global_conf *g_cfg)
{
- uint32_t reg, mask = I40E_FLOW_TYPES;
- uint16_t i;
- enum i40e_filter_pctype pctype;
+ struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
+ uint32_t reg;
+ uint16_t i, j;
memset(g_cfg, 0, sizeof(*g_cfg));
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
@@ -7807,29 +7983,38 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
PMD_DRV_LOG(DEBUG, "Hash function is %s",
(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
- for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
- if (!(mask & (1UL << i)))
- continue;
- mask &= ~(1UL << i);
- /* Bit set indicats the coresponding flow type is supported */
- g_cfg->valid_bit_mask[0] |= (1UL << i);
- /* if flowtype is invalid, continue */
- if (!I40E_VALID_FLOW(i))
+ /*
+ * We work only with the lowest 32 bits, which is not correct; to work
+ * properly the valid_bit_mask size should be increased to 64 bits,
+ * but this would break the ABI. This modification will be done in a
+ * future release.
+ */
+ g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+ if (!adapter->pctypes_tbl[i])
continue;
- pctype = i40e_flowtype_to_pctype(i);
- reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
- if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
- g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (adapter->pctypes_tbl[i] & (1ULL << j)) {
+ reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
+ if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
+ g_cfg->sym_hash_enable_mask[0] |=
+ (1UL << i);
+ }
+ }
+ }
}
return 0;
}
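A hedged sketch of a caller feeding i40e_set_hash_filter_global_config() below, given the 32-bit limitation noted above (only flow types below bit 32 can be expressed until valid_bit_mask grows to 64 bits):

struct rte_eth_hash_global_conf g_cfg = {
	.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.valid_bit_mask = { 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP },
	.sym_hash_enable_mask = { 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP },
};
/* enables symmetric hashing on every pctype mapped to IPv4/UDP */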
static int
-i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
+i40e_hash_global_config_check(const struct i40e_adapter *adapter,
+ const struct rte_eth_hash_global_conf *g_cfg)
{
uint32_t i;
- uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
+ uint32_t mask0, i40e_mask = adapter->flow_types_mask;
if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -7872,64 +8057,36 @@ static int
i40e_set_hash_filter_global_config(struct i40e_hw *hw,
struct rte_eth_hash_global_conf *g_cfg)
{
+ struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
int ret;
- uint16_t i;
+ uint16_t i, j;
uint32_t reg;
- uint32_t mask0 = g_cfg->valid_bit_mask[0];
- enum i40e_filter_pctype pctype;
+ /*
+ * We work only with the lowest 32 bits, which is not correct; to work
+ * properly the valid_bit_mask size should be increased to 64 bits,
+ * but this would break the ABI. This modification will be done in a
+ * future release.
+ */
+ uint32_t mask0 = g_cfg->valid_bit_mask[0] &
+ (uint32_t)adapter->flow_types_mask;
/* Check the input parameters */
- ret = i40e_hash_global_config_check(g_cfg);
+ ret = i40e_hash_global_config_check(adapter, g_cfg);
if (ret < 0)
return ret;
- for (i = 0; mask0 && i < UINT32_BIT; i++) {
- if (!(mask0 & (1UL << i)))
- continue;
- mask0 &= ~(1UL << i);
- /* if flowtype is invalid, continue */
- if (!I40E_VALID_FLOW(i))
- continue;
- pctype = i40e_flowtype_to_pctype(i);
- reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
- I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- if (hw->mac.type == I40E_MAC_X722) {
- if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
- reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
- reg);
- } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
- reg);
- } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
- reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
- reg);
- } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
- reg);
- } else {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
- reg);
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+ if (mask0 & (1UL << i)) {
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
+
+ for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+ j < I40E_FILTER_PCTYPE_MAX; j++) {
+ if (adapter->pctypes_tbl[i] & (1ULL << j))
+ i40e_write_rx_ctl(hw,
+ I40E_GLQF_HSYM(j),
+ reg);
}
- } else {
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
}
}
@@ -8551,16 +8708,14 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
uint64_t input_set, inset_reg;
uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
int num, i;
+ uint16_t flow_type;
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (hw->mac.type == I40E_MAC_X722) {
- if (!I40E_VALID_PCTYPE_X722(pctype))
- continue;
- } else {
- if (!I40E_VALID_PCTYPE(pctype))
- continue;
- }
+ flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
+
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
input_set = i40e_get_default_input_set(pctype);
@@ -8623,7 +8778,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
return -EINVAL;
}
- if (!I40E_VALID_FLOW(conf->flow_type)) {
+ pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
@@ -8631,10 +8787,8 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
if (hw->mac.type == I40E_MAC_X722) {
/* get translated pctype value in fd pctype register */
pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
- I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
- conf->flow_type)));
- } else
- pctype = i40e_flowtype_to_pctype(conf->flow_type);
+ I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
@@ -8642,11 +8796,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
PMD_DRV_LOG(ERR, "Failed to parse input set");
return -EINVAL;
}
- if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
- input_set) != 0) {
- PMD_DRV_LOG(ERR, "Invalid input set");
- return -EINVAL;
- }
+
if (conf->op == RTE_ETH_INPUT_SET_ADD) {
/* get inset value in register */
inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
@@ -8700,24 +8850,19 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
return -EINVAL;
}
- if (!I40E_VALID_FLOW(conf->flow_type)) {
+ pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
+
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
- pctype = i40e_flowtype_to_pctype(conf->flow_type);
-
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to parse input set");
return -EINVAL;
}
- if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
- input_set) != 0) {
- PMD_DRV_LOG(ERR, "Invalid input set");
- return -EINVAL;
- }
/* get inset value in register */
inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
@@ -9156,72 +9301,42 @@ i40e_hw_init(struct rte_eth_dev *dev)
i40e_set_symmetric_hash_enable_per_port(hw, 0);
}
+/*
+ * For X722 it is possible to have multiple pctypes mapped to the same
+ * flow type; however, this function will return only the single highest
+ * pctype index, which is not quite correct. This is a known problem of
+ * the i40e driver and needs to be fixed later.
+ */
enum i40e_filter_pctype
-i40e_flowtype_to_pctype(uint16_t flow_type)
-{
- static const enum i40e_filter_pctype pctype_table[] = {
- [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
- [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
- [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
- [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
- [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
- };
+i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
+{
+ int i;
+ uint64_t pctype_mask;
- return pctype_table[flow_type];
+ if (flow_type < I40E_FLOW_TYPE_MAX) {
+ pctype_mask = adapter->pctypes_tbl[flow_type];
+ for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
+ if (pctype_mask & (1ULL << i))
+ return (enum i40e_filter_pctype)i;
+ }
+ }
+ return I40E_FILTER_PCTYPE_INVALID;
}
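A quick illustration of the caveat in the comment above, with X722-style multi-pctype mappings assumed:

enum i40e_filter_pctype p =
	i40e_flowtype_to_pctype(pf->adapter, RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
/* if several pctypes are registered for this flow type, p is only the
 * highest-numbered of them; the others are silently dropped */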
uint16_t
-i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
+i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
+ enum i40e_filter_pctype pctype)
{
- static const uint16_t flowtype_table[] = {
- [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
- [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
- [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
- [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
- [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
- RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
- [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
- RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
- [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
- RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
- [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
- [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
- [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
- [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
- [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
- [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
- RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
- [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
- RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
- [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
- RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
- [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
- RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
- [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
- };
+ uint16_t flowtype;
+ uint64_t pctype_mask = 1ULL << pctype;
+
+ for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
+ flowtype++) {
+ if (adapter->pctypes_tbl[flowtype] & pctype_mask)
+ return flowtype;
+ }
- return flowtype_table[pctype];
+ return RTE_ETH_FLOW_UNKNOWN;
}
/*
@@ -9238,7 +9353,7 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
/* For both X710 and XL710 */
#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
@@ -10248,9 +10363,9 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
goto out;
}
/* update the local VSI info with updated queue map */
- (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
sizeof(vsi->info.tc_mapping));
- (void)rte_memcpy(&vsi->info.queue_mapping,
+ rte_memcpy(&vsi->info.queue_mapping,
&ctxt.info.queue_mapping,
sizeof(vsi->info.queue_mapping));
vsi->info.mapping_flags = ctxt.info.mapping_flags;
@@ -10377,7 +10492,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
*
* Returns 0 on success, negative value on failure
*/
-static int
+int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -10809,14 +10924,14 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
sizeof(f->input.general_fields));
if (((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
((f->input.flags &
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
big_buffer = 1;
if (big_buffer)
@@ -10851,6 +10966,301 @@ is_i40e_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &rte_i40e_pmd);
}
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
+{
+ int i;
+
+ for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
+ if (pf->customized_pctype[i].index == index)
+ return &pf->customized_pctype[i];
+ }
+ return NULL;
+}
+
+static int
+i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t pctype_num;
+ struct rte_pmd_i40e_ptype_info *pctype;
+ uint32_t buff_size;
+ struct i40e_customized_pctype *new_pctype = NULL;
+ uint8_t proto_id;
+ uint8_t pctype_value;
+ char name[64];
+ uint32_t i, j, n;
+ int ret;
+
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&pctype_num, sizeof(pctype_num),
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype number");
+ return -1;
+ }
+ if (!pctype_num) {
+ PMD_DRV_LOG(INFO, "No new pctype added");
+ return -1;
+ }
+
+ buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
+ pctype = rte_zmalloc("new_pctype", buff_size, 0);
+ if (!pctype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+ /* get information about new pctype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)pctype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get pctype list");
+ rte_free(pctype);
+ return -1;
+ }
+
+ /* Update customized pctype. */
+ for (i = 0; i < pctype_num; i++) {
+ pctype_value = pctype[i].ptype_id;
+ memset(name, 0, sizeof(name));
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = pctype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ strcat(name, proto[n].name);
+ strcat(name, "_");
+ break;
+ }
+ }
+ name[strlen(name) - 1] = '\0';
+ if (!strcmp(name, "GTPC"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPC);
+ else if (!strcmp(name, "GTPU_IPV4"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV4);
+ else if (!strcmp(name, "GTPU_IPV6"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV6);
+ else if (!strcmp(name, "GTPU"))
+ new_pctype =
+ i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU);
+ if (new_pctype) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ }
+ }
+
+ rte_free(pctype);
+ return 0;
+}
+
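A worked example of the name assembly in i40e_update_customized_pctype() above, with the resolved protocol names assumed:

char name[64] = "";
const char *parts[] = { "GTPU", "IPV4" };	/* resolved proto names */
unsigned int k;

for (k = 0; k < RTE_DIM(parts); k++) {
	strcat(name, parts[k]);
	strcat(name, "_");
}
name[strlen(name) - 1] = '\0';
/* name == "GTPU_IPV4", selecting the I40E_CUSTOMIZED_GTPU_IPV4 slot */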
+static int
+i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto)
+{
+ struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
+ uint16_t port_id = dev->data->port_id;
+ uint32_t ptype_num;
+ struct rte_pmd_i40e_ptype_info *ptype;
+ uint32_t buff_size;
+ uint8_t proto_id;
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+ uint32_t i, j, n;
+ bool inner_ip;
+ int ret;
+
+ /* get information about new ptype num */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&ptype_num, sizeof(ptype_num),
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype number");
+ return ret;
+ }
+ if (!ptype_num) {
+ PMD_DRV_LOG(INFO, "No new ptype added");
+ return -1;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
+ ptype = rte_zmalloc("new_ptype", buff_size, 0);
+ if (!ptype) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -1;
+ }
+
+ /* get information about new ptype list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)ptype, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get ptype list");
+ rte_free(ptype);
+ return ret;
+ }
+
+ buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
+ ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
+ if (!ptype_mapping) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ rte_free(ptype);
+ return -1;
+ }
+
+ /* Update ptype mapping table. */
+ for (i = 0; i < ptype_num; i++) {
+ ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
+ ptype_mapping[i].sw_ptype = 0;
+ inner_ip = false;
+ for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
+ proto_id = ptype[i].protocols[j];
+ if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
+ continue;
+ for (n = 0; n < proto_num; n++) {
+ if (proto[n].proto_id != proto_id)
+ continue;
+ memset(name, 0, sizeof(name));
+ strcpy(name, proto[n].name);
+ if (!strncmp(name, "IPV4", 4) && !inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ inner_ip = true;
+ } else if (!strncmp(name, "IPV4FRAG", 8) &&
+ inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncmp(name, "IPV4", 4) &&
+ inner_ip)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncmp(name, "IPV6", 4) &&
+ !inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ inner_ip = true;
+ } else if (!strncmp(name, "IPV6FRAG", 8) &&
+ inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_FRAG;
+ } else if (!strncmp(name, "IPV6", 4) &&
+ inner_ip)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncmp(name, "GTPC", 4))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPC;
+ else if (!strncmp(name, "GTPU", 4))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPU;
+ else if (!strncmp(name, "UDP", 3))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_UDP;
+ else if (!strncmp(name, "TCP", 3))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_TCP;
+ else if (!strncmp(name, "SCTP", 4))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_SCTP;
+ else if (!strncmp(name, "ICMP", 4) ||
+ !strncmp(name, "ICMPV6", 6))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_INNER_L4_ICMP;
+
+ break;
+ }
+ }
+ }
+
+ ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
+ ptype_num, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to update mapping table.");
+
+ rte_free(ptype_mapping);
+ rte_free(ptype);
+ return ret;
+}
+
+void
+i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint32_t proto_num;
+ struct rte_pmd_i40e_proto_info *proto;
+ uint32_t buff_size;
+ uint32_t i;
+ int ret;
+
+ /* get information about protocol number */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)&proto_num, sizeof(proto_num),
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol number");
+ return;
+ }
+ if (!proto_num) {
+ PMD_DRV_LOG(INFO, "No new protocol added");
+ return;
+ }
+
+ buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
+ proto = rte_zmalloc("new_proto", buff_size, 0);
+ if (!proto) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return;
+ }
+
+ /* get information about protocol list */
+ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
+ (uint8_t *)proto, buff_size,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get protocol list");
+ rte_free(proto);
+ return;
+ }
+
+ /* Check if GTP is supported. */
+ for (i = 0; i < proto_num; i++) {
+ if (!strncmp(proto[i].name, "GTP", 3)) {
+ pf->gtp_support = true;
+ break;
+ }
+ }
+
+ /* Update customized pctype info */
+ ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
+ proto_num, proto);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No pctype is updated.");
+
+ /* Update customized ptype info */
+ ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
+ proto_num, proto);
+ if (ret)
+ PMD_DRV_LOG(INFO, "No ptype is updated.");
+
+ rte_free(proto);
+}
+
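The assumed call flow into this helper: an application loads a DDP profile through the rte_pmd_i40e API, and the PMD then refreshes its customized pctype/ptype state from the package buffer:

uint16_t port_id = 0;	/* port that receives the DDP profile */
uint8_t *pkg_buf;	/* profile image, read from disk by the app */
uint32_t pkg_size;	/* its size in bytes */
int ret;

ret = rte_pmd_i40e_process_ddp_package(port_id, pkg_buf, pkg_size,
				       RTE_PMD_I40E_PKG_OP_WR_ADD);
/* on success the driver is expected to invoke
 * i40e_update_customized_info(dev, pkg_buf, pkg_size) internally */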
/* Create a QinQ cloud filter
*
* The Fortville NIC has limited resources for tunnel filters,
@@ -10911,7 +11321,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
/* create L1 filter */
filter_replace.old_filter_type =
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
filter_replace.tr_bit = 0;
/* Prepare the buffer, 2 entries */
@@ -10942,13 +11352,13 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
/* create L2 filter, input for L2 filter will be L1 filter */
filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
- filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
/* Prepare the buffer, 2 entries */
filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
filter_replace_buf.data[0] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
- filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
filter_replace_buf.data[4] |=
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 48abc05a..cd67453d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -186,9 +186,9 @@ enum i40e_flxpld_layer_idx {
/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT 0
+#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
@@ -260,6 +260,12 @@ enum i40e_flxpld_layer_idx {
#define I40E_QOS_BW_WEIGHT_MIN 1
/* The max bandwidth weight is 127. */
#define I40E_QOS_BW_WEIGHT_MAX 127
+/* The max queue region index is 7. */
+#define I40E_REGION_MAX_INDEX 7
+
+#define I40E_MAX_PERCENT 100
+#define I40E_DEFAULT_DCB_APP_NUM 1
+#define I40E_DEFAULT_DCB_APP_PRIO 3
/**
* The overhead from MTU to max frame size.
@@ -460,6 +466,119 @@ struct i40e_vmdq_info {
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
#define I40E_FDIR_IPv6_TC_OFFSET 20
+/* A structure used to define the input for a GTP flow */
+struct i40e_gtp_flow {
+ struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */
+ uint8_t msg_type; /* Message type. */
+ uint32_t teid; /* TEID in big endian. */
+};
+
+/* A structure used to define the input for a GTP IPV4 flow */
+struct i40e_gtp_ipv4_flow {
+ struct i40e_gtp_flow gtp;
+ struct rte_eth_ipv4_flow ip4;
+};
+
+/* A structure used to define the input for a GTP IPV6 flow */
+struct i40e_gtp_ipv6_flow {
+ struct i40e_gtp_flow gtp;
+ struct rte_eth_ipv6_flow ip6;
+};
+
+/* A structure used to define the input for a raw type flow */
+struct i40e_raw_flow {
+ uint16_t pctype;
+ void *packet;
+ uint32_t length;
+};
+
+/*
+ * A union that contains the inputs for all flow types.
+ * Items in flows need to be in big endian.
+ */
+union i40e_fdir_flow {
+ struct rte_eth_l2_flow l2_flow;
+ struct rte_eth_udpv4_flow udp4_flow;
+ struct rte_eth_tcpv4_flow tcp4_flow;
+ struct rte_eth_sctpv4_flow sctp4_flow;
+ struct rte_eth_ipv4_flow ip4_flow;
+ struct rte_eth_udpv6_flow udp6_flow;
+ struct rte_eth_tcpv6_flow tcp6_flow;
+ struct rte_eth_sctpv6_flow sctp6_flow;
+ struct rte_eth_ipv6_flow ipv6_flow;
+ struct i40e_gtp_flow gtp_flow;
+ struct i40e_gtp_ipv4_flow gtp_ipv4_flow;
+ struct i40e_gtp_ipv6_flow gtp_ipv6_flow;
+ struct i40e_raw_flow raw_flow;
+};
+
+enum i40e_fdir_ip_type {
+ I40E_FDIR_IPTYPE_IPV4,
+ I40E_FDIR_IPTYPE_IPV6,
+};
+
+/* A structure used to contain the extended input of a flow */
+struct i40e_fdir_flow_ext {
+ uint16_t vlan_tci;
+ uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+	/* Filled with the flexible payload bytes to match. */
+	uint8_t is_vf;   /* 1 for VF, 0 for port dev */
+	uint16_t dst_id; /* VF ID, available when is_vf is 1 */
+	bool inner_ip;   /* If there is an inner IP header */
+	enum i40e_fdir_ip_type iip_type; /* IP type of the inner IP header */
+	bool customized_pctype; /* If a customized pctype is used */
+	bool pkt_template; /* If a raw packet template is used */
+};
+
+/* A structure used to define the input for a flow director filter entry */
+struct i40e_fdir_input {
+ enum i40e_filter_pctype pctype;
+ union i40e_fdir_flow flow;
+ /* Flow fields to match, dependent on flow_type */
+ struct i40e_fdir_flow_ext flow_ext;
+ /* Additional fields to match */
+};
+
+/* Behavior to be taken on an FDIR match */
+enum i40e_fdir_behavior {
+ I40E_FDIR_ACCEPT = 0,
+ I40E_FDIR_REJECT,
+ I40E_FDIR_PASSTHRU,
+};
+
+/* Flow director report status
+ * It defines what will be reported if an FDIR entry is matched.
+ */
+enum i40e_fdir_status {
+ I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */
+ I40E_FDIR_REPORT_ID, /* Only report FD ID. */
+ I40E_FDIR_REPORT_ID_FLEX_4, /* Report FD ID and 4 flex bytes. */
+ I40E_FDIR_REPORT_FLEX_8, /* Report 8 flex bytes. */
+};
+
+/* A structure used to define the action taken on an FDIR filter match. */
+struct i40e_fdir_action {
+	uint16_t rx_queue; /* Queue assigned to on FDIR match. */
+	enum i40e_fdir_behavior behavior; /* Behavior to be taken */
+ enum i40e_fdir_status report_status; /* Status report option */
+ /* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or
+ * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
+	 * flex bytes start in the flexible payload.
+ */
+ uint8_t flex_off;
+};
+
+/* A structure used to define a flow director filter entry via the filter_ctrl API.
+ * It supports RTE_ETH_FILTER_FDIR with RTE_ETH_FILTER_ADD and
+ * RTE_ETH_FILTER_DELETE operations.
+ */
+struct i40e_fdir_filter_conf {
+ uint32_t soft_id;
+	/* ID; a unique value is required when dealing with an FDIR entry */
+ struct i40e_fdir_input input; /* Input set */
+ struct i40e_fdir_action action; /* Action taken when match */
+};
+
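+/* Illustrative sketch (not part of this patch): a hypothetical caller
+ * could fill the structure along these lines before handing it to
+ * i40e_flow_add_del_fdir_filter(), declared further below:
+ *
+ *	struct i40e_fdir_filter_conf conf = { 0 };
+ *
+ *	conf.soft_id = 1;
+ *	conf.input.pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ *	conf.input.flow.udp4_flow.ip.src_ip = src_ip_be;  // big endian
+ *	conf.input.flow.udp4_flow.dst_port = dst_port_be; // big endian
+ *	conf.action.behavior = I40E_FDIR_ACCEPT;
+ *	conf.action.rx_queue = 4;
+ *	i40e_flow_add_del_fdir_filter(dev, &conf, 1);
+ *
+ * src_ip_be and dst_port_be are placeholder big-endian values.
+ */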
/*
 * Structure to store flex pit for the flow director.
*/
@@ -478,12 +597,13 @@ struct i40e_fdir_flex_mask {
} bitmask[I40E_FDIR_BITMASK_NUM_WORD];
};
-#define I40E_FILTER_PCTYPE_MAX 64
-#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+#define I40E_FILTER_PCTYPE_INVALID 0
+#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
struct i40e_fdir_filter {
TAILQ_ENTRY(i40e_fdir_filter) rules;
- struct rte_eth_fdir_filter fdir;
+ struct i40e_fdir_filter_conf fdir;
};
TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
@@ -541,17 +661,49 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* queue region info */
+struct i40e_queue_region_info {
+ /* the region id for this configuration */
+ uint8_t region_id;
+ /* the start queue index for this region */
+ uint8_t queue_start_index;
+ /* the total queue number of this queue region */
+ uint8_t queue_num;
+	/* the total number of user priorities for this region */
+ uint8_t user_priority_num;
+ /* the packet's user priority for this region */
+ uint8_t user_priority[I40E_MAX_USER_PRIORITY];
+	/* the total number of flowtypes for this region */
+ uint8_t flowtype_num;
+ /**
+	 * the pctype or hardware flowtype of the packet;
+	 * the specific index for each type is defined
+	 * in i40e_type.h as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX];
+};
+
+struct i40e_queue_regions {
+	/* the total number of queue regions for this port */
+ uint16_t queue_region_number;
+ struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1];
+};
+
/* Number of tunnel filters the HW supports */
#define I40E_MAX_TUNNEL_FILTER_NUM 400
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
-#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
-#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
-#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP 8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE 9
+#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X11 0x11
+#define I40E_AQC_ADD_L1_FILTER_0X12 0x12
+#define I40E_AQC_ADD_L1_FILTER_0X13 0x13
+#define I40E_AQC_NEW_TR_21 21
+#define I40E_AQC_NEW_TR_22 22
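+/* Gloss (assumption, not stated in the patch): the 0X10/0X11/... names
+ * above are the raw admin-queue filter type numbers; the earlier semantic
+ * names (e.g. CUSTOM_QINQ, TEID_MPLSoUDP) appear to have been dropped
+ * because one type value is now shared by several customized filters
+ * (QinQ, MPLS, GTP).
+ */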
enum i40e_tunnel_iptype {
I40E_TUNNEL_IPTYPE_IPV4,
@@ -599,6 +751,8 @@ enum i40e_tunnel_type {
I40E_TUNNEL_TYPE_MPLSoUDP,
I40E_TUNNEL_TYPE_MPLSoGRE,
I40E_TUNNEL_TYPE_QINQ,
+ I40E_TUNNEL_TYPE_GTPC,
+ I40E_TUNNEL_TYPE_GTPU,
I40E_TUNNEL_TYPE_MAX,
};
@@ -722,6 +876,21 @@ struct i40e_tm_conf {
bool committed;
};
+enum i40e_new_pctype {
+ I40E_CUSTOMIZED_GTPC = 0,
+ I40E_CUSTOMIZED_GTPU_IPV4,
+ I40E_CUSTOMIZED_GTPU_IPV6,
+ I40E_CUSTOMIZED_GTPU,
+ I40E_CUSTOMIZED_MAX,
+};
+
+#define I40E_FILTER_PCTYPE_INVALID 0
+struct i40e_customized_pctype {
+ enum i40e_new_pctype index; /* Indicate which customized pctype */
+ uint8_t pctype; /* New pctype value */
+ bool valid; /* Check if it's valid */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -776,6 +945,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -784,8 +954,14 @@ struct i40e_pf {
bool floating_veb_list[I40E_MAX_VF];
struct i40e_flow_list flow_list;
bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
+ bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
bool qinq_replace_flag; /* QINQ filter replace is done */
struct i40e_tm_conf tm_conf;
+
+ /* Dynamic Device Personalization */
+ bool gtp_support; /* 1 - support GTP-C and GTP-U */
+	/* table of customized pctypes */
+ struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
};
enum pending_msg {
@@ -852,7 +1028,8 @@ struct i40e_vf {
uint64_t flags;
};
-#define I40E_MAX_PKT_TYPE 256
+#define I40E_MAX_PKT_TYPE 256
+#define I40E_FLOW_TYPE_MAX 64
/*
* Structure to store private data for each PF/VF instance.
@@ -881,13 +1058,17 @@ struct i40e_adapter {
/* ptype mapping table */
uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
+ /* flow type to pctype mapping table */
+ uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned;
+ uint64_t flow_types_mask;
+ uint64_t pctypes_mask;
};
extern const struct rte_flow_ops i40e_flow_ops;
union i40e_filter_t {
struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_fdir_filter fdir_filter;
+ struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
};
@@ -919,14 +1100,14 @@ void i40e_update_vsi_stats(struct i40e_vsi *vsi);
void i40e_pf_disable_irq0(struct i40e_hw *hw);
void i40e_pf_enable_irq0(struct i40e_hw *hw);
int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi);
+void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx);
void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
struct i40e_vsi_vlan_pvid_info *info);
int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on);
-uint64_t i40e_config_hena(uint64_t flags, enum i40e_mac_type type);
-uint64_t i40e_parse_hena(uint64_t flags);
+uint64_t i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags);
+uint64_t i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags);
enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
int i40e_fdir_setup(struct i40e_pf *pf);
@@ -935,8 +1116,11 @@ const struct rte_memzone *i40e_memzone_reserve(const char *name,
int socket_id);
int i40e_fdir_configure(struct rte_eth_dev *dev);
void i40e_fdir_teardown(struct i40e_pf *pf);
-enum i40e_filter_pctype i40e_flowtype_to_pctype(uint16_t flow_type);
-uint16_t i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype);
+enum i40e_filter_pctype
+ i40e_flowtype_to_pctype(const struct i40e_adapter *adapter,
+ uint16_t flow_type);
+uint16_t i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
+ enum i40e_filter_pctype pctype);
int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -961,7 +1145,7 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
- struct rte_eth_fdir_input *input);
+ struct i40e_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
@@ -974,6 +1158,9 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add);
+int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add);
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
@@ -1003,6 +1190,14 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void i40e_tm_conf_init(struct rte_eth_dev *dev);
void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+struct i40e_customized_pctype*
+i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
+void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
+ uint32_t pkg_size);
+int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
+int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+ struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
+void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index f6d82934..02d9e579 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -47,10 +47,10 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -67,8 +67,6 @@
#include "i40e_rxtx.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"
-#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
-#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
@@ -108,7 +106,7 @@ static void i40evf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
+static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -118,10 +116,9 @@ static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
- int on);
+static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40evf_dev_close(struct rte_eth_dev *dev);
+static int i40evf_dev_reset(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
@@ -199,15 +196,16 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.allmulticast_disable = i40evf_dev_allmulticast_disable,
.link_update = i40evf_dev_link_update,
.stats_get = i40evf_dev_stats_get,
+ .stats_reset = i40evf_dev_xstats_reset,
.xstats_get = i40evf_dev_xstats_get,
.xstats_get_names = i40evf_dev_xstats_get_names,
.xstats_reset = i40evf_dev_xstats_reset,
.dev_close = i40evf_dev_close,
+ .dev_reset = i40evf_dev_reset,
.dev_infos_get = i40evf_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40evf_vlan_filter_set,
.vlan_offload_set = i40evf_vlan_offload_set,
- .vlan_pvid_set = i40evf_vlan_pvid_set,
.rx_queue_start = i40evf_dev_rx_queue_start,
.rx_queue_stop = i40evf_dev_rx_queue_stop,
.tx_queue_start = i40evf_dev_tx_queue_start,
@@ -431,9 +429,7 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
pver = (struct virtchnl_version_info *)args.out_buffer;
vf->version_major = pver->major;
vf->version_minor = pver->minor;
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
- else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
+ if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR))
PMD_DRV_LOG(INFO, "Peer is Linux PF host");
else {
@@ -481,7 +477,7 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev)
len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
- (void)rte_memcpy(vf->vf_res, args.out_buffer,
+ rte_memcpy(vf->vf_res, args.out_buffer,
RTE_MIN(args.out_size, len));
i40e_vf_parse_hw_config(hw, vf->vf_res);
@@ -563,37 +559,6 @@ i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
return ret;
}
-static int
-i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
- struct i40e_vsi_vlan_pvid_info *info)
-{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
- struct vf_cmd_info args;
- struct virtchnl_pvid_info tpid_info;
-
- if (info == NULL) {
- PMD_DRV_LOG(ERR, "invalid parameters");
- return I40E_ERR_PARAM;
- }
-
- memset(&tpid_info, 0, sizeof(tpid_info));
- tpid_info.vsi_id = vf->vsi_res->vsi_id;
- (void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
-
- args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
- args.in_args = (uint8_t *)&tpid_info;
- args.in_args_size = sizeof(tpid_info);
- args.out_buffer = vf->aq_resp;
- args.out_size = I40E_AQ_BUF_SZ;
-
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
-
- return err;
-}
-
static void
i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
uint16_t vsi_id,
@@ -629,7 +594,6 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
}
}
-/* It configures VSI queues to co-work with Linux PF host */
static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
@@ -673,72 +637,6 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
return ret;
}
-/* It configures VSI queues to co-work with DPDK PF host */
-static int
-i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
-{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_rx_queue **rxq =
- (struct i40e_rx_queue **)dev->data->rx_queues;
- struct i40e_tx_queue **txq =
- (struct i40e_tx_queue **)dev->data->tx_queues;
- struct virtchnl_vsi_queue_config_ext_info *vc_vqcei;
- struct virtchnl_queue_pair_ext_info *vc_qpei;
- struct vf_cmd_info args;
- uint16_t i, nb_qp = vf->num_queue_pairs;
- const uint32_t size =
- I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
- uint8_t buff[size];
- int ret;
-
- memset(buff, 0, sizeof(buff));
- vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff;
- vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
- vc_vqcei->num_queue_pairs = nb_qp;
- vc_qpei = vc_vqcei->qpair;
- for (i = 0; i < nb_qp; i++, vc_qpei++) {
- i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
- vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
- i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
- vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
- vf->max_pkt_len, rxq[i]);
- if (i < dev->data->nb_rx_queues)
- /*
- * It adds extra info for configuring VSI queues, which
- * is needed to enable the configurable crc stripping
- * in VF.
- */
- vc_qpei->rxq_ext.crcstrip =
- dev->data->dev_conf.rxmode.hw_strip_crc;
- }
- memset(&args, 0, sizeof(args));
- args.ops =
- (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
- args.in_args = (uint8_t *)vc_vqcei;
- args.in_args_size = size;
- args.out_buffer = vf->aq_resp;
- args.out_size = I40E_AQ_BUF_SZ;
- ret = i40evf_execute_vf_cmd(dev, &args);
- if (ret)
- PMD_DRV_LOG(ERR, "Failed to execute command of "
- "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
-
- return ret;
-}
-
-static int
-i40evf_configure_queues(struct rte_eth_dev *dev)
-{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- /* To support DPDK PF host */
- return i40evf_configure_vsi_queues_ext(dev);
- else
- /* To support Linux PF host */
- return i40evf_configure_vsi_queues(dev);
-}
-
static int
i40evf_config_irq_map(struct rte_eth_dev *dev)
{
@@ -752,14 +650,10 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint32_t vector_id;
int i, err;
- if (rte_intr_allow_others(intr_handle)) {
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
- else
- vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
- } else {
+ if (rte_intr_allow_others(intr_handle))
+ vector_id = I40E_RX_VEC_START;
+ else
vector_id = I40E_MISC_VEC_ID;
- }
map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
map_info->num_vectors = 1;
@@ -888,7 +782,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
int err;
struct vf_cmd_info args;
- if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+ if (is_zero_ether_addr(addr)) {
PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
@@ -899,7 +793,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
- (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -939,7 +833,7 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
list = (struct virtchnl_ether_addr_list *)cmd_buffer;
list->vsi_id = vf->vsi_res->vsi_id;
list->num_elements = 1;
- (void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -968,7 +862,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
}
static int
-i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
+i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct virtchnl_queue_select q_stats;
@@ -993,26 +887,58 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
return 0;
}
-static int
-i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
{
- int ret;
- struct i40e_eth_stats *pstats = NULL;
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
- ret = i40evf_update_stats(dev, &pstats);
- if (ret != 0)
- return 0;
+ *stat &= I40E_48_BIT_MASK;
+}
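+/* Example of the 48-bit rollover handling above: with a saved offset of
+ * 0xFFFFFFFFFFFE and a new reading of 2 the counter has wrapped, so the
+ * delta is (2 + 2^48) - 0xFFFFFFFFFFFE = 4, then masked to 48 bits.
+ */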
- stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
- pstats->rx_broadcast;
- stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
- pstats->tx_unicast;
- stats->imissed = pstats->rx_discards;
- stats->oerrors = pstats->tx_errors + pstats->tx_discards;
- stats->ibytes = pstats->rx_bytes;
- stats->obytes = pstats->tx_bytes;
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
- return 0;
+static void
+i40evf_update_stats(struct i40e_vsi *vsi,
+ struct i40e_eth_stats *nes)
+{
+ struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+
+ i40evf_stat_update_48(&oes->rx_bytes,
+ &nes->rx_bytes);
+ i40evf_stat_update_48(&oes->rx_unicast,
+ &nes->rx_unicast);
+ i40evf_stat_update_48(&oes->rx_multicast,
+ &nes->rx_multicast);
+ i40evf_stat_update_48(&oes->rx_broadcast,
+ &nes->rx_broadcast);
+ i40evf_stat_update_32(&oes->rx_discards,
+ &nes->rx_discards);
+ i40evf_stat_update_32(&oes->rx_unknown_protocol,
+ &nes->rx_unknown_protocol);
+ i40evf_stat_update_48(&oes->tx_bytes,
+ &nes->tx_bytes);
+ i40evf_stat_update_48(&oes->tx_unicast,
+ &nes->tx_unicast);
+ i40evf_stat_update_48(&oes->tx_multicast,
+ &nes->tx_multicast);
+ i40evf_stat_update_48(&oes->tx_broadcast,
+ &nes->tx_broadcast);
+ i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
+ i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}
static void
@@ -1022,10 +948,10 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
struct i40e_eth_stats *pstats = NULL;
/* read stat values to clear hardware registers */
- i40evf_update_stats(dev, &pstats);
+ i40evf_query_stats(dev, &pstats);
	/* set stats offset based on current values */
- vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
+ vf->vsi.eth_stats_offset = *pstats;
}
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1049,17 +975,21 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
int ret;
unsigned i;
struct i40e_eth_stats *pstats = NULL;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_vsi *vsi = &vf->vsi;
if (n < I40EVF_NB_XSTATS)
return I40EVF_NB_XSTATS;
- ret = i40evf_update_stats(dev, &pstats);
+ ret = i40evf_query_stats(dev, &pstats);
if (ret != 0)
return 0;
if (!xstats)
return 0;
+ i40evf_update_stats(vsi, pstats);
+
/* loop over xstats array and values from pstats */
for (i = 0; i < I40EVF_NB_XSTATS; i++) {
xstats[i].id = i;
@@ -1179,10 +1109,30 @@ i40evf_enable_irq0(struct i40e_hw *hw)
}
static int
-i40evf_reset_vf(struct i40e_hw *hw)
+i40evf_check_vf_reset_done(struct i40e_hw *hw)
{
int i, reset;
+ for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
+ reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(50);
+ }
+
+ if (i >= MAX_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
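+/* The helper above polls VFGEN_RSTAT in 50 ms steps, up to
+ * MAX_RESET_WAIT_CNT times, and succeeds once the PF reports the VF as
+ * VFACTIVE or reset COMPLETED; both i40evf_reset_vf() and
+ * i40evf_init_vf() below reuse it instead of open-coding the loop.
+ */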
+static int
+i40evf_reset_vf(struct i40e_hw *hw)
+{
+ int ret;
+
if (i40e_vf_reset(hw) != I40E_SUCCESS) {
PMD_INIT_LOG(ERR, "Reset VF NIC failed");
return -1;
@@ -1198,19 +1148,10 @@ i40evf_reset_vf(struct i40e_hw *hw)
*/
rte_delay_ms(200);
- for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
- reset = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
- if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset)
- break;
- else
- rte_delay_ms(50);
- }
-
- if (i >= MAX_RESET_WAIT_CNT) {
- PMD_INIT_LOG(ERR, "Reset VF NIC failed");
- return -1;
+ ret = i40evf_check_vf_reset_done(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ return ret;
}
return 0;
@@ -1233,6 +1174,10 @@ i40evf_init_vf(struct rte_eth_dev *dev)
goto err;
}
+ err = i40evf_check_vf_reset_done(hw);
+ if (err)
+ goto err;
+
i40e_init_adminq_parameter(hw);
err = i40e_init_adminq(hw);
if (err) {
@@ -1249,29 +1194,30 @@ i40evf_init_vf(struct rte_eth_dev *dev)
/* VF reset, shutdown admin queue and initialize again */
if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
- return -1;
+ goto err;
}
i40e_init_adminq_parameter(hw);
if (i40e_init_adminq(hw) != I40E_SUCCESS) {
PMD_INIT_LOG(ERR, "init_adminq failed");
- return -1;
+ goto err;
}
+
vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
if (!vf->aq_resp) {
PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
- goto err_aq;
+ goto err_aq;
}
if (i40evf_check_api_version(dev) != 0) {
PMD_INIT_LOG(ERR, "check_api version failed");
- goto err_aq;
+ goto err_api;
}
bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
if (!vf->vf_res) {
PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
- goto err_aq;
+ goto err_api;
}
if (i40evf_get_vf_resource(dev) != 0) {
@@ -1293,7 +1239,15 @@ i40evf_init_vf(struct rte_eth_dev *dev)
if (hw->mac.type == I40E_MAC_X722_VF)
vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
vf->vsi.vsi_id = vf->vsi_res->vsi_id;
- vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type;
+
+ switch (vf->vsi_res->vsi_type) {
+ case VIRTCHNL_VSI_SRIOV:
+ vf->vsi.type = I40E_VSI_SRIOV;
+ break;
+ default:
+ vf->vsi.type = I40E_VSI_TYPE_UNKNOWN;
+ break;
+ }
vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -1303,20 +1257,20 @@ i40evf_init_vf(struct rte_eth_dev *dev)
else
eth_random_addr(hw->mac.addr); /* Generate a random one */
- /* If the PF host is not DPDK, set the interval of ITR0 to max*/
- if (vf->version_major != I40E_DPDK_VERSION_MAJOR) {
- I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
- (I40E_ITR_INDEX_DEFAULT <<
- I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
- (interval <<
- I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
- I40EVF_WRITE_FLUSH(hw);
- }
+ I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
+ (I40E_ITR_INDEX_DEFAULT <<
+ I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
+ (interval <<
+ I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40EVF_WRITE_FLUSH(hw);
return 0;
err_alloc:
rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
err_aq:
i40e_shutdown_adminq(hw); /* ignore error */
err:
@@ -1476,7 +1430,6 @@ i40evf_dev_interrupt_handler(void *param)
done:
i40evf_enable_irq0(hw);
- rte_intr_enable(dev->intr_handle);
}
static int
@@ -1503,8 +1456,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
i40e_set_default_ptype_table(eth_dev);
+ i40e_set_default_pctype_table(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
@@ -1586,7 +1539,7 @@ static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
*/
static struct rte_pci_driver rte_i40evf_pmd = {
.id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_i40evf_pci_probe,
.remove = eth_i40evf_pci_remove,
};
@@ -1630,19 +1583,11 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
static int
i40evf_init_vlan(struct rte_eth_dev *dev)
{
- struct rte_eth_dev_data *data = dev->data;
- int ret;
-
/* Apply vlan offload setting */
- i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
-
- /* Apply pvid setting */
- ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
- data->dev_conf.txmode.hw_vlan_insert_pvid);
- return ret;
+ return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
}
-static void
+static int
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
@@ -1655,30 +1600,6 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
i40evf_disable_vlan_strip(dev);
}
-}
-
-static int
-i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
-{
- struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- struct i40e_vsi_vlan_pvid_info info;
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-
- memset(&info, 0, sizeof(info));
- info.on = on;
-
- /* Linux pf host don't support vlan offload yet */
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
- if (info.on)
- info.config.pvid = pvid;
- else {
- info.config.reject.tagged =
- dev_conf->txmode.hw_vlan_reject_tagged;
- info.config.reject.untagged =
- dev_conf->txmode.hw_vlan_reject_untagged;
- }
- return i40evf_config_vlan_pvid(dev, &info);
- }
return 0;
}
@@ -1899,7 +1820,6 @@ i40evf_tx_init(struct rte_eth_dev *dev)
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1914,25 +1834,12 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
return;
}
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- /* To support DPDK PF host */
- I40E_WRITE_REG(hw,
- I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
- I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- /* If host driver is kernel driver, do nothing.
- * Interrupt 0 is used for rx packets, but don't set
- * I40E_VFINT_DYN_CTL01,
- * because it is already done in i40evf_enable_irq0.
- */
-
I40EVF_WRITE_FLUSH(hw);
}
static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1944,17 +1851,6 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
return;
}
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- I40E_WRITE_REG(hw,
- I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
- - 1),
- 0);
- /* If host driver is kernel driver, do nothing.
- * Interrupt 0 is used for rx packets, but don't zero
- * I40E_VFINT_DYN_CTL01,
- * because interrupt 0 is also used for adminq processing.
- */
-
I40EVF_WRITE_FLUSH(hw);
}
@@ -2050,7 +1946,7 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
addr = &dev->data->mac_addrs[i];
if (is_zero_ether_addr(addr))
continue;
- (void)rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ rte_memcpy(list->list[j].addr, addr->addr_bytes,
sizeof(addr->addr_bytes));
PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
addr->addr_bytes[0], addr->addr_bytes[1],
@@ -2124,7 +2020,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
i40evf_tx_init(dev);
- if (i40evf_configure_queues(dev) != 0) {
+ if (i40evf_configure_vsi_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "configure queues failed");
goto err_queue;
}
@@ -2141,7 +2037,20 @@ i40evf_dev_start(struct rte_eth_dev *dev)
goto err_mac;
}
+	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
+	 * interrupt is mapped to VFIO vector 0 in i40evf_dev_init().
+	 * If the previous VFIO interrupt mapping set in i40evf_dev_init()
+	 * is not cleared, it will fail when rte_intr_enable() tries to map
+	 * Rx queue interrupts to other VFIO vectors.
+	 * So clear the uio/vfio interrupt/eventfd first to avoid failure.
+	 */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ rte_intr_disable(intr_handle);
+ rte_intr_enable(intr_handle);
+ }
+
i40evf_enable_queues_intr(dev);
+
return 0;
err_mac:
@@ -2155,7 +2064,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2216,6 +2125,8 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
+ new_link.link_autoneg =
+ dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
i40evf_dev_atomic_write_link_status(dev, &new_link);
@@ -2295,7 +2206,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
- dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -2346,11 +2257,30 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
};
}
-static void
+static int
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- if (i40evf_get_statistics(dev, stats))
+ int ret;
+ struct i40e_eth_stats *pstats = NULL;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_vsi *vsi = &vf->vsi;
+
+ ret = i40evf_query_stats(dev, &pstats);
+ if (ret == 0) {
+ i40evf_update_stats(vsi, pstats);
+
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->imissed = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+ } else {
PMD_DRV_LOG(ERR, "Get statistics failed");
+ }
+ return ret;
}
static void
@@ -2373,6 +2303,23 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_disable_irq0(hw);
}
+/*
+ * Reset the VF device only to re-initialize resources in the PMD layer.
+ */
+static int
+i40evf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = i40evf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = i40evf_dev_init(dev);
+
+ return ret;
+}
+
static int
i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
@@ -2580,7 +2527,7 @@ static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
struct i40e_hw *hw = I40E_VF_TO_HW(vf);
- uint64_t rss_hf, hena;
+ uint64_t hena;
int ret;
ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
@@ -2588,14 +2535,7 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
if (ret)
return ret;
- rss_hf = rss_conf->rss_hf;
- hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
- hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- if (hw->mac.type == I40E_MAC_X722)
- hena &= ~I40E_RSS_HENA_ALL_X722;
- else
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf, hw->mac.type);
+ hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
I40EVF_WRITE_FLUSH(hw);
@@ -2607,16 +2547,9 @@ static void
i40evf_disable_rss(struct i40e_vf *vf)
{
struct i40e_hw *hw = I40E_VF_TO_HW(vf);
- uint64_t hena;
- hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
- hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- if (hw->mac.type == I40E_MAC_X722)
- hena &= ~I40E_RSS_HENA_ALL_X722;
- else
- hena &= ~I40E_RSS_HENA_ALL;
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0);
I40EVF_WRITE_FLUSH(hw);
}
@@ -2645,7 +2578,7 @@ i40evf_config_rss(struct i40e_vf *vf)
}
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
- if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
+ if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) {
i40evf_disable_rss(vf);
PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
@@ -2670,14 +2603,13 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
+ uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask;
uint64_t hena;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- if (!(hena & ((hw->mac.type == I40E_MAC_X722)
- ? I40E_RSS_HENA_ALL_X722
- : I40E_RSS_HENA_ALL))) { /* RSS disabled */
+
+ if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0;
@@ -2703,7 +2635,7 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- rss_conf->rss_hf = i40e_parse_hena(hena);
+ rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena);
return 0;
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 8013add4..3d7170d5 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -49,6 +49,7 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_type.h"
@@ -71,6 +72,16 @@
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN 380
#define I40E_FDIR_UDP_DEFAULT_LEN 400
+#define I40E_FDIR_GTP_DEFAULT_LEN 384
+#define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
+#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
+
+#define I40E_FDIR_GTPC_DST_PORT 2123
+#define I40E_FDIR_GTPU_DST_PORT 2152
+#define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
+#define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
+#define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
+#define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
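+/* Gloss, assuming the standard GTPv1 header layout: flag byte 0x30 is
+ * version 1 with PT set, and 0x32 additionally sets the S (sequence
+ * number) bit; 2123/2152 are the well-known GTP-C/GTP-U destination
+ * ports.
+ */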
/* Wait time for fdir filter programming */
#define I40E_FDIR_MAX_WAIT_US 10000
@@ -100,13 +111,18 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
- const struct rte_eth_fdir_input *input);
+ const struct i40e_fdir_input *input);
static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
struct i40e_fdir_filter *filter);
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -249,7 +265,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
- pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ pf->fdir.dma_addr = mz->iova;
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
@@ -323,6 +339,7 @@ i40e_init_flx_pld(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint8_t pctype;
int i, index;
+ uint16_t flow_type;
/*
	 * Define the byte stream extracted as flexible payload in
@@ -344,15 +361,10 @@ i40e_init_flx_pld(struct i40e_pf *pf)
/* initialize the masks */
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (hw->mac.type == I40E_MAC_X722) {
- if (!I40E_VALID_PCTYPE_X722(
- (enum i40e_filter_pctype)pctype))
- continue;
- } else {
- if (!I40E_VALID_PCTYPE(
- (enum i40e_filter_pctype)pctype))
- continue;
- }
+ flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
+
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
pf->fdir.flex_mask[pctype].word_mask = 0;
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
@@ -449,7 +461,8 @@ i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
* arguments are valid
*/
static int
-i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
+i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
+ const struct rte_eth_fdir_flex_conf *conf)
{
const struct rte_eth_flex_payload_cfg *flex_cfg;
const struct rte_eth_fdir_flex_mask *flex_mask;
@@ -457,6 +470,7 @@ i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
uint8_t nb_bitmask;
uint16_t i, j;
int ret = 0;
+ enum i40e_filter_pctype pctype;
if (conf == NULL) {
PMD_DRV_LOG(INFO, "NULL pointer.");
@@ -487,7 +501,8 @@ i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
}
for (i = 0; i < conf->nb_flexmasks; i++) {
flex_mask = &conf->flex_mask[i];
- if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
+ pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(WARNING, "invalid flow type.");
return -EINVAL;
}
@@ -650,7 +665,7 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
i40e_init_flx_pld(pf); /* set flex config to default value */
conf = &dev->data->dev_conf.fdir_conf.flex_conf;
- ret = i40e_check_fdir_flex_conf(conf);
+ ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
if (ret < 0) {
PMD_DRV_LOG(ERR, " invalid configuration arguments.");
return -EINVAL;
@@ -664,11 +679,11 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
/* get translated pctype value in fd pctype register */
pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
hw, I40E_GLQF_FD_PCTYPES(
- (int)i40e_flowtype_to_pctype(
+ (int)i40e_flowtype_to_pctype(pf->adapter,
conf->flex_mask[i].flow_type)));
} else
- pctype = i40e_flowtype_to_pctype(
- conf->flex_mask[i].flow_type);
+ pctype = i40e_flowtype_to_pctype(pf->adapter,
+ conf->flex_mask[i].flow_type);
i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
}
@@ -926,6 +941,358 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
ptr = payload +
pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
+ rte_memcpy(ptr,
+ &fdir_input->flow_ext.flexbytes[dst],
+ size * sizeof(uint16_t));
+ }
+
+ return 0;
+}
+
+static struct i40e_customized_pctype *
+i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
+{
+ struct i40e_customized_pctype *cus_pctype;
+ enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
+
+ for (; i < I40E_CUSTOMIZED_MAX; i++) {
+ cus_pctype = &pf->customized_pctype[i];
+ if (pctype == cus_pctype->pctype)
+ return cus_pctype;
+ }
+ return NULL;
+}
+
+static inline int
+i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
+ const struct i40e_fdir_input *fdir_input,
+ unsigned char *raw_pkt,
+ bool vlan)
+{
+ struct i40e_customized_pctype *cus_pctype = NULL;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ uint16_t *ether_type;
+ uint8_t len = 2 * sizeof(struct ether_addr);
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ uint8_t pctype = fdir_input->pctype;
+ bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
+ static const uint8_t next_proto[] = {
+ [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
+ [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
+ [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
+ };
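+	/* next_proto[] supplies the default L4 protocol written into the
+	 * template whenever the user leaves ip4_flow.proto or
+	 * ipv6_flow.proto at zero; see the ternaries below.
+	 */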
+
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (vlan && fdir_input->flow_ext.vlan_tci) {
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &fdir_input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ if (is_customized_pctype) {
+ cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+ if (!cus_pctype) {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+ }
+
+ if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+ *ether_type = fdir_input->flow.l2_flow.ether_type;
+ else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
+ is_customized_pctype) {
+ ip = (struct ipv4_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+		/* set the length to the default value */
+ ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
+ ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
+ fdir_input->flow.ip4_flow.ttl :
+ I40E_FDIR_IP_DEFAULT_TTL;
+ ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
+ ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
+
+ if (!is_customized_pctype)
+ ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
+ fdir_input->flow.ip4_flow.proto :
+ next_proto[fdir_input->pctype];
+ else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU)
+ ip->next_proto_id = IPPROTO_UDP;
+ len += sizeof(struct ipv4_hdr);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
+ pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (fdir_input->flow.ipv6_flow.tc <<
+ I40E_FDIR_IPv6_TC_OFFSET));
+ ip6->payload_len =
+ rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ ip6->proto = fdir_input->flow.ipv6_flow.proto ?
+ fdir_input->flow.ipv6_flow.proto :
+ next_proto[fdir_input->pctype];
+ ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
+ fdir_input->flow.ipv6_flow.hop_limits :
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ rte_memcpy(&ip6->src_addr,
+ &fdir_input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr,
+ &fdir_input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ } else {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+
+ return len;
+}
+
+/**
+ * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
+ * @pf: board private structure
+ * @fdir_input: input set of the flow director entry
+ * @raw_pkt: a packet to be constructed
+ */
+static int
+i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
+ const struct i40e_fdir_input *fdir_input,
+ unsigned char *raw_pkt)
+{
+ unsigned char *payload = NULL;
+ unsigned char *ptr;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ struct sctp_hdr *sctp;
+ struct rte_flow_item_gtp *gtp;
+ struct ipv4_hdr *gtp_ipv4;
+ struct ipv6_hdr *gtp_ipv6;
+ uint8_t size, dst = 0;
+	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default */
+ int len;
+ uint8_t pctype = fdir_input->pctype;
+ struct i40e_customized_pctype *cus_pctype;
+
+	/* raw packet template - just copy contents of the raw packet */
+ if (fdir_input->flow_ext.pkt_template) {
+ memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
+ fdir_input->flow.raw_flow.length);
+ return 0;
+ }
+
+ /* fill the ethernet and IP head */
+ len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
+ !!fdir_input->flow_ext.vlan_tci);
+ if (len < 0)
+ return -EINVAL;
+
+ /* fill the L4 head */
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp4_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp4_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ udp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ udp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
+ tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
+ tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
+ sctp = (struct sctp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
+ /**
+ * The source and destination fields in the transmitted packet
+ * need to be presented in a reversed order with respect
+ * to the expected received packets.
+ */
+ sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
+ sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
+ sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
+ pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
+ payload = raw_pkt + len;
+ set_idx = I40E_FLXPLD_L3_IDX;
+ } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ payload = raw_pkt + len;
+ /**
+		 * An ARP packet is a special case in which the payload
+		 * starts right after the whole ARP header.
+ */
+ if (fdir_input->flow.l2_flow.ether_type ==
+ rte_cpu_to_be_16(ETHER_TYPE_ARP))
+ payload += sizeof(struct arp_hdr);
+ set_idx = I40E_FLXPLD_L2_IDX;
+ } else if (fdir_input->flow_ext.customized_pctype) {
+ /* If customized pctype is used */
+ cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
+ cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dgram_len =
+ rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
+
+ gtp = (struct rte_flow_item_gtp *)
+ ((unsigned char *)udp + sizeof(struct udp_hdr));
+ gtp->msg_len =
+ rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
+ gtp->teid = fdir_input->flow.gtp_flow.teid;
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
+
+ /* GTP-C message type is not supported. */
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
+ udp->dst_port =
+ rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
+ gtp->v_pt_rsv_flags =
+ I40E_FDIR_GTP_VER_FLAG_0X32;
+ } else {
+ udp->dst_port =
+ rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
+ gtp->v_pt_rsv_flags =
+ I40E_FDIR_GTP_VER_FLAG_0X30;
+ }
+
+ if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+ gtp_ipv4 = (struct ipv4_hdr *)
+ ((unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp));
+ gtp_ipv4->version_ihl =
+ I40E_FDIR_IP_DEFAULT_VERSION_IHL;
+ gtp_ipv4->next_proto_id = IPPROTO_IP;
+ gtp_ipv4->total_length =
+ rte_cpu_to_be_16(
+ I40E_FDIR_INNER_IP_DEFAULT_LEN);
+ payload = (unsigned char *)gtp_ipv4 +
+ sizeof(struct ipv4_hdr);
+ } else if (cus_pctype->index ==
+ I40E_CUSTOMIZED_GTPU_IPV6) {
+ gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
+ gtp_ipv6 = (struct ipv6_hdr *)
+ ((unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp));
+ gtp_ipv6->vtc_flow =
+ rte_cpu_to_be_32(
+ I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
+ (0 << I40E_FDIR_IPv6_TC_OFFSET));
+ gtp_ipv6->proto = IPPROTO_NONE;
+ gtp_ipv6->payload_len =
+ rte_cpu_to_be_16(
+ I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
+ gtp_ipv6->hop_limits =
+ I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
+ payload = (unsigned char *)gtp_ipv6 +
+ sizeof(struct ipv6_hdr);
+ } else
+ payload = (unsigned char *)gtp +
+ sizeof(struct rte_flow_item_gtp);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "unknown pctype %u.",
+ fdir_input->pctype);
+ return -1;
+ }
+
+ /* fill the flexbytes to payload */
+ for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+ pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
+ size = pf->fdir.flex_set[pit_idx].size;
+ if (size == 0)
+ continue;
+ dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
+ ptr = payload +
+ pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
(void)rte_memcpy(ptr,
&fdir_input->flow_ext.flexbytes[dst],
size * sizeof(uint16_t));
@@ -1007,21 +1374,34 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
}
static int
-i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
struct i40e_fdir_filter *filter)
{
- rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
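+	/* A raw packet template lives in caller-owned memory, so it cannot
+	 * serve as a hash key; drop the pointer and keep a CRC of the
+	 * template in the length field, which the *_with_hash() calls
+	 * below reuse as the precomputed hash signature.
+	 */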
+ if (input->input.flow_ext.pkt_template) {
+ filter->fdir.input.flow.raw_flow.packet = NULL;
+ filter->fdir.input.flow.raw_flow.length =
+ rte_hash_crc(input->input.flow.raw_flow.packet,
+ input->input.flow.raw_flow.length,
+ input->input.flow.raw_flow.pctype);
+ }
return 0;
}
/* Check if the flow director filter exists */
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
- const struct rte_eth_fdir_input *input)
+ const struct i40e_fdir_input *input)
{
int ret;
- ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (input->flow_ext.pkt_template)
+ ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
+ (const void *)input,
+ input->flow.raw_flow.length);
+ else
+ ret = rte_hash_lookup(fdir_info->hash_table,
+ (const void *)input);
if (ret < 0)
return NULL;
@@ -1035,8 +1415,13 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
struct i40e_fdir_info *fdir_info = &pf->fdir;
int ret;
- ret = rte_hash_add_key(fdir_info->hash_table,
- &filter->fdir.input);
+ if (filter->fdir.input.flow_ext.pkt_template)
+ ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
+ &filter->fdir.input,
+ filter->fdir.input.flow.raw_flow.length);
+ else
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"Failed to insert fdir filter to hash table %d!",
@@ -1052,13 +1437,18 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
/* Delete a flow director filter from the SW list */
int
-i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
{
struct i40e_fdir_info *fdir_info = &pf->fdir;
struct i40e_fdir_filter *filter;
int ret;
- ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (input->flow_ext.pkt_template)
+ ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
+ input,
+ input->flow.raw_flow.length);
+ else
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"Failed to delete fdir filter to hash table %d!",
@@ -1082,16 +1472,13 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
*/
int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *filter,
- bool add)
+ const struct rte_eth_fdir_filter *filter,
+ bool add)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
- struct i40e_fdir_info *fdir_info = &pf->fdir;
- struct i40e_fdir_filter *fdir_filter, *node;
- struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1100,7 +1487,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -ENOTSUP;
}
- if (!I40E_VALID_FLOW(filter->input.flow_type)) {
+ pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
@@ -1114,6 +1502,76 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
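+	/* Legacy filter_ctrl path: program the filter directly; the SW-list
+	 * bookkeeping is handled by the rte_flow path instead.
+	 */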
+ memset(pkt, 0, I40E_FDIR_PKT_LEN);
+
+ ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+ if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to construct packet for fdir.");
+ return ret;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
+
+ ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
+ pctype);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: ethernet device structure
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+int
+i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+ enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+ int ret = 0;
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "Invalid queue ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID");
+ return -EINVAL;
+ }
+ if (filter->input.flow_ext.pkt_template) {
+ if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
+ !filter->input.flow.raw_flow.packet) {
+ PMD_DRV_LOG(ERR, "Invalid raw packet template"
+ " flow filter parameters!");
+ return -EINVAL;
+ }
+ pctype = filter->input.flow.raw_flow.pctype;
+ } else {
+ pctype = filter->input.pctype;
+ }
+
/* Check if there is the filter in SW list */
memset(&check_filter, 0, sizeof(check_filter));
i40e_fdir_filter_convert(filter, &check_filter);
@@ -1132,7 +1590,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
memset(pkt, 0, I40E_FDIR_PKT_LEN);
- ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
+ ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
if (ret < 0) {
PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
return ret;
@@ -1141,13 +1599,10 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
if (hw->mac.type == I40E_MAC_X722) {
/* get translated pctype value in fd pctype register */
pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
- hw, I40E_GLQF_FD_PCTYPES(
- (int)i40e_flowtype_to_pctype(
- filter->input.flow_type)));
- } else
- pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+ hw, I40E_GLQF_FD_PCTYPES((int)pctype));
+ }
- ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
+ ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
if (ret < 0) {
PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
pctype);
@@ -1302,6 +1757,140 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
}
/*
+ * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
+ * Programming is done via a Flow Director Programming Descriptor followed
+ * by the packet structure that contains the filter fields to match.
+ * @pf: board private structure
+ * @pctype: pctype
+ * @filter: fdir filter entry
+ * @add: 0 - delete, 1 - add
+ */
+static int
+i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
+ enum i40e_filter_pctype pctype,
+ const struct i40e_fdir_filter_conf *filter,
+ bool add)
+{
+ struct i40e_tx_queue *txq = pf->fdir.txq;
+ struct i40e_rx_queue *rxq = pf->fdir.rxq;
+ const struct i40e_fdir_action *fdir_action = &filter->action;
+ volatile struct i40e_tx_desc *txdp;
+ volatile struct i40e_filter_program_desc *fdirdp;
+ uint32_t td_cmd;
+ uint16_t vsi_id, i;
+ uint8_t dest;
+
+ PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
+ fdirdp = (volatile struct i40e_filter_program_desc *)
+ (&txq->tx_ring[txq->tx_tail]);
+
+ fdirdp->qindex_flex_ptype_vsi =
+ rte_cpu_to_le_32((fdir_action->rx_queue <<
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((fdir_action->flex_off <<
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32((pctype <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ if (filter->input.flow_ext.is_vf)
+ vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
+ else
+ /* Use LAN VSI Id by default */
+ vsi_id = pf->main_vsi->vsi_id;
+ fdirdp->qindex_flex_ptype_vsi |=
+ rte_cpu_to_le_32(((uint32_t)vsi_id <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdirdp->dtype_cmd_cntindex =
+ rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ if (fdir_action->behavior == I40E_FDIR_REJECT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+ else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
+ dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
+ else {
+ PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
+ return -EINVAL;
+ }
+
+ fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32((fdir_action->report_status <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdirdp->dtype_cmd_cntindex |=
+ rte_cpu_to_le_32(
+ ((uint32_t)pf->fdir.match_counter_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+
+ fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
+
+ PMD_DRV_LOG(INFO, "filling transmit descriptor.");
+ txdp = &txq->tx_ring[txq->tx_tail + 1];
+ txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ txdp->cmd_type_offset_bsz =
+ i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
+
+ txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ /* Update the tx tail register */
+ rte_wmb();
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
+ if ((txdp->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ break;
+ rte_delay_us(1);
+ }
+ if (i >= I40E_FDIR_MAX_WAIT_US) {
+ PMD_DRV_LOG(ERR,
+			"Failed to program FDIR filter: timed out waiting for DD on the tx queue.");
+ return -ETIMEDOUT;
+ }
+	/* delay 10 ms in total before checking the programming status */
+ rte_delay_us(I40E_FDIR_MAX_WAIT_US);
+ if (i40e_check_fdir_programming_status(rxq) < 0) {
+ PMD_DRV_LOG(ERR,
+			"Failed to program FDIR filter: programming status reports failure.");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
@@ -1384,7 +1973,6 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
{
struct i40e_fdir_flex_mask *mask;
struct rte_eth_fdir_flex_mask *ptr = flex_mask;
- struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint16_t flow_type;
uint8_t i, j;
uint16_t off_bytes, mask_tmp;
@@ -1393,14 +1981,11 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
i++) {
mask = &pf->fdir.flex_mask[i];
- if (hw->mac.type == I40E_MAC_X722) {
- if (!I40E_VALID_PCTYPE_X722((enum i40e_filter_pctype)i))
- continue;
- } else {
- if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
- continue;
- }
- flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
+ flow_type = i40e_pctype_to_flowtype(pf->adapter,
+ (enum i40e_filter_pctype)i);
+ if (flow_type == RTE_ETH_FLOW_UNKNOWN)
+ continue;
+
for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
@@ -1580,7 +2165,7 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
uint32_t best_cnt; /**< Number of filters in best effort spaces. */
TAILQ_FOREACH(f, fdir_list, rules)
- i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+ i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
guarant_cnt =
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b92719a3..7e4936e3 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -41,7 +41,6 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
-#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
@@ -84,11 +83,11 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
- struct rte_eth_fdir_filter *filter);
+ struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
- struct rte_eth_fdir_filter *filter);
+ struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
@@ -125,6 +124,12 @@ static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -189,6 +194,40 @@ static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_fdir_ipv6[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
@@ -216,6 +255,40 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPC,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_GTPU,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_RAW,
@@ -1576,10 +1649,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
/* FDIR - support default flow type with flexible payload */
{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
@@ -1732,6 +1813,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* GTP-C & GTP-U */
+ { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
+ { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
/* QINQ */
{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
};
@@ -2302,20 +2388,58 @@ i40e_flow_set_fdir_inset(struct i40e_pf *pf,
return 0;
}
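+/*
+ * Customized pctypes (GTP-C/GTP-U) are not fixed in hardware; they become
+ * available once the matching DDP profile has been loaded, hence the
+ * dynamic lookup below.
+ */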
+static uint8_t
+i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
+ enum rte_flow_item_type item_type,
+ struct i40e_fdir_filter_conf *filter)
+{
+ struct i40e_customized_pctype *cus_pctype = NULL;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPC);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ if (!filter->input.flow_ext.inner_ip)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU);
+ else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV4)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV4);
+ else if (filter->input.flow_ext.iip_type ==
+ I40E_FDIR_IPTYPE_IPV6)
+ cus_pctype = i40e_find_customized_pctype(pf,
+ I40E_CUSTOMIZED_GTPU_IPV6);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported item type");
+ break;
+ }
+
+ if (cus_pctype)
+ return cus_pctype->pctype;
+
+ return I40E_FILTER_PCTYPE_INVALID;
+}
+
/* 1. Last in item should be NULL as range is not supported.
* 2. Supported patterns: refer to array i40e_supported_patterns.
- * 3. Supported flow type and input set: refer to array
+ * 3. Default supported flow type and input set: refer to array
* valid_fdir_inset_table in i40e_ethdev.c.
* 4. Mask of fields which need to be matched should be
* filled with 1.
 * 5. Mask of fields which need not be matched should be
* filled with 0.
+ * 6. GTP profile supports GTPv1 only.
+ * 7. GTP-C response message ('source_port' = 2123) is not supported.
*/
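+/*
+ * Example of a rule this parser accepts (TEID and queue values are
+ * hypothetical), in testpmd flow syntax:
+ *   flow create 0 ingress pattern eth / ipv4 / udp /
+ *     gtpu teid is 0x12345678 / end actions queue index 3 / end
+ */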
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
- struct rte_eth_fdir_filter *filter)
+ struct i40e_fdir_filter_conf *filter)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_item *item = pattern;
@@ -2326,15 +2450,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_vf *vf_spec;
- uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
- enum i40e_filter_pctype pctype;
+ uint8_t pctype = 0;
uint64_t input_set = I40E_INSET_NONE;
uint16_t frag_off;
enum rte_flow_item_type item_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
uint32_t i, j;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -2352,12 +2477,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
uint16_t outer_tpid;
uint16_t ether_type;
uint32_t vtc_flow_cpu;
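+	/* The first IP item in the pattern is the outer header; a second
+	 * one, if present, describes the GTP-encapsulated inner header.
+	 */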
+ bool outer_ip = true;
int ret;
memset(off_arr, 0, sizeof(off_arr));
memset(len_arr, 0, sizeof(len_arr));
memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
outer_tpid = i40e_get_outer_vlan(dev);
+ filter->input.flow_ext.customized_pctype = false;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -2402,7 +2529,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
}
- flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
break;
@@ -2420,7 +2547,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
}
- flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
break;
@@ -2430,8 +2557,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv4 *)item->spec;
ipv4_mask =
(const struct rte_flow_item_ipv4 *)item->mask;
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
- if (ipv4_spec && ipv4_mask) {
+ if (ipv4_spec && ipv4_mask && outer_ip) {
/* Check IPv4 mask and update input set */
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.total_length ||
@@ -2456,14 +2585,12 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
input_set |= I40E_INSET_IPV4_PROTO;
- /* Get filter info */
- flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
/* Check if it is fragment. */
frag_off = ipv4_spec->hdr.fragment_offset;
frag_off = rte_be_to_cpu_16(frag_off);
if (frag_off & IPV4_HDR_OFFSET_MASK ||
frag_off & IPV4_HDR_MF_FLAG)
- flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
/* Get the filter info */
filter->input.flow.ip4_flow.proto =
@@ -2476,9 +2603,20 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
ipv4_spec->hdr.src_addr;
filter->input.flow.ip4_flow.dst_ip =
ipv4_spec->hdr.dst_addr;
+ } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
+ filter->input.flow_ext.inner_ip = true;
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV4;
+ } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv4 mask.");
+ return -rte_errno;
}
- layer_idx = I40E_FLXPLD_L3_IDX;
+ if (outer_ip)
+ outer_ip = false;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
@@ -2487,8 +2625,10 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
(const struct rte_flow_item_ipv6 *)item->spec;
ipv6_mask =
(const struct rte_flow_item_ipv6 *)item->mask;
+ pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ layer_idx = I40E_FLXPLD_L3_IDX;
- if (ipv6_spec && ipv6_mask) {
+ if (ipv6_spec && ipv6_mask && outer_ip) {
/* Check IPv6 mask and update input set */
if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
@@ -2535,20 +2675,32 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
/* Check if it is fragment. */
if (ipv6_spec->hdr.proto ==
I40E_IPV6_FRAG_HEADER)
- flow_type =
- RTE_ETH_FLOW_FRAG_IPV6;
- else
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
+ filter->input.flow_ext.inner_ip = true;
+ filter->input.flow_ext.iip_type =
+ I40E_FDIR_IPTYPE_IPV6;
+ } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner IPv6 mask");
+ return -rte_errno;
}
- layer_idx = I40E_FLXPLD_L3_IDX;
-
+ if (outer_ip)
+ outer_ip = false;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
if (tcp_spec && tcp_mask) {
/* Check TCP mask and update input set */
if (tcp_mask->hdr.sent_seq ||
@@ -2571,13 +2723,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
input_set |= I40E_INSET_DST_PORT;
/* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
-
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
filter->input.flow.tcp4_flow.src_port =
tcp_spec->hdr.src_port;
@@ -2598,6 +2743,13 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
udp_spec = (const struct rte_flow_item_udp *)item->spec;
udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+
if (udp_spec && udp_mask) {
/* Check UDP mask and update input set*/
if (udp_mask->hdr.dgram_len ||
@@ -2615,13 +2767,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
input_set |= I40E_INSET_DST_PORT;
/* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
-
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
filter->input.flow.udp4_flow.src_port =
udp_spec->hdr.src_port;
@@ -2638,12 +2783,50 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
layer_idx = I40E_FLXPLD_L4_IDX;
break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ if (!pf->gtp_support) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported protocol");
+ return -rte_errno;
+ }
+
+ gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
+ gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+
+ if (gtp_spec && gtp_mask) {
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len ||
+ gtp_mask->teid != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ filter->input.flow.gtp_flow.teid =
+ gtp_spec->teid;
+ filter->input.flow_ext.customized_pctype = true;
+ cus_proto = item_type;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_SCTP:
sctp_spec =
(const struct rte_flow_item_sctp *)item->spec;
sctp_mask =
(const struct rte_flow_item_sctp *)item->mask;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ pctype =
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+
if (sctp_spec && sctp_mask) {
/* Check SCTP mask and update input set */
if (sctp_mask->hdr.cksum) {
@@ -2662,13 +2845,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
input_set |= I40E_INSET_SCTP_VT;
/* Get filter info */
- if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
- else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
- flow_type =
- RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
-
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
filter->input.flow.sctp4_flow.src_port =
sctp_spec->hdr.src_port;
@@ -2776,51 +2952,58 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
}
- pctype = i40e_flowtype_to_pctype(flow_type);
- if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Unsupported flow type");
- return -rte_errno;
+ /* Get customized pctype value */
+ if (filter->input.flow_ext.customized_pctype) {
+ pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
+ if (pctype == I40E_FILTER_PCTYPE_INVALID) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported pctype");
+ return -rte_errno;
+ }
}
- ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
- if (ret == -1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Conflict with the first rule's input set.");
- return -rte_errno;
- } else if (ret == -EINVAL) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Invalid pattern mask.");
- return -rte_errno;
- }
+	/* If customized pctype is not used, set fdir configuration. */
+ if (!filter->input.flow_ext.customized_pctype) {
+ ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Conflict with the first rule's input set.");
+ return -rte_errno;
+ } else if (ret == -EINVAL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid pattern mask.");
+ return -rte_errno;
+ }
- filter->input.flow_type = flow_type;
+ /* Store flex mask to SW */
+ ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+ if (ret == -1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Exceed maximal number of bitmasks");
+ return -rte_errno;
+ } else if (ret == -2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Conflict with the first flexible rule");
+ return -rte_errno;
+ } else if (ret > 0)
+ cfg_flex_msk = false;
- /* Store flex mask to SW */
- ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
- if (ret == -1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Exceed maximal number of bitmasks");
- return -rte_errno;
- } else if (ret == -2) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Conflict with the first flexible rule");
- return -rte_errno;
- } else if (ret > 0)
- cfg_flex_msk = false;
+ if (cfg_flex_pit)
+ i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
- if (cfg_flex_pit)
- i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
+ if (cfg_flex_msk)
+ i40e_flow_set_fdir_flex_msk(pf, pctype);
+ }
- if (cfg_flex_msk)
- i40e_flow_set_fdir_flex_msk(pf, pctype);
+ filter->input.pctype = pctype;
return 0;
}
@@ -2832,7 +3015,7 @@ static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
- struct rte_eth_fdir_filter *filter)
+ struct i40e_fdir_filter_conf *filter)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_action *act;
@@ -2855,13 +3038,13 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
"Invalid queue ID for FDIR.");
return -rte_errno;
}
- filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ filter->action.behavior = I40E_FDIR_ACCEPT;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- filter->action.behavior = RTE_ETH_FDIR_REJECT;
+ filter->action.behavior = I40E_FDIR_REJECT;
break;
case RTE_FLOW_ACTION_TYPE_PASSTHRU:
- filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
+ filter->action.behavior = I40E_FDIR_PASSTHRU;
break;
default:
rte_flow_error_set(error, EINVAL,
@@ -2876,11 +3059,11 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
mark_spec = (const struct rte_flow_action_mark *)act->conf;
- filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.report_status = I40E_FDIR_REPORT_ID;
filter->soft_id = mark_spec->id;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
- filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
break;
case RTE_FLOW_ACTION_TYPE_END:
return 0;
@@ -2911,7 +3094,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error,
union i40e_filter_t *filter)
{
- struct rte_eth_fdir_filter *fdir_filter =
+ struct i40e_fdir_filter_conf *fdir_filter =
&filter->fdir_filter;
int ret;
@@ -3646,6 +3829,148 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
}
/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: GTP TEID.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ * 5. GTP profile supports GTPv1 only.
+ * 6. GTP-C response message ('source_port' = 2123) is not supported.
+ */
+static int
+i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_gtp *gtp_spec;
+ const struct rte_flow_item_gtp *gtp_mask;
+ enum rte_flow_item_type item_type;
+
+ if (!pf->gtp_support) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "GTP is not supported by default.");
+ return -rte_errno;
+ }
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+					   "Range is not supported");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+			/* IPv4 is used only to describe the protocol;
+			 * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ gtp_spec =
+ (const struct rte_flow_item_gtp *)item->spec;
+ gtp_mask =
+ (const struct rte_flow_item_gtp *)item->mask;
+
+ if (!gtp_spec || !gtp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP item");
+ return -rte_errno;
+ }
+
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len ||
+ gtp_mask->teid != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
+ else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
+
+ filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_gtp_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
* 2. Supported filter types: QINQ.
* 3. Mask of fields which need to be matched should be
* filled with 1.
@@ -3877,7 +4202,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
i40e_ethertype_filter_list);
break;
case RTE_ETH_FILTER_FDIR:
- ret = i40e_add_del_fdir_filter(dev,
+ ret = i40e_flow_add_del_fdir_filter(dev,
&cons_filter.fdir_filter, 1);
if (ret)
goto free_flow;
@@ -3927,7 +4252,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
(struct i40e_tunnel_filter *)flow->rule);
break;
case RTE_ETH_FILTER_FDIR:
- ret = i40e_add_del_fdir_filter(dev,
+ ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
default:
@@ -4016,12 +4341,12 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
vsi = vf->vsi;
}
- if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
- ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
- I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
- ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
- I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
+ I40E_AQC_ADD_CLOUD_FILTER_0X10))
big_buffer = 1;
if (big_buffer)
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 100f8dc2..94bb0cfd 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -44,7 +44,6 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -538,73 +537,6 @@ send_msg:
return ret;
}
-static int
-i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
-{
- struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
- struct i40e_vsi *vsi = vf->vsi;
- struct virtchnl_vsi_queue_config_ext_info *vc_vqcei =
- (struct virtchnl_vsi_queue_config_ext_info *)msg;
- struct virtchnl_queue_pair_ext_info *vc_qpei;
- int i, ret = I40E_SUCCESS;
-
- if (!b_op) {
- i40e_pf_host_send_msg_to_vf(
- vf,
- VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- I40E_NOT_SUPPORTED, NULL, 0);
- return ret;
- }
-
- if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
- vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
- msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
- vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
-
- vc_qpei = vc_vqcei->qpair;
- for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
- if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
- vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
- /*
- * Apply VF RX queue setting to HMC.
- * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- * then the extra information of
- * 'struct virtchnl_queue_pair_ext_info' is needed,
- * otherwise set the last parameter to NULL.
- */
- if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
- vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
-
- /* Apply VF TX queue setting to HMC */
- if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
- I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
- }
-
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
- ret, NULL, 0);
-
- return ret;
-}
-
static void
i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
struct virtchnl_vector_map *vvm)
@@ -714,7 +646,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
int i;
- uint16_t vector_id;
+ uint16_t vector_id, itr_idx;
unsigned long qbit_max;
if (!b_op) {
@@ -741,12 +673,13 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
vf->vsi->nb_msix = irqmap->num_vectors;
vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ itr_idx = irqmap->vecmap[0].rxitr_idx;
 	/* Don't care how the TX/RX queues map to this vector;
 	 * link all VF RX queues together. Only the mapping work is done here.
 	 * The VF can disable/enable the interrupt by itself.
*/
- i40e_vsi_queues_bind_intr(vf->vsi);
+ i40e_vsi_queues_bind_intr(vf->vsi, itr_idx);
goto send_msg;
}
@@ -909,7 +842,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct ether_addr *)(addr_list->list[i].addr);
- (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
if (is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {
@@ -1157,42 +1090,13 @@ i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
return ret;
}
-static int
-i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
- uint8_t *msg,
- uint16_t msglen,
- bool b_op)
-{
- int ret = I40E_SUCCESS;
- struct virtchnl_pvid_info *tpid_info =
- (struct virtchnl_pvid_info *)msg;
-
- if (!b_op) {
- i40e_pf_host_send_msg_to_vf(
- vf,
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- I40E_NOT_SUPPORTED, NULL, 0);
- return ret;
- }
-
- if (msg == NULL || msglen != sizeof(*tpid_info)) {
- ret = I40E_ERR_PARAM;
- goto send_msg;
- }
-
- ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);
-
-send_msg:
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
- ret, NULL, 0);
-
- return ret;
-}
-
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct virtchnl_pf_event event;
+ uint16_t vf_id = vf->vf_idx;
+ uint32_t tval, rval;
event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
@@ -1224,8 +1128,15 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
break;
}
- i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+ tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
+ rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));
+
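+	/* Forward the link event only if the VF admin queues are set up,
+	 * i.e. a VF driver is loaded and listening.
+	 */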
+ if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
+ tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
+ rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
+ rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
void
@@ -1300,11 +1211,6 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
msglen, b_op);
break;
- case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
- PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
- i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen, b_op);
- break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
@@ -1359,10 +1265,6 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
- case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
- PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
- break;
/* Don't add command supported below, which will
* return an error code.
*/
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 7afb7eae..04116637 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -34,58 +34,9 @@
#ifndef _I40E_PF_H_
#define _I40E_PF_H_
-/* VERSION info to exchange between VF and PF host. In case VF works with
- * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. In
- * case works with DPDK host, it reads version below. Then VF realize who it
- * is talking to and use proper language to communicate.
- * */
-#define I40E_DPDK_SIGNATURE ('D' << 24 | 'P' << 16 | 'D' << 8 | 'K')
-#define I40E_DPDK_VERSION_MAJOR I40E_DPDK_SIGNATURE
-#define I40E_DPDK_VERSION_MINOR 0
-
/* Default setting on number of VSIs that VF can contain */
#define I40E_DEFAULT_VF_VSI_NUM 1
-#define I40E_DPDK_OFFSET 0x100
-
-/* DPDK pf driver specific command to VF */
-enum virtchnl_ops_dpdk {
- /*
- * Keep some gap between Linux PF commands and
- * DPDK PF extended commands.
- */
- I40E_VIRTCHNL_OP_CFG_VLAN_PVID = VIRTCHNL_OP_VERSION +
- I40E_DPDK_OFFSET,
- VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
-};
-
-/* A structure to support extended info of a receive queue. */
-struct virtchnl_rxq_ext_info {
- uint8_t crcstrip;
-};
-
-/*
- * A structure to support extended info of queue pairs, an additional field
- * is added, comparing to original 'struct virtchnl_queue_pair_info'.
- */
-struct virtchnl_queue_pair_ext_info {
- /* vsi_id and queue_id should be identical for both rx and tx queues.*/
- struct virtchnl_txq_info txq;
- struct virtchnl_rxq_info rxq;
- struct virtchnl_rxq_ext_info rxq_ext;
-};
-
-/*
- * A structure to support extended info of VSI queue pairs,
- * 'struct virtchnl_queue_pair_ext_info' is used, see its original
- * of 'struct virtchnl_queue_pair_info'.
- */
-struct virtchnl_vsi_queue_config_ext_info {
- uint16_t vsi_id;
- uint16_t num_queue_pairs;
- struct virtchnl_queue_pair_ext_info qpair[0];
-};
-
struct virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
@@ -99,17 +50,6 @@ struct virtchnl_vlan_offload_info {
#define I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(x, n) \
(sizeof(*(x)) + sizeof((x)->qpair[0]) * (n))
-/*
- * I40E_VIRTCHNL_OP_CFG_VLAN_PVID
- * VF sends this message to enable/disable pvid. If it's
- * enable op, needs to specify the pvid. PF returns status
- * code in retval.
- */
-struct virtchnl_pvid_info {
- uint16_t vsi_id;
- struct i40e_vsi_vlan_pvid_info info;
-};
-
int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset);
void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d42c23c0..8b4f612f 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -108,7 +108,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -589,7 +589,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- rte_mbuf_data_dma_addr_default(mb));
+ rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
@@ -752,7 +752,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -869,7 +869,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
@@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
@@ -1301,7 +1301,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1315,7 +1315,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
{
uint64_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1734,36 +1734,42 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
uint16_t len, i;
- uint16_t base, bsf, tc_mapping;
- int use_def_burst_func = 1;
+ uint16_t reg_idx, base, bsf, tc_mapping;
+ int q_offset, use_def_burst_func = 1;
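+	/* Compute the absolute HW queue index (reg_idx): queue_idx for a
+	 * VF, or the VSI base queue plus the queue offset for the PF.
+	 */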
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- struct i40e_vf *vf =
- I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
- if (vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI not available or queue "
- "index exceeds the maximum");
- return I40E_ERR_PARAM;
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
}
+
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
"invalid", nb_desc);
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/* Free memory if needed */
@@ -1786,12 +1792,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
- rxq->reg_idx = queue_idx;
- else /* PF device */
- rxq->reg_idx = vsi->base_queue +
- i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+ rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
@@ -1822,7 +1823,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2012,34 +2013,40 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf = NULL;
+ struct i40e_vf *vf = NULL;
struct i40e_tx_queue *txq;
const struct rte_memzone *tz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
- uint16_t i, base, bsf, tc_mapping;
+ uint16_t reg_idx, i, base, bsf, tc_mapping;
+ int q_offset;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
- struct i40e_vf *vf =
- I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else
+ if (!vsi)
+ return -EINVAL;
+ reg_idx = queue_idx;
+ } else {
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
- if (vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
- "exceeds the maximum", queue_idx);
- return I40E_ERR_PARAM;
+ if (!vsi)
+ return -EINVAL;
+ q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+ if (q_offset < 0)
+ return -EINVAL;
+ reg_idx = vsi->base_queue + q_offset;
}
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
- (nb_desc > I40E_MAX_RING_DESC) ||
- (nb_desc < I40E_MIN_RING_DESC)) {
+ (nb_desc > I40E_MAX_RING_DESC) ||
+ (nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
"invalid", nb_desc);
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/**
@@ -2148,18 +2155,13 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
- if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
- txq->reg_idx = queue_idx;
- else /* PF device */
- txq->reg_idx = vsi->base_queue +
- i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+ txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2221,12 +2223,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
if (mz)
return mz;
- if (rte_xen_dom0_supported())
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
- else
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_RING_BASE_ALIGN);
+ mz = rte_memzone_reserve_aligned(name, len,
+ socket_id, 0, I40E_RING_BASE_ALIGN);
return mz;
}
@@ -2307,18 +2305,40 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
void
i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
{
+ struct rte_eth_dev *dev;
uint16_t i;
+
 	if (!txq || !txq->sw_ring) {
 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
 		return;
 	}
+
+	/* Dereference txq only after the NULL check above. */
+	dev = &rte_eth_devices[txq->port_id];
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_ring[i].mbuf) {
+	/**
+	 * vPMD tx does not set sw_ring's mbuf to NULL after freeing it,
+	 * so the remaining mbufs need to be freed more carefully.
+	 */
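+	/* With that scheme, only slots in
+	 * [tx_next_dd - tx_rs_thresh + 1, tx_tail) may still hold mbufs,
+	 * and the range can wrap past the end of the ring.
+	 */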
+ if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+ i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
+ } else {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
}
}
@@ -2431,7 +2451,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
@@ -2675,7 +2695,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -2731,7 +2751,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
@@ -2941,6 +2961,64 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev)
ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
}
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ ad->pctypes_mask = 0ULL;
+
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ }
+
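+	/* Derive the aggregate masks so later flow-type/pctype validity
+	 * checks reduce to simple bitmap tests.
+	 */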
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (ad->pctypes_tbl[i])
+ ad->flow_types_mask |= (1ULL << i);
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+ }
+}
+
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
int __attribute__((weak))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 20084d64..06c6a659 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -121,7 +121,7 @@ struct i40e_rx_queue {
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
uint64_t mbuf_initializer; /**< value to init mbufs */
- uint8_t port_id; /**< device port ID */
+ uint16_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
uint16_t reg_idx; /**< RX queue register index */
@@ -167,7 +167,7 @@ struct i40e_tx_queue {
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
uint32_t txq_flags;
@@ -255,6 +255,7 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
+void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
/* For each value it means, datasheet of hardware can tell more details
*
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index f4036ea2..5e4e472a 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -100,7 +100,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
p1 = (uintptr_t)&mb1->rearm_data;
*(uint64_t *)p1 = rxq->mbuf_initializer;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
@@ -146,7 +146,7 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
/* map rss and vlan type to rss hash and vlan flag */
const vector unsigned char vlan_flags = (vector unsigned char){
0, 0, 0, 0,
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
@@ -538,7 +538,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
vector unsigned long descriptor = (vector unsigned long){
- pkt->buf_physaddr + pkt->data_off, high_qw};
+ pkt->buf_iova + pkt->data_off, high_qw};
*(vector unsigned long *)txdp = descriptor;
}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 694e91f3..b5685e2b 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -81,13 +81,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vdupq_n_u64(paddr);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
- paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vdupq_n_u64(paddr);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
}
@@ -137,7 +137,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
@@ -197,8 +197,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
}
#define PKTLEN_SHIFT 10
-
-#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
+#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
static inline void
desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
@@ -230,7 +229,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
struct i40e_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
int pos;
- uint64_t var;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
/* mask to shuffle from desc. to mbuf */
@@ -364,7 +362,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.2 get 4 pkts staterr value */
staterr = vzipq_u16(sterr_tmp1.val[1],
sterr_tmp2.val[1]).val[0];
- stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
@@ -429,6 +426,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rx_pkts[pos + 3]->next = NULL;
}
+ staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1);
+ staterr = vreinterpretq_u16_s16(
+ vshrq_n_s16(vreinterpretq_s16_u16(staterr),
+ I40E_UINT16_BIT - 1));
+ stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
/* D.3 copy final 1,2 data to rx_pkts */
@@ -438,10 +441,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc available number of desc */
- var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
- nb_pkts_recd += var;
- if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ if (unlikely(stat == 0)) {
+ nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
+ } else {
+ nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT;
break;
+ }
}
/* Update our internal tail pointer */
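The rework above drops the popcount over a DD-bit mask in favour of a contiguous count: each 16-bit lane of staterr carries the descriptor-done (DD) bit in bit 0, the shift pair splats that bit across the whole lane, and after inversion the number of trailing zero bits divided by 16 gives how many consecutive descriptors from position 0 are done. A scalar sketch of the same computation, not part of the patch, with hypothetical lane values:

/* Sketch only: scalar equivalent of the NEON DD-bit trick above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* four lanes: descriptors 0 and 1 done (DD=1), 2 and 3 not done */
	uint16_t lane[4] = {1, 1, 0, 0};
	uint64_t stat = 0;

	for (int i = 0; i < 4; i++) {
		/* vshlq_n_u16 / vshrq_n_s16 pair: 0 -> 0x0000, 1 -> 0xffff */
		uint16_t splat = (uint16_t)((int16_t)(lane[i] << 15) >> 15);
		stat |= (uint64_t)splat << (16 * i);
	}
	stat = ~stat; /* done lanes become zero */
	int nb = stat ? __builtin_ctzll(stat) / 16 : 4;
	printf("received %d descriptors\n", nb); /* prints 2 */
	return 0;
}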
@@ -515,7 +520,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
- uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+ uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw};
vst1q_u64((uint64_t *)txdp, descriptor);
}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 779f14e5..9d2d1f83 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -86,8 +86,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4],
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
@@ -549,7 +549,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
__m128i descriptor = _mm_set_epi64x(high_qw,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)txdp, descriptor);
}
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index d90313af..44316f64 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -302,7 +302,7 @@ i40e_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- (void)rte_memcpy(&shaper_profile->profile, profile,
+ rte_memcpy(&shaper_profile->profile, profile,
sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
shaper_profile, node);
@@ -374,11 +374,13 @@ i40e_tm_node_search(struct rte_eth_dev *dev,
}
static int
-i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
uint32_t priority, uint32_t weight,
struct rte_tm_node_params *params,
struct rte_tm_error *error)
{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
if (node_id == RTE_TM_NODE_ID_NULL) {
error->type = RTE_TM_ERROR_TYPE_NODE_ID;
error->message = "invalid node id";
@@ -409,8 +411,8 @@ i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
return -EINVAL;
}
- /* for root node */
- if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* for non-leaf node */
+ if (node_id >= hw->func_caps.num_tx_qp) {
if (params->nonleaf.wfq_weight_mode) {
error->type =
RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
@@ -433,7 +435,7 @@ i40e_node_param_check(uint32_t node_id, uint32_t parent_node_id,
return 0;
}
- /* for TC or queue node */
+ /* for leaf node */
if (params->leaf.cman) {
error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
error->message = "Congestion management not supported";
@@ -478,7 +480,7 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
- struct i40e_tm_shaper_profile *shaper_profile;
+ struct i40e_tm_shaper_profile *shaper_profile = NULL;
struct i40e_tm_node *tm_node;
struct i40e_tm_node *parent_node;
uint16_t tc_nb = 0;
@@ -494,7 +496,7 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
return -EINVAL;
}
- ret = i40e_node_param_check(node_id, parent_node_id, priority, weight,
+ ret = i40e_node_param_check(dev, node_id, priority, weight,
params, error);
if (ret)
return ret;
@@ -507,12 +509,15 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
/* check the shaper profile id */
- shaper_profile = i40e_shaper_profile_search(dev,
- params->shaper_profile_id);
- if (!shaper_profile) {
- error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
- error->message = "shaper profile not exist";
- return -EINVAL;
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = i40e_shaper_profile_search(
+ dev, params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile does not exist";
+ return -EINVAL;
+ }
}
/* root node if not have a parent */
@@ -544,12 +549,13 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->reference_count = 0;
tm_node->parent = NULL;
tm_node->shaper_profile = shaper_profile;
- (void)rte_memcpy(&tm_node->params, params,
+ rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
pf->tm_conf.root = tm_node;
/* increase the reference counter of the shaper profile */
- shaper_profile->reference_count++;
+ if (shaper_profile)
+ shaper_profile->reference_count++;
return 0;
}
@@ -615,9 +621,9 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->priority = priority;
tm_node->weight = weight;
tm_node->reference_count = 0;
- tm_node->parent = pf->tm_conf.root;
+ tm_node->parent = parent_node;
tm_node->shaper_profile = shaper_profile;
- (void)rte_memcpy(&tm_node->params, params,
+ rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
@@ -631,7 +637,8 @@ i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->parent->reference_count++;
/* increase the reference counter of the shaper profile */
- shaper_profile->reference_count++;
+ if (shaper_profile)
+ shaper_profile->reference_count++;
return 0;
}
@@ -678,14 +685,16 @@ i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
/* root node */
if (node_type == I40E_TM_NODE_TYPE_PORT) {
- tm_node->shaper_profile->reference_count--;
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
rte_free(tm_node);
pf->tm_conf.root = NULL;
return 0;
}
/* TC or queue node */
- tm_node->shaper_profile->reference_count--;
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
tm_node->parent->reference_count--;
if (node_type == I40E_TM_NODE_TYPE_TC) {
TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
@@ -753,15 +762,34 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev,
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->n_nodes_leaf_max = 0;
- cap->non_leaf_nodes_identical = true;
- cap->leaf_nodes_identical = true;
+ } else if (level_id == I40E_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->func_caps.num_tx_qp;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != I40E_TM_NODE_TYPE_QUEUE) {
cap->nonleaf.shaper_private_supported = true;
cap->nonleaf.shaper_private_dual_rate_supported = false;
cap->nonleaf.shaper_private_rate_min = 0;
/* 40Gbps -> 5GBps */
cap->nonleaf.shaper_private_rate_max = 5000000000ull;
cap->nonleaf.shaper_shared_n_max = 0;
- cap->nonleaf.sched_n_children_max = I40E_MAX_TRAFFIC_CLASS;
+ if (level_id == I40E_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ I40E_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->func_caps.num_tx_qp;
cap->nonleaf.sched_sp_n_priorities_max = 1;
cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
cap->nonleaf.sched_wfq_n_groups_max = 0;
@@ -771,21 +799,7 @@ i40e_level_capabilities_get(struct rte_eth_dev *dev,
return 0;
}
- /* TC or queue node */
- if (level_id == I40E_TM_NODE_TYPE_TC) {
- /* TC */
- cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
- cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
- cap->n_nodes_leaf_max = 0;
- cap->non_leaf_nodes_identical = true;
- } else {
- /* queue */
- cap->n_nodes_max = hw->func_caps.num_tx_qp;
- cap->n_nodes_nonleaf_max = 0;
- cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
- cap->non_leaf_nodes_identical = true;
- }
- cap->leaf_nodes_identical = true;
+ /* queue node */
cap->leaf.shaper_private_supported = true;
cap->leaf.shaper_private_dual_rate_supported = false;
cap->leaf.shaper_private_rate_min = 0;
@@ -888,11 +902,15 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev,
* If the port has a max bandwidth, the TCs should have none.
*/
/* port */
- bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ if (pf->tm_conf.root->shaper_profile)
+ bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
if (bw) {
/* check if any TC has a max bandwidth */
TAILQ_FOREACH(tm_node, tc_list, node) {
- if (tm_node->shaper_profile->profile.peak.rate) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
error->message = "no port and TC max bandwidth"
" in parallel";
@@ -936,7 +954,10 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev,
}
tc_map &= ~BIT_ULL(i);
- bw = tm_node->shaper_profile->profile.peak.rate;
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
if (!bw)
continue;
@@ -947,7 +968,10 @@ i40e_hierarchy_commit(struct rte_eth_dev *dev,
}
TAILQ_FOREACH(tm_node, queue_list, node) {
- bw = tm_node->shaper_profile->profile.peak.rate;
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
if (bw) {
error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
error->message = "not support queue QoS";
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index f12b7f4a..aeb92af3 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -35,13 +35,14 @@
#include <rte_tailq.h>
#include "base/i40e_prototype.h"
+#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"
#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -66,7 +67,7 @@ rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
}
int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -117,7 +118,7 @@ rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
@@ -170,7 +171,7 @@ i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
}
int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -219,7 +220,7 @@ rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
@@ -264,7 +265,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
@@ -325,7 +326,7 @@ i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = f->mac_info.filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
@@ -407,7 +408,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
@@ -430,7 +431,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
}
int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -473,7 +474,7 @@ rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
}
int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -514,7 +515,7 @@ rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
}
int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -555,7 +556,7 @@ rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
}
int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr)
{
struct i40e_mac_filter *f;
@@ -591,14 +592,16 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
/* Remove all existing mac */
TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
- i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+ if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
+ != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Delete MAC failed");
return 0;
}
/* Set vlan strip on/off for specific VF from host */
int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -633,7 +636,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
uint16_t vlan_id)
{
struct rte_eth_dev *dev;
@@ -685,7 +688,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
@@ -698,7 +701,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
return ret;
}
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
uint8_t on)
{
struct rte_eth_dev *dev;
@@ -747,7 +750,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
}
if (on) {
- (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
} else {
@@ -764,7 +767,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -819,7 +822,7 @@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
}
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
@@ -858,7 +861,7 @@ i40e_vlan_filter_count(struct i40e_vsi *vsi)
return count;
}
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
uint64_t vf_mask, uint8_t on)
{
struct rte_eth_dev *dev;
@@ -941,7 +944,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
}
int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
+rte_pmd_i40e_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats)
{
@@ -986,7 +989,7 @@ rte_pmd_i40e_get_vf_stats(uint8_t port,
}
int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
+rte_pmd_i40e_reset_vf_stats(uint16_t port,
uint16_t vf_id)
{
struct rte_eth_dev *dev;
@@ -1020,7 +1023,7 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port,
}
int
-rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -1109,7 +1112,7 @@ rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
}
int
-rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
uint8_t tc_num, uint8_t *bw_weight)
{
struct rte_eth_dev *dev;
@@ -1223,7 +1226,7 @@ rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
}
int
-rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
uint8_t tc_no, uint32_t bw)
{
struct rte_eth_dev *dev;
@@ -1341,7 +1344,7 @@ rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
}
int
-rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
@@ -1513,7 +1516,7 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
/* Check if the profile info exists */
static int
-i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
{
struct rte_eth_dev *dev = &rte_eth_devices[port];
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1557,7 +1560,7 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
}
int
-rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op)
{
@@ -1606,6 +1609,8 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return -EINVAL;
}
+ i40e_update_customized_info(dev, buff, size);
+
/* Find metadata segment */
metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
pkg_hdr);
@@ -1704,6 +1709,27 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
return status;
}
+/* Get number of TLV records in the section */
+static unsigned int
+i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
+{
+ unsigned int i, nb_rec, nb_tlv = 0;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ if (!sec)
+ return nb_tlv;
+
+ /* get number of records in the section */
+ nb_rec = sec->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = 0; i < nb_rec; ) {
+ tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
+ i += tlv->len;
+ nb_tlv++;
+ }
+ return nb_tlv;
+}
+
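i40e_get_tlv_section_size() above walks variable-length TLV records: each record's len field says how many record-sized units the TLV occupies, so the TLV count is usually smaller than the raw record count. A standalone sketch of the walk, not part of the patch, using hypothetical stand-in types:

/* Sketch only: counting variable-length TLVs across fixed-size records. */
#include <stdio.h>

struct tlv_rec { unsigned char len; unsigned char data[7]; };

int main(void)
{
	struct tlv_rec sec[6] = {
		{ .len = 2 }, { 0 },        /* TLV 0 spans two records */
		{ .len = 1 },               /* TLV 1 spans one record */
		{ .len = 3 }, { 0 }, { 0 }, /* TLV 2 spans three records */
	};
	unsigned int nb_rec = 6, nb_tlv = 0;

	for (unsigned int i = 0; i < nb_rec; ) {
		i += sec[i].len; /* advance by the TLV's record count */
		nb_tlv++;
	}
	printf("%u records hold %u TLVs\n", nb_rec, nb_tlv); /* 6 -> 3 */
	return 0;
}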
int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
uint8_t *info_buff, uint32_t info_size,
enum rte_pmd_i40e_package_info type)
@@ -1858,12 +1884,162 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
return I40E_SUCCESS;
}
+ /* get number of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
+ struct i40e_profile_section_header *proto;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_proto_info *pinfo;
+ struct i40e_profile_section_header *proto;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_proto_info);
+ for (i = 0; i < nb_proto_info; i++) {
+ pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
+ memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(proto);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = proto->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ pinfo[j].proto_id = tlv->data[0];
+ snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
+ (const char *)&tlv->data[1]);
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
+ struct i40e_profile_section_header *pctype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *pctype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(pctype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+
+ /* get number of records in the section */
+ nb_rec = pctype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
+ struct i40e_profile_section_header *ptype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *ptype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(ptype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = ptype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = j = 0; i < nb_rec; j++) {
+ tlv = (struct i40e_profile_tlv_section_record *)
+ &ptype[1 + i];
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ }
+ return I40E_SUCCESS;
+ }
+
PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
return -EINVAL;
}
int
-rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
@@ -1933,7 +2109,9 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
tnl != RTE_PTYPE_TUNNEL_VXLAN &&
tnl != RTE_PTYPE_TUNNEL_NVGRE &&
tnl != RTE_PTYPE_TUNNEL_GENEVE &&
- tnl != RTE_PTYPE_TUNNEL_GRENAT)
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_GTPC &&
+ tnl != RTE_PTYPE_TUNNEL_GTPU)
return -1;
if (il2 &&
@@ -1991,7 +2169,7 @@ static int check_invalid_ptype_mapping(
int
rte_pmd_i40e_ptype_mapping_update(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t count,
uint8_t exclusive)
@@ -2027,7 +2205,7 @@ rte_pmd_i40e_ptype_mapping_update(
return 0;
}
-int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
{
struct rte_eth_dev *dev;
@@ -2044,7 +2222,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
}
int rte_pmd_i40e_ptype_mapping_get(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t size,
uint16_t *count,
@@ -2078,7 +2256,7 @@ int rte_pmd_i40e_ptype_mapping_get(
return 0;
}
-int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
uint32_t target,
uint8_t mask,
uint32_t pkt_type)
@@ -2115,3 +2293,695 @@ int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
return 0;
}
+
+int
+rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ struct i40e_mac_filter_info mac_filter;
+ int ret;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ether_addr_copy(mac_addr, &mac_filter.mac_addr);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_pctype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ mapping_items[i].flow_type = i;
+ mapping_items[i].pctype = ad->pctypes_tbl[i];
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_flow_type_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_FLOW_TYPE_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++)
+ if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
+ mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
+ (mapping_items[i].pctype &
+ (1ULL << I40E_FILTER_PCTYPE_INVALID)))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ ad->pctypes_tbl[mapping_items[i].flow_type] =
+ mapping_items[i].pctype;
+ if (mapping_items[i].pctype)
+ ad->flow_types_mask |=
+ (1ULL << mapping_items[i].flow_type);
+ else
+ ad->flow_types_mask &=
+ ~(1ULL << mapping_items[i].flow_type);
+ }
+
+ for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+
+ return 0;
+}
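A hedged usage sketch for the new flow type mapping API above; this is not part of the patch, and the flow type value and pctype bit are hypothetical:

/* Sketch only: bind an application-defined flow type to a DDP pctype. */
#include <rte_pmd_i40e.h>

static int remap_flow_type(uint16_t port_id)
{
	struct rte_pmd_i40e_flow_type_mapping map = {
		.flow_type = 63,      /* hypothetical application-chosen flow type */
		.pctype = 1ULL << 34, /* hypothetical pctype from a DDP profile */
	};

	/* non-exclusive update: existing mappings are kept */
	return rte_pmd_i40e_flow_type_mapping_update(port_id, &map, 1, 0);
}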
+
+int
+rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct i40e_pf *pf;
+ int vf_id;
+ struct i40e_pf_vf *vf;
+ uint16_t vf_num;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vf_num = pf->vf_num;
+
+ for (vf_id = 0; vf_id < vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ mac = &vf->mac_addr;
+
+ if (is_same_ether_addr(mac, vf_mac))
+ return vf_id;
+ }
+
+ return -EINVAL;
+}
+
+static int
+i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint16_t i;
+ struct i40e_vsi *vsi = pf->main_vsi;
+ uint16_t queue_offset, bsf, tc_index;
+ struct i40e_vsi_context ctxt;
+ struct i40e_aqc_vsi_properties_data *vsi_info;
+ struct i40e_queue_regions *region_info =
+ &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!region_info->queue_region_number) {
+ PMD_INIT_LOG(ERR, "that region id has not been set before");
+ return ret;
+ }
+
+ memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ vsi_info = &ctxt.info;
+
+ memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
+ memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
+
+ /* Configure queue region and queue mapping parameters;
+ * for each enabled queue region, allocate queues to the region.
+ */
+
+ for (i = 0; i < region_info->queue_region_number; i++) {
+ tc_index = region_info->region[i].region_id;
+ bsf = rte_bsf32(region_info->region[i].queue_num);
+ queue_offset = region_info->region[i].queue_start_index;
+ vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
+ (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ }
+
+ /* Associate queue number with VSI; keep vsi->nb_qps unchanged */
+ vsi_info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ vsi_info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ return 0;
+}
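The tc_mapping encoding above stores the region's queue count as its log2 (rte_bsf32() of a power of two), which is why region sizes must be powers of two, next to the queue offset. A standalone sketch of the encoding, not part of the patch; the shift values are assumptions standing in for the I40E_AQ_VSI_TC_QUE_*_SHIFT macros:

/* Sketch only: tc_mapping packs {queue offset, log2(queue count)}. */
#include <stdint.h>
#include <stdio.h>

#define QUE_OFFSET_SHIFT 0 /* assumed per-field shift */
#define QUE_NUMBER_SHIFT 9 /* assumed per-field shift */

int main(void)
{
	uint16_t queue_offset = 8, queue_num = 4;
	uint16_t bsf = (uint16_t)__builtin_ctz(queue_num); /* log2(4) == 2 */
	uint16_t tc_mapping = (uint16_t)((queue_offset << QUE_OFFSET_SHIFT) |
					 (bsf << QUE_NUMBER_SHIFT));

	printf("tc_mapping=%#x (offset 8, 2^2 queues)\n", tc_mapping);
	return 0;
}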
+
+
+static int
+i40e_queue_region_set_region(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *conf_ptr)
+{
+ uint16_t i;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
+ conf_ptr->queue_num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return ret;
+ }
+
+ if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
+ > main_vsi->nb_used_qps) {
+ PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (conf_ptr->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number &&
+ i <= I40E_REGION_MAX_INDEX) {
+ info->region[i].region_id = conf_ptr->region_id;
+ info->region[i].queue_num = conf_ptr->queue_num;
+ info->region[i].queue_start_index =
+ conf_ptr->queue_start_index;
+ info->queue_region_number++;
+ } else {
+ PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i40e_queue_region_set_flowtype(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i, j;
+ uint16_t region_index, flowtype_index;
+
+ /* For the pctype or hardware flow type of the packet,
+ * the index for each type is defined in i40e_type.h
+ * as enum i40e_filter_pctype.
+ */
+
+ if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "that region id has not been set before");
+ ret = -EINVAL;
+ return ret;
+ }
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ if (rss_region_conf->hw_flowtype ==
+ info->region[i].hw_flowtype[j]) {
+ PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
+ return 0;
+ }
+ }
+ }
+
+ flowtype_index = info->region[region_index].flowtype_num;
+ info->region[region_index].hw_flowtype[flowtype_index] =
+ rss_region_conf->hw_flowtype;
+ info->region[region_index].flowtype_num++;
+
+ return 0;
+}
+
+static void
+i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint8_t hw_flowtype;
+ uint32_t pfqf_hregion;
+ uint16_t i, j, index;
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ /* For the pctype or hardware flow type of the packet,
+ * the index for each type is defined in i40e_type.h
+ * as enum i40e_filter_pctype.
+ */
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ hw_flowtype = info->region[i].hw_flowtype[j];
+ index = hw_flowtype >> 3;
+ pfqf_hregion =
+ i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
+
+ if ((hw_flowtype & 0x7) == 0) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_0_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 1) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_1_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 2) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_2_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 3) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_3_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 4) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_4_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 5) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_5_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 6) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_6_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
+ } else {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_7_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
+ }
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
+ pfqf_hregion);
+ }
+ }
+}
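The eight-way chain above programs one of eight identical {region id, override enable} field pairs inside a single PFQF_HREGION register, with the slot selected by hw_flowtype & 0x7 and the register index by hw_flowtype >> 3. Assuming the per-slot field shifts are uniform (as the _0.._7 macro naming suggests), the same update can be expressed generically; this is a sketch with assumed field strides, not a drop-in replacement:

/* Sketch only: generic form of the per-slot register update. */
#include <stdint.h>

#define HREGION_REGION_0_SHIFT   0 /* assumed */
#define HREGION_OVERRIDE_0_SHIFT 3 /* assumed */
#define HREGION_SLOT_STRIDE      4 /* assumed bits per slot */

static uint32_t
hregion_set(uint32_t reg, uint8_t hw_flowtype, uint8_t region_id)
{
	unsigned int slot = hw_flowtype & 0x7; /* slot within the register */

	reg |= (uint32_t)region_id <<
		(HREGION_REGION_0_SHIFT + slot * HREGION_SLOT_STRIDE);
	reg |= 1u <<
		(HREGION_OVERRIDE_0_SHIFT + slot * HREGION_SLOT_STRIDE);
	return reg;
}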
+
+static int
+i40e_queue_region_set_user_priority(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, region_index;
+
+ if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the region_id max index is 7");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "that region id has not been set before");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ if (info->region[i].user_priority[j] ==
+ rss_region_conf->user_priority) {
+ PMD_DRV_LOG(ERR, "that user priority has been set before");
+ return 0;
+ }
+ }
+ }
+
+ j = info->region[region_index].user_priority_num;
+ info->region[region_index].user_priority[j] =
+ rss_region_conf->user_priority;
+ info->region[region_index].user_priority_num++;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_dcb_configure(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ struct i40e_dcbx_config dcb_cfg_local;
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, prio_index, region_index;
+ uint8_t tc_map, tc_bw, bw_lf;
+
+ if (!info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "No queue region been set before");
+ return ret;
+ }
+
+ dcb_cfg = &dcb_cfg_local;
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
+ bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ prio_index = info->region[i].user_priority[j];
+ region_index = info->region[i].region_id;
+ dcb_cfg->etscfg.prioritytable[prio_index] =
+ region_index;
+ }
+ }
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
+
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = tc_map;
+
+ /* Copy the new config to the current config */
+ *old_cfg = *dcb_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+
+ return 0;
+}
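A worked instance of the bandwidth split above: with I40E_MAX_PERCENT == 100 and three queue regions, each TC gets 100 / 3 == 33 percent and the 100 % 3 == 1 leftover point goes to the first TC, so the table sums to exactly 100. A standalone sketch, not part of the patch:

/* Sketch only: even bandwidth split with remainder distribution. */
#include <stdio.h>

int main(void)
{
	unsigned int n = 3, tcbw[8] = {0};
	unsigned int tc_bw = 100 / n; /* 33 */
	unsigned int bw_lf = 100 % n; /* 1  */

	for (unsigned int i = 0; i < n; i++)
		tcbw[i] = tc_bw;
	for (unsigned int i = 0; i < bw_lf; i++)
		tcbw[i]++; /* 34, 33, 33 -> sums to 100 */

	printf("%u %u %u\n", tcbw[0], tcbw[1], tcbw[2]);
	return 0;
}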
+
+int
+i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+ struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+
+ if (on) {
+ i40e_queue_region_pf_flowtype_conf(hw, pf);
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+ return ret;
+ }
+
+ ret = i40e_queue_region_dcb_configure(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ info->queue_region_number = 1;
+ info->region[0].queue_num = main_vsi->nb_used_qps;
+ info->region[0].queue_start_index = 0;
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+
+ i40e_init_queue_region_conf(dev);
+
+ return 0;
+}
+
+static int
+i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+ if (!hena)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_get_all_info(struct i40e_pf *pf,
+ struct i40e_queue_regions *regions_ptr)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ rte_memcpy(regions_ptr, info,
+ sizeof(struct i40e_queue_regions));
+
+ return 0;
+}
+
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+ enum rte_pmd_i40e_queue_region_op op_type, void *arg)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (i40e_queue_region_pf_check_rss(pf))
+ return -ENOTSUP;
+
+ /* The queue region feature only supports the PF for now. It should
+ * be called after dev_start, and the configuration is cleared on
+ * dev_stop. "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" is the
+ * commit operation for the other configuration calls: all queue
+ * region configuration from the application is first only recorded
+ * in DPDK software, stored in the driver, and only on "FLUSH_ON" is
+ * it committed to hardware, because the PMD has to program the
+ * hardware configuration in one step and therefore records all
+ * application commands first.
+ * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" clears all current
+ * queue region configuration and restores the DPDK i40e driver
+ * defaults used at start up.
+ */
+
+ switch (op_type) {
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
+ ret = i40e_queue_region_set_region(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
+ ret = i40e_queue_region_set_flowtype(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
+ ret = i40e_queue_region_set_user_priority(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
+ ret = i40e_queue_region_get_all_info(pf,
+ (struct i40e_queue_regions *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "op type (%d) not supported",
+ op_type);
+ ret = -EINVAL;
+ }
+
+ I40E_WRITE_FLUSH(hw);
+
+ return ret;
+}
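A hedged usage sketch of the staged configure-then-flush flow described in the comment above; this is not part of the patch, and the region, queue, and flow type values are hypothetical:

/* Sketch only: configure a queue region in software, then commit. */
#include <rte_pmd_i40e.h>

static int setup_queue_region(uint16_t port_id)
{
	struct rte_pmd_i40e_queue_region_conf conf = {
		.region_id = 0,
		.queue_start_index = 0,
		.queue_num = 4, /* must be a power of two, at most 64 */
	};
	int ret;

	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret)
		return ret;

	conf.hw_flowtype = 31; /* hypothetical pctype index */
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, &conf);
	if (ret)
		return ret;

	/* nothing reaches hardware until the flush */
	return rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, NULL);
}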
+
+int rte_pmd_i40e_flow_add_del_packet_template(
+ uint16_t port,
+ const struct rte_pmd_i40e_pkt_template_conf *conf,
+ uint8_t add)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_fdir_filter_conf filter_conf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ memset(&filter_conf, 0, sizeof(filter_conf));
+ filter_conf.soft_id = conf->soft_id;
+ filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
+ filter_conf.input.flow.raw_flow.packet = conf->input.packet;
+ filter_conf.input.flow.raw_flow.length = conf->input.length;
+ filter_conf.input.flow_ext.pkt_template = true;
+
+ filter_conf.action.rx_queue = conf->action.rx_queue;
+ filter_conf.action.behavior =
+ (enum i40e_fdir_behavior)conf->action.behavior;
+ filter_conf.action.report_status =
+ (enum i40e_fdir_status)conf->action.report_status;
+ filter_conf.action.flex_off = conf->action.flex_off;
+
+ return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
+}
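A hedged usage sketch for rte_pmd_i40e_flow_add_del_packet_template() above; this is not part of the patch, and the pctype, template bytes, and soft_id are hypothetical:

/* Sketch only: add a raw packet template filter steering to queue 2. */
#include <rte_pmd_i40e.h>

static int add_template_filter(uint16_t port_id)
{
	static uint8_t tmpl[64]; /* raw packet bytes to match against */
	struct rte_pmd_i40e_pkt_template_conf conf = {
		.input = {
			.pctype = 31, /* hypothetical pctype */
			.packet = tmpl,
			.length = sizeof(tmpl),
		},
		.action = {
			.rx_queue = 2,
			.behavior = RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
			.report_status =
				RTE_PMD_I40E_PKT_TEMPLATE_NO_REPORT_STATUS,
		},
		.soft_id = 1, /* hypothetical unique filter index */
	};

	return rte_pmd_i40e_flow_add_del_packet_template(port_id, &conf, 1);
}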
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 356fa89d..580ca4ae 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -88,10 +88,48 @@ enum rte_pmd_i40e_package_info {
RTE_PMD_I40E_PKG_INFO_HEADER,
RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM,
+ RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM,
+ RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_NUM,
+ RTE_PMD_I40E_PKG_INFO_PTYPE_LIST,
RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
};
-#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+/**
+ * Option types of queue region.
+ */
+enum rte_pmd_i40e_queue_region_op {
+ RTE_PMD_I40E_RSS_QUEUE_REGION_UNDEFINED,
+ /** set a queue region */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_SET,
+ /** set a pctype (hardware flow type) for a PF queue region */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET,
+ /** set a user priority for a queue region */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET,
+ /**
+ * All queue region configuration from the application is first
+ * only kept in DPDK software, stored in the driver; only after
+ * "FLUSH_ON" is it committed to hardware. The PMD has to program
+ * the hardware configuration in one step, so it records all
+ * application commands first.
+ */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON,
+ /**
+ * "FLUSH_OFF " is just clean all configuration about queue
+ * region just now, and restore all to DPDK i40e driver default
+ * config when start up.
+ */
+ RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF,
+ RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET,
+ RTE_PMD_I40E_RSS_QUEUE_REGION_OP_MAX
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_REGION_MAX_NUM 8
+#define RTE_PMD_I40E_MAX_USER_PRIORITY 8
/**
* Version for dynamic device personalization.
@@ -133,6 +171,25 @@ struct rte_pmd_i40e_profile_list {
struct rte_pmd_i40e_profile_info p_info[1];
};
+#define RTE_PMD_I40E_PROTO_NUM 6
+#define RTE_PMD_I40E_PROTO_UNUSED 0xFF
+
+/**
+ * Protocols information stored in profile
+ */
+struct rte_pmd_i40e_proto_info {
+ uint8_t proto_id;
+ char name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Packet classification / packet type information stored in a profile
+ */
+struct rte_pmd_i40e_ptype_info {
+ uint8_t ptype_id;
+ uint8_t protocols[RTE_PMD_I40E_PROTO_NUM];
+};
+
/**
* ptype mapping table only accept RTE_PTYPE_XXX or "user defined" ptype.
* A ptype with MSB set will be regarded as a user defined ptype.
@@ -146,6 +203,141 @@ struct rte_pmd_i40e_ptype_mapping {
};
/**
+ * Queue region related information.
+ */
+struct rte_pmd_i40e_queue_region_conf {
+ /** the region id for this configuration */
+ uint8_t region_id;
+ /** the pctype or hardware flow type of the packet;
+ * the index for each type is defined in i40e_type.h
+ * as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype;
+ /** the start queue index for this region */
+ uint8_t queue_start_index;
+ /** the number of queues in this queue region */
+ uint8_t queue_num;
+ /** the packet's user priority for this region */
+ uint8_t user_priority;
+};
+
+/* queue region info */
+struct rte_pmd_i40e_queue_region_info {
+ /** the region id for this configuration */
+ uint8_t region_id;
+ /** the start queue index for this region */
+ uint8_t queue_start_index;
+ /** the number of queues in this queue region */
+ uint8_t queue_num;
+ /** the number of user priorities for this region */
+ uint8_t user_priority_num;
+ /** the packet's user priority for this region */
+ uint8_t user_priority[RTE_PMD_I40E_MAX_USER_PRIORITY];
+ /** the number of flow types for this region */
+ uint8_t flowtype_num;
+ /**
+ * the pctype or hardware flow type of the packet;
+ * the index for each type is defined in i40e_type.h
+ * as enum i40e_filter_pctype.
+ */
+ uint8_t hw_flowtype[RTE_PMD_I40E_PCTYPE_MAX];
+};
+
+struct rte_pmd_i40e_queue_regions {
+ /** the total number of queue regions for this port */
+ uint16_t queue_region_number;
+ struct rte_pmd_i40e_queue_region_info
+ region[RTE_PMD_I40E_REGION_MAX_NUM];
+};
+
+/**
+ * Behavior to be taken if the raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_behavior {
+ RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
+ RTE_PMD_I40E_PKT_TEMPLATE_REJECT,
+ RTE_PMD_I40E_PKT_TEMPLATE_PASSTHRU,
+};
+
+/**
+ * Flow director report status
+ * It defines what will be reported if raw packet template is matched.
+ */
+enum rte_pmd_i40e_pkt_template_status {
+ /** report nothing */
+ RTE_PMD_I40E_PKT_TEMPLATE_NO_REPORT_STATUS,
+ /** only report FD ID */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID,
+ /** report FD ID and 4 flex bytes */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4,
+ /** report 8 flex bytes */
+ RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8,
+};
+
+/**
+ * A structure used to define an action when raw packet template is matched.
+ */
+struct rte_pmd_i40e_pkt_template_action {
+ /** queue to assign the packet to if the raw packet template matches */
+ uint16_t rx_queue;
+ /** behavior to be taken */
+ enum rte_pmd_i40e_pkt_template_behavior behavior;
+ /** status report option */
+ enum rte_pmd_i40e_pkt_template_status report_status;
+ /**
+ * If report_status is RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID_FLEX_4 or
+ * RTE_PMD_I40E_PKT_TEMPLATE_REPORT_FLEX_8, flex_off specifies
+ * where the reported flex bytes start from in flexible payload.
+ */
+ uint8_t flex_off;
+};
+
+/**
+ * A structure used to define the input for raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_input {
+ /** the pctype used for raw packet template */
+ uint16_t pctype;
+ /** the buffer containing the raw packet template */
+ void *packet;
+ /** the length of the raw packet template buffer */
+ uint32_t length;
+};
+
+/**
+ * A structure used to define the configuration parameters
+ * for raw packet template.
+ */
+struct rte_pmd_i40e_pkt_template_conf {
+ /** the input for raw packet template. */
+ struct rte_pmd_i40e_pkt_template_input input;
+	/** the action to be taken when the raw packet template is matched */
+	struct rte_pmd_i40e_pkt_template_action action;
+	/** ID, a unique software index for the raw packet template filter */
+ uint32_t soft_id;
+};
+
+/**
+ * Add or remove a raw packet template filter to/from Flow Director.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param conf
+ * Specifies configuration parameters of raw packet template filter.
+ * @param add
+ *   Specifies the action to be taken: add or remove the raw packet
+ *   template filter.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *conf* invalid.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_flow_add_del_packet_template(
+ uint16_t port,
+ const struct rte_pmd_i40e_pkt_template_conf *conf,
+ uint8_t add);
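
A minimal usage sketch of this API, using only the structures defined above; the PCTYPE index, port id, template buffer and soft_id are illustrative assumptions and error handling is elided:

    #include <rte_pmd_i40e.h>

    /* Sketch: install a raw packet template filter steering matches to queue 0. */
    static int
    add_template_filter(uint16_t port_id, void *pkt_buf, uint32_t pkt_len)
    {
    	struct rte_pmd_i40e_pkt_template_conf conf = {
    		.input = {
    			.pctype = 43,      /* assumed PCTYPE, see enum i40e_filter_pctype */
    			.packet = pkt_buf, /* caller-provided raw packet template */
    			.length = pkt_len,
    		},
    		.action = {
    			.rx_queue = 0,
    			.behavior = RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
    			.report_status = RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID,
    			.flex_off = 0, /* only used by the FLEX report modes */
    		},
    		.soft_id = 1, /* illustrative software index */
    	};

    	/* add == 1 adds the filter; add == 0 would remove it */
    	return rte_pmd_i40e_flow_add_del_packet_template(port_id, &conf, 1);
    }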
+
+/**
* Notify VF when PF link status changes.
*
* @param port
@@ -157,7 +349,7 @@ struct rte_pmd_i40e_ptype_mapping {
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* invalid.
*/
-int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+int rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf);
/**
* Enable/Disable VF MAC anti spoofing.
@@ -174,7 +366,7 @@ int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port,
uint16_t vf_id,
uint8_t on);
@@ -193,7 +385,7 @@ int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port,
uint16_t vf_id,
uint8_t on);
@@ -210,7 +402,7 @@ int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+int rte_pmd_i40e_set_tx_loopback(uint16_t port,
uint8_t on);
/**
@@ -228,7 +420,7 @@ int rte_pmd_i40e_set_tx_loopback(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+int rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port,
uint16_t vf_id,
uint8_t on);
@@ -247,7 +439,7 @@ int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port,
uint16_t vf_id,
uint8_t on);
@@ -271,7 +463,7 @@ int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr);
/**
@@ -291,7 +483,7 @@ int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable vf vlan insert
@@ -309,7 +501,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
uint16_t vlan_id);
/**
@@ -328,7 +520,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
uint8_t on);
/**
@@ -347,7 +539,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on);
/**
* Enable/Disable VF VLAN filter
@@ -368,7 +560,7 @@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
uint64_t vf_mask, uint8_t on);
/**
@@ -393,7 +585,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_get_vf_stats(uint8_t port,
+int rte_pmd_i40e_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats);
@@ -409,7 +601,7 @@ int rte_pmd_i40e_get_vf_stats(uint8_t port,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+int rte_pmd_i40e_reset_vf_stats(uint16_t port,
uint16_t vf_id);
/**
@@ -434,7 +626,7 @@ int rte_pmd_i40e_reset_vf_stats(uint8_t port,
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
+int rte_pmd_i40e_set_vf_max_bw(uint16_t port,
uint16_t vf_id,
uint32_t bw);
@@ -459,7 +651,7 @@ int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port,
uint16_t vf_id,
uint8_t tc_num,
uint8_t *bw_weight);
@@ -484,7 +676,7 @@ int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
+int rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port,
uint16_t vf_id,
uint8_t tc_no,
uint32_t bw);
@@ -502,7 +694,7 @@ int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
+int rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map);
/**
* Load/Unload a ddp package
@@ -523,7 +715,7 @@ int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
* - (-EACCES) if profile does not exist.
* - (-ENOTSUP) if operation not supported.
*/
-int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+int rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op);
@@ -561,7 +753,7 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
+int rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size);
/**
* Update hardware defined ptype to software defined packet type
@@ -581,7 +773,7 @@ int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
* set other PTYPEs maps to PTYPE_UNKNOWN.
*/
int rte_pmd_i40e_ptype_mapping_update(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t count,
uint8_t exclusive);
@@ -593,7 +785,7 @@ int rte_pmd_i40e_ptype_mapping_update(
* @param port
* pointer to port identifier of the device
*/
-int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port);
/**
* Get hardware defined ptype to software defined ptype
@@ -612,7 +804,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
* -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN.
*/
int rte_pmd_i40e_ptype_mapping_get(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t size,
uint16_t *count,
@@ -632,9 +824,113 @@ int rte_pmd_i40e_ptype_mapping_get(
* @param pkt_type
* the new packet type to overwrite
*/
-int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
uint32_t target,
uint8_t mask,
uint32_t pkt_type);
+/**
+ * Add a VF MAC address.
+ *
+ * Add an extra MAC address for the VF. The existing MAC addresses
+ * remain effective.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
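
A brief sketch of appending one extra address to a VF; port 0, VF 0 and the locally administered MAC value are illustrative assumptions:

    #include <rte_ether.h>
    #include <rte_pmd_i40e.h>

    static int
    add_extra_vf_mac(void)
    {
    	/* locally administered address, illustrative value only */
    	struct ether_addr addr = {
    		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
    	};

    	return rte_pmd_i40e_add_vf_mac_addr(0 /* port */, 0 /* vf_id */, &addr);
    }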
+
+#define RTE_PMD_I40E_PCTYPE_MAX 64
+#define RTE_PMD_I40E_FLOW_TYPE_MAX 64
+
+struct rte_pmd_i40e_flow_type_mapping {
+	uint16_t flow_type; /**< software defined flow type */
+ uint64_t pctype; /**< hardware defined pctype */
+};
+
+/**
+ * Update hardware defined pctype to software defined flow type
+ * mapping table.
+ *
+ * @param port
+ *   the port identifier of the device.
+ * @param mapping_items
+ * the base address of the mapping items array.
+ * @param count
+ * number of mapping items.
+ * @param exclusive
+ *   the flag indicating the PCTYPE mapping update method.
+ *   -(0) only overwrite referred PCTYPE mappings,
+ *	keep other PCTYPE mappings unchanged.
+ *   -(!0) overwrite referred PCTYPE mappings,
+ *	set all other PCTYPE mappings to PCTYPE_INVALID.
+ */
+int rte_pmd_i40e_flow_type_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
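
A sketch of a single-item, non-exclusive update; the flow type and PCTYPE values are illustrative, and treating pctype as a bit mask of hardware PCTYPE indexes is an assumption inferred from the get() direction below:

    #include <rte_pmd_i40e.h>

    static int
    map_flow_type(uint16_t port_id)
    {
    	struct rte_pmd_i40e_flow_type_mapping item = {
    		.flow_type = 2,       /* software flow type (illustrative) */
    		.pctype = 1ULL << 31, /* assumed bit mask of hardware PCTYPEs */
    	};

    	/* exclusive == 0: leave all other mappings untouched */
    	return rte_pmd_i40e_flow_type_mapping_update(port_id, &item, 1, 0);
    }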
+
+/**
+ * Get software defined flow type to hardware defined pctype
+ * mapping items.
+ *
+ * @param port
+ *   the port identifier of the device.
+ * @param mapping_items
+ * the base address of the array to store returned items.
+ *   the array should be allocated by the caller with a minimum size of
+ *   RTE_PMD_I40E_FLOW_TYPE_MAX items.
+ */
+int rte_pmd_i40e_flow_type_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items);
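
A sketch of the caller-side allocation this contract requires:

    #include <rte_pmd_i40e.h>

    static int
    dump_flow_type_map(uint16_t port_id)
    {
    	/* per the contract above: at least RTE_PMD_I40E_FLOW_TYPE_MAX items */
    	struct rte_pmd_i40e_flow_type_mapping map[RTE_PMD_I40E_FLOW_TYPE_MAX];

    	return rte_pmd_i40e_flow_type_mapping_get(port_id, map);
    }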
+
+/**
+ * Reset hardware defined pctype to software defined flow type
+ * mapping table to default.
+ *
+ * @param port
+ *   the port identifier of the device.
+ */
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port);
+
+/**
+ * On the PF, find VF index based on VF MAC address
+ *
+ * @param port
+ *   the port identifier of the device.
+ * @param vf_mac
+ *   the MAC address of the VF whose index is requested.
+ * @return
+ *   The VF index if successful.
+ *   -EINVAL: the VF MAC address does not exist for this port.
+ *   -ENOTSUP: i40e is not supported on this port.
+ */
+int rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
+ const struct ether_addr *vf_mac);
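
A usage sketch showing the negative-value error convention; port_id and the MAC value are assumed to come from the caller:

    #include <stdio.h>
    #include <rte_ether.h>
    #include <rte_pmd_i40e.h>

    static void
    report_vf(uint16_t port_id, const struct ether_addr *vf_mac)
    {
    	int vf_id = rte_pmd_i40e_query_vfid_by_mac(port_id, vf_mac);

    	if (vf_id >= 0)
    		printf("VF index: %d\n", vf_id);
    	else /* -EINVAL: unknown MAC, -ENOTSUP: not an i40e port */
    		printf("lookup failed: %d\n", vf_id);
    }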
+
+/**
+ * Perform RSS queue region configuration on the port, according to the
+ * given operation type.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param op_type
+ * Queue region operation type
+ * @param arg
+ * Queue region operation type specific data
+ */
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+ enum rte_pmd_i40e_queue_region_op op_type, void *arg);
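
A sketch mapping queues 0-3 to region 0 using the queue region structures above. The op value RTE_PMD_I40E_RSS_QUEUE_REGION_SET is an assumption (enum rte_pmd_i40e_queue_region_op is not shown in this hunk), matching how testpmd drives this API elsewhere in the release:

    #include <rte_pmd_i40e.h>

    static int
    set_queue_region(uint16_t port_id)
    {
    	struct rte_pmd_i40e_queue_region_conf conf = {
    		.region_id = 0,
    		.queue_start_index = 0,
    		.queue_num = 4,
    	};

    	/* op value assumed from enum rte_pmd_i40e_queue_region_op */
    	return rte_pmd_i40e_rss_queue_region_conf(port_id,
    			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
    }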
+
#endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index 20cc9801..ebbd24e0 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -45,3 +45,16 @@ DPDK_17.08 {
rte_pmd_i40e_get_ddp_info;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_pmd_i40e_add_vf_mac_addr;
+ rte_pmd_i40e_flow_add_del_packet_template;
+ rte_pmd_i40e_flow_type_mapping_update;
+ rte_pmd_i40e_flow_type_mapping_get;
+ rte_pmd_i40e_flow_type_mapping_reset;
+ rte_pmd_i40e_query_vfid_by_mac;
+ rte_pmd_i40e_rss_queue_region_conf;
+
+} DPDK_17.08;
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 5e57cb35..511a64eb 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -41,7 +41,7 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_pmd_ixgbe_version.map
-LIBABIVER := 1
+LIBABIVER := 2
ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
@@ -82,12 +82,15 @@ endif
endif
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
#
# Add extra flags for base driver files (also known as shared code)
# to disable warnings in them
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
@@ -118,11 +121,13 @@ SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
else
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
endif
-
ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 4aab278d..bb5dfd2a 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -161,4 +161,12 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms) \
+do { \
+ uint32_t cnt = poll_ms; \
+ IXGBE_WRITE_REG(hw, (reg), (val)); \
+ while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--)) \
+ rte_delay_ms(1); \
+} while (0)
+
#endif /* _IXGBE_OS_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 22171d86..ff19a564 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -48,10 +48,10 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -61,6 +61,9 @@
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -169,13 +172,14 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -218,7 +222,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -265,16 +269,17 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
@@ -518,6 +523,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_set_link_up = ixgbe_dev_set_link_up,
.dev_set_link_down = ixgbe_dev_set_link_down,
.dev_close = ixgbe_dev_close,
+ .dev_reset = ixgbe_dev_reset,
.promiscuous_enable = ixgbe_dev_promiscuous_enable,
.promiscuous_disable = ixgbe_dev_promiscuous_disable,
.allmulticast_enable = ixgbe_dev_allmulticast_enable,
@@ -608,6 +614,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.xstats_reset = ixgbevf_dev_stats_reset,
.xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
+ .dev_reset = ixgbevf_dev_reset,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get = ixgbevf_dev_info_get,
@@ -1163,8 +1170,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
+#ifdef RTE_LIBRTE_SECURITY
+	/* Initialize security_ctx only for the primary process */
+ eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+ if (eth_dev->security_ctx == NULL)
+ return -ENOMEM;
+#endif
+
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -1332,12 +1345,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize l2 tunnel filter list & hash */
ixgbe_l2_tn_filter_init(eth_dev);
- TAILQ_INIT(&filter_ntuple_list);
- TAILQ_INIT(&filter_ethertype_list);
- TAILQ_INIT(&filter_syn_list);
- TAILQ_INIT(&filter_fdir_list);
- TAILQ_INIT(&filter_l2_tunnel_list);
- TAILQ_INIT(&ixgbe_flow_list);
+ /* initialize flow filter lists */
+ ixgbe_filterlist_init();
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
@@ -1401,6 +1410,10 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* Remove all Traffic Manager configuration */
ixgbe_tm_conf_uninit(eth_dev);
+#ifdef RTE_LIBRTE_SECURITY
+ rte_free(eth_dev->security_ctx);
+#endif
+
return 0;
}
@@ -1627,7 +1640,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1781,7 +1793,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_ixgbe_pmd = {
.id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_ixgbe_pci_probe,
.remove = eth_ixgbe_pci_remove,
};
@@ -1803,7 +1816,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
*/
static struct rte_pci_driver rte_ixgbevf_pmd = {
.id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_ixgbevf_pci_probe,
.remove = eth_ixgbevf_pci_remove,
};
@@ -1959,9 +1972,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
rxq = dev->data->rx_queues[queue];
if (on)
- rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
else
- rxq->vlan_flags = PKT_RX_VLAN_PKT;
+ rxq->vlan_flags = PKT_RX_VLAN;
}
static void
@@ -2125,7 +2138,7 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
*/
}
-static void
+static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
@@ -2148,6 +2161,8 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
ixgbe_vlan_hw_extend_disable(dev);
}
+
+ return 0;
}
static void
@@ -2504,8 +2519,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
* - fixed speed: TODO implement
*/
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, fix speed not supported",
+ dev->data->port_id);
return -EINVAL;
}
@@ -2568,9 +2584,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_set(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
/* Enable vlan filtering for VMDq */
@@ -2842,7 +2862,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
}
/*
- * Reest and stop device.
+ * Reset and stop device.
*/
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
@@ -2865,6 +2885,32 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
+/*
+ * Reset PF device.
+ */
+static int
+ixgbe_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+	/* When a DPDK PMD PF begins to reset the PF port, it should notify
+	 * all its VFs so that they stay aligned with it. The detailed
+	 * notification mechanism is PMD specific; for the ixgbe PF it is
+	 * rather complex. To avoid unexpected behavior in the VFs, reset of
+	 * a PF with SR-IOV activated is currently not supported. It might
+	 * be supported later.
+ */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_ixgbe_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_ixgbe_dev_init(dev);
+
+ return ret;
+}
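
From the application side this op is reached through rte_eth_dev_reset(), added in this release alongside the dev_reset callbacks. A hedged sketch of the expected recovery flow; the conf/queue parameters are assumed to be the caller's, and queue re-setup is elided:

    #include <rte_ethdev.h>

    static int
    recover_port(uint16_t port_id, const struct rte_eth_conf *conf,
    	     uint16_t nb_rxq, uint16_t nb_txq)
    {
    	int ret = rte_eth_dev_reset(port_id);

    	if (ret)
    		return ret; /* e.g. -ENOTSUP while SR-IOV is active, see above */

    	/* the reset uninits and re-inits the port; the caller must
    	 * reconfigure it, re-set up the queues and restart it */
    	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
    	if (ret)
    		return ret;
    	/* ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() ... */
    	return rte_eth_dev_start(port_id);
    }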
+
static void
ixgbe_read_stats_registers(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *hw_stats,
@@ -3077,7 +3123,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
/*
* This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
*/
-static void
+static int
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct ixgbe_hw *hw =
@@ -3099,7 +3145,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
&total_qbrc, &total_qprc, &total_qprdc);
if (stats == NULL)
- return;
+ return -EINVAL;
/* Fill out the rte_eth_stats statistics structure */
stats->ipackets = total_qprc;
@@ -3130,6 +3176,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Tx Errors */
stats->oerrors = 0;
+ return 0;
}
static void
@@ -3541,7 +3588,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return IXGBEVF_NB_XSTATS;
}
-static void
+static int
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3550,12 +3597,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ixgbevf_update_stats(dev);
if (stats == NULL)
- return;
+ return -EINVAL;
stats->ipackets = hw_stats->vfgprc;
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
+ return 0;
}
static void
@@ -3665,6 +3713,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw->mac.type == ixgbe_mac_X550EM_a)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+#ifdef RTE_LIBRTE_SECURITY
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -3927,6 +3980,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
memset(&old, 0, sizeof(old));
rte_ixgbe_dev_atomic_read_link_status(dev, &old);
@@ -4993,13 +5047,21 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_set(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+ ixgbe_dev_clear_queues(dev);
+ return err;
+ }
ixgbevf_dev_rxtx_start(dev);
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
- intr_vector = dev->data->nb_rx_queues;
+		/* According to the datasheet, only vectors 0/1/2 can be used;
+		 * currently only one vector is used for the Rx queues.
+ */
+ intr_vector = 1;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
}
@@ -5016,6 +5078,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
}
ixgbevf_configure_msix(dev);
+	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
+	 * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+	 * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
+	 * is not cleared, the following rte_intr_enable() will fail when it
+	 * tries to map Rx queue interrupts to other VFIO vectors.
+	 * So clear the uio/vfio intr/eventfd first to avoid failure.
+ */
+ rte_intr_disable(intr_handle);
+
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
@@ -5078,6 +5149,23 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
ixgbevf_remove_mac_addr(dev, 0);
}
+/*
+ * Reset VF device
+ */
+static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = eth_ixgbevf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_ixgbevf_dev_init(dev);
+
+ return ret;
+}
+
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5153,7 +5241,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
-static void
+static int
ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct ixgbe_hw *hw =
@@ -5168,6 +5256,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
for (i = 0; i < hw->mac.max_rx_queues; i++)
ixgbevf_vlan_strip_queue_set(dev, i, on);
}
+
+ return 0;
}
int
@@ -5450,13 +5540,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
/* write pool mirror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
mp_msb);
}
/* write VLAN mirror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
mv_msb);
@@ -5509,9 +5599,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = IXGBE_MISC_VEC_ID;
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask |= (1 << IXGBE_MISC_VEC_ID);
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ mask |= (1 << vec);
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -5526,9 +5619,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask &= ~(1 << IXGBE_MISC_VEC_ID);
+ if (rte_intr_allow_others(intr_handle))
+ vec = IXGBE_RX_VEC_START;
+ mask &= ~(1 << vec);
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -5670,6 +5768,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ uint32_t base = IXGBE_MISC_VEC_ID;
/* Configure VF other cause ivar */
ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
@@ -5680,6 +5779,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
if (!rte_intr_dp_is_en(intr_handle))
return;
+ if (rte_intr_allow_others(intr_handle)) {
+ base = IXGBE_RX_VEC_START;
+ vector_idx = IXGBE_RX_VEC_START;
+ }
+
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
@@ -5687,6 +5791,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
+ if (vector_idx < base + intr_handle->nb_efd - 1)
+ vector_idx++;
}
}
@@ -6313,7 +6419,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
sizeof(struct ixgbe_5tuple_filter), 0);
if (filter == NULL)
return -ENOMEM;
- (void)rte_memcpy(&filter->filter_info,
+ rte_memcpy(&filter->filter_info,
&filter_5tuple,
sizeof(struct ixgbe_5tuple_filter_info));
filter->queue = ntuple_filter->queue;
@@ -7153,6 +7259,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+ uint8_t nb_tcs;
uint8_t i, j;
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7160,19 +7268,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
else
dcb_info->nb_tcs = 1;
+ tc_queue = &dcb_info->tc_queue;
+ nb_tcs = dcb_info->nb_tcs;
+
if (dcb_config->vt_mode) { /* vt is enabled*/
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
- for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
- for (j = 0; j < dcb_info->nb_tcs; j++) {
- dcb_info->tc_queue.tc_rxq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
- dcb_info->tc_queue.tc_txq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[0][j].base = j;
+ tc_queue->tc_rxq[0][j].nb_queue = 1;
+ tc_queue->tc_txq[0][j].base = j;
+ tc_queue->tc_txq[0][j].nb_queue = 1;
+ }
+ } else {
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_rxq[i][j].nb_queue = 1;
+ tc_queue->tc_txq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_txq[i][j].nb_queue = 1;
+ }
}
}
} else { /* vt is disabled*/
@@ -7529,7 +7649,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
if (!node)
return -ENOMEM;
- (void)rte_memcpy(&node->key,
+ rte_memcpy(&node->key,
&key,
sizeof(struct ixgbe_l2_tn_key));
node->pool = l2_tunnel->pool;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index caa50c8b..51ddcfd4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,9 +38,13 @@
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
+#ifdef RTE_LIBRTE_SECURITY
+#include "ixgbe_ipsec.h"
+#endif
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_tm_driver.h>
/* need update link, bit flag */
@@ -364,49 +368,6 @@ struct rte_flow {
enum rte_filter_type filter_type;
void *rule;
};
-/* ntuple filter list structure */
-struct ixgbe_ntuple_filter_ele {
- TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
- struct rte_eth_ntuple_filter filter_info;
-};
-/* ethertype filter list structure */
-struct ixgbe_ethertype_filter_ele {
- TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
- struct rte_eth_ethertype_filter filter_info;
-};
-/* syn filter list structure */
-struct ixgbe_eth_syn_filter_ele {
- TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
- struct rte_eth_syn_filter filter_info;
-};
-/* fdir filter list structure */
-struct ixgbe_fdir_rule_ele {
- TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
- struct ixgbe_fdir_rule filter_info;
-};
-/* l2_tunnel filter list structure */
-struct ixgbe_eth_l2_tunnel_conf_ele {
- TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
- struct rte_eth_l2_tunnel_conf filter_info;
-};
-/* ixgbe_flow memory list structure */
-struct ixgbe_flow_mem {
- TAILQ_ENTRY(ixgbe_flow_mem) entries;
- struct rte_flow *flow;
-};
-
-TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
-struct ixgbe_ntuple_filter_list filter_ntuple_list;
-TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
-struct ixgbe_ethertype_filter_list filter_ethertype_list;
-TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
-struct ixgbe_syn_filter_list filter_syn_list;
-TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
-struct ixgbe_fdir_rule_filter_list filter_fdir_list;
-TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
-struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
-TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
-struct ixgbe_flow_mem_list ixgbe_flow_list;
/*
* Statistics counters collected by the MACsec
@@ -529,7 +490,9 @@ struct ixgbe_adapter {
struct ixgbe_filter_info filter;
struct ixgbe_l2_tn_info l2_tn;
struct ixgbe_bw_conf bw_conf;
-
+#ifdef RTE_LIBRTE_SECURITY
+ struct ixgbe_ipsec ipsec;
+#endif
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
struct rte_timecounter systime_tc;
@@ -586,6 +549,9 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
(&((struct ixgbe_adapter *)adapter)->tm_conf)
+#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
+ (&((struct ixgbe_adapter *)adapter)->ipsec)
+
/*
* RX/TX function prototypes
*/
@@ -692,6 +658,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_init(void);
void ixgbe_filterlist_flush(void);
/*
* Flow director function prototypes
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index eb2d5581..9281dc1a 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -1276,7 +1276,8 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
rule->ixgbe_fdir.formatted.flow_type ==
IXGBE_ATR_FLOW_TYPE_IPV6) &&
(info->mask.src_port_mask != 0 ||
- info->mask.dst_port_mask != 0)) {
+ info->mask.dst_port_mask != 0) &&
+ rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
PMD_DRV_LOG(ERR, "By this device,"
" IPv4 is not supported without"
" L4 protocol and ports masked!");
@@ -1347,7 +1348,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
0);
if (!node)
return -ENOMEM;
- (void)rte_memcpy(&node->ixgbe_fdir,
+ rte_memcpy(&node->ixgbe_fdir,
&rule->ixgbe_fdir,
sizeof(union ixgbe_atr_input));
node->fdirflags = fdircmd_flags;
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index d6796088..19c2d479 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -51,7 +51,6 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
@@ -79,6 +78,51 @@
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+
+static struct ixgbe_ntuple_filter_list filter_ntuple_list;
+static struct ixgbe_ethertype_filter_list filter_ethertype_list;
+static struct ixgbe_syn_filter_list filter_syn_list;
+static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_flow_mem_list ixgbe_flow_list;
+
/**
* Endless loop will never happen with below assumption
* 1. there is at least one no-void item(END)
@@ -142,6 +186,9 @@ const struct rte_flow_action *next_no_void_action(
* END
* other members in mask and spec should set to 0x00.
* item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
*/
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -181,6 +228,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+#ifdef RTE_LIBRTE_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern*/
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
+ filter->proto = IPPROTO_ESP;
+ return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
@@ -474,6 +558,12 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+#ifdef RTE_LIBRTE_SECURITY
+	/* an ESP flow is not really a flow */
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
/* Ixgbe doesn't support tcp flags. */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -1004,7 +1094,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
* The first not void item can be E_TAG.
* The next not void item must be END.
* action:
- * The first not void action should be QUEUE.
+ * The first not void action should be VF or PF.
* The next not void action should be END.
* pattern example:
* ITEM Spec Mask
@@ -1015,7 +1105,8 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
* item->last should be NULL.
*/
static int
-cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_l2_tunnel_conf *filter,
@@ -1025,7 +1116,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_spec;
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
- const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1133,9 +1225,10 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* check if the first not void action is QUEUE. */
+ /* check if the first not void action is VF or PF. */
act = next_no_void_action(actions, NULL);
- if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
+ act->type != RTE_FLOW_ACTION_TYPE_PF) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1143,8 +1236,12 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- act_q = (const struct rte_flow_action_queue *)act->conf;
- filter->pool = act_q->index;
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->pool = act_vf->id;
+ } else {
+ filter->pool = pci_dev->max_vfs;
+ }
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
@@ -1169,8 +1266,10 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num;
- ret = cons_parse_l2_tn_filter(attr, pattern,
+ ret = cons_parse_l2_tn_filter(dev, attr, pattern,
actions, l2_tn_filter, error);
if (hw->mac.type != ixgbe_mac_X550 &&
@@ -1183,7 +1282,9 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
+ vf_num = pci_dev->max_vfs;
+
+ if (l2_tn_filter->pool > vf_num)
return -rte_errno;
return ret;
@@ -2600,6 +2701,17 @@ step_next:
}
void
+ixgbe_filterlist_init(void)
+{
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+}
+
+void
ixgbe_filterlist_flush(void)
{
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
@@ -2702,12 +2814,23 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+	/* an ESP flow is not really a flow */
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
if (!ret) {
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
sizeof(struct ixgbe_ntuple_filter_ele), 0);
- (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
@@ -2729,7 +2852,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
ethertype_filter_ptr = rte_zmalloc(
"ixgbe_ethertype_filter",
sizeof(struct ixgbe_ethertype_filter_ele), 0);
- (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
@@ -2749,7 +2876,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
if (!ret) {
syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
sizeof(struct ixgbe_eth_syn_filter_ele), 0);
- (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
@@ -2809,7 +2940,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
if (!ret) {
fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
sizeof(struct ixgbe_fdir_rule_ele), 0);
- (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
&fdir_rule,
sizeof(struct ixgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
@@ -2842,7 +2977,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
if (!ret) {
l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
- (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ if (!l2_tn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
sizeof(struct rte_eth_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
@@ -2941,7 +3080,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_NTUPLE:
ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&ntuple_filter,
+ rte_memcpy(&ntuple_filter,
&ntuple_filter_ptr->filter_info,
sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
@@ -2954,7 +3093,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&ethertype_filter,
+ rte_memcpy(&ethertype_filter,
&ethertype_filter_ptr->filter_info,
sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_add_del_ethertype_filter(dev,
@@ -2968,7 +3107,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_SYN:
syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&syn_filter,
+ rte_memcpy(&syn_filter,
&syn_filter_ptr->filter_info,
sizeof(struct rte_eth_syn_filter));
ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
@@ -2980,7 +3119,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_FDIR:
fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
- (void)rte_memcpy(&fdir_rule,
+ rte_memcpy(&fdir_rule,
&fdir_rule_ptr->filter_info,
sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
@@ -2995,7 +3134,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
- (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 00000000..105da11a
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,737 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_ipsec.h"
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS 5
+
+#define IXGBE_WAIT_RREAD \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+ IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_RWRITE \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+ IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TREAD \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+ IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TWRITE \
+ IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+ IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+
+#define CMP_IP(a, b) (\
+ (a).ipv6[0] == (b).ipv6[0] && \
+ (a).ipv6[1] == (b).ipv6[1] && \
+ (a).ipv6[2] == (b).ipv6[2] && \
+ (a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i = 0;
+
+ /* clear Rx IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ uint16_t index = i << 3;
+ uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+ IXGBE_WAIT_RWRITE;
+ }
+
+ /* clear Rx SPI and Rx/Tx SA tables*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ uint32_t index = i << 3;
+ uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | index;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+ IXGBE_WAIT_TWRITE;
+ }
+}
+
+static int
+ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
+{
+ struct rte_eth_dev *dev = ic_session->dev;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+ dev->data->dev_private);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip,
+ ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+ /* If no match, find a free entry in the IP table*/
+ if (ip_index < 0) {
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (priv->rx_ip_tbl[i].ref_count == 0) {
+ ip_index = i;
+ break;
+ }
+ }
+ }
+
+ /* Fail if no match and no free entries*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx SA table\n");
+ return -1;
+ }
+
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+ ic_session->dst_ip.ipv6[0];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+ ic_session->dst_ip.ipv6[1];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+ ic_session->dst_ip.ipv6[2];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+ ic_session->dst_ip.ipv6[3];
+ priv->rx_ip_tbl[ip_index].ref_count++;
+
+ priv->rx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+ priv->rx_sa_tbl[sa_index].key[3] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+ priv->rx_sa_tbl[sa_index].key[2] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+ priv->rx_sa_tbl[sa_index].key[1] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+ priv->rx_sa_tbl[sa_index].key[0] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+ priv->rx_sa_tbl[sa_index].salt =
+ rte_cpu_to_be_32(ic_session->salt);
+ priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
+ priv->rx_sa_tbl[sa_index].mode |=
+ (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+ if (ic_session->dst_ip.type == IPv6)
+ priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+ priv->rx_sa_tbl[sa_index].used = 1;
+
+ /* write IP table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_IP | (ip_index << 3);
+ if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv4);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+ }
+ IXGBE_WAIT_RWRITE;
+
+ /* write SPI table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_SPI | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+ priv->rx_sa_tbl[sa_index].spi);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+ priv->rx_sa_tbl[sa_index].ip_index);
+ IXGBE_WAIT_RWRITE;
+
+ /* write Key table entry*/
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+ IPSRXIDX_TABLE_KEY | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+ priv->rx_sa_tbl[sa_index].key[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+ priv->rx_sa_tbl[sa_index].key[1]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+ priv->rx_sa_tbl[sa_index].key[2]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+ priv->rx_sa_tbl[sa_index].key[3]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+ priv->rx_sa_tbl[sa_index].salt);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+ priv->rx_sa_tbl[sa_index].mode);
+ IXGBE_WAIT_RWRITE;
+
+ } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+ int i;
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Tx SA table\n");
+ return -1;
+ }
+
+ priv->tx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->tx_sa_tbl[sa_index].key[3] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+ priv->tx_sa_tbl[sa_index].key[2] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+ priv->tx_sa_tbl[sa_index].key[1] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+ priv->tx_sa_tbl[sa_index].key[0] =
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+ priv->tx_sa_tbl[sa_index].salt =
+ rte_cpu_to_be_32(ic_session->salt);
+
+ reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+ priv->tx_sa_tbl[sa_index].key[0]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+ priv->tx_sa_tbl[sa_index].key[1]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+ priv->tx_sa_tbl[sa_index].key[2]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+ priv->tx_sa_tbl[sa_index].key[3]);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+ priv->tx_sa_tbl[sa_index].salt);
+ IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 1;
+ ic_session->sa_index = sa_index;
+ }
+
+ return 0;
+}
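
The key-word shuffle above is the subtlest part of the SA programming: the first four bytes of the session key end up, byte-swapped, in the highest key word, so IPSRXKEY(3) receives key bytes 0-3 and IPSRXKEY(0) receives bytes 12-15. A minimal standalone sketch of the same transformation (htonl() standing in for rte_cpu_to_be_32(); all names here are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(), standing in for rte_cpu_to_be_32() */

/* Mirror the driver's layout: key bytes 0..3 land, big-endian, in word 3;
 * bytes 12..15 land in word 0.
 */
static void
key_to_regs(const uint8_t key[16], uint32_t reg[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t w;

		memcpy(&w, &key[4 * i], 4);	/* avoids the unaligned cast */
		reg[3 - i] = htonl(w);
	}
}

int
main(void)
{
	const uint8_t key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};
	uint32_t reg[4];
	int i;

	key_to_regs(key, reg);
	for (i = 0; i < 4; i++)
		printf("IPSRXKEY(%d) = 0x%08x\n", i, reg[i]);
	/* prints IPSRXKEY(3) = 0x00010203 on any host endianness */
	return 0;
}
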
+
+static int
+ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ struct ixgbe_crypto_session *ic_session)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv =
+ IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+
+ /* Fail if no match*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx IP table\n");
+ return -1;
+ }
+
+		/* Find a match in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no match*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx SA table\n");
+ return -1;
+ }
+
+		/* Disable and clear Rx SPI and key table entries*/
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+ IXGBE_WAIT_RWRITE;
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+ IXGBE_WAIT_RWRITE;
+ priv->rx_sa_tbl[sa_index].used = 0;
+
+ /* If last used then clear the IP table entry*/
+ priv->rx_ip_tbl[ip_index].ref_count--;
+ if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+ reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+ (ip_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+ }
+	} else { /* ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION */
+ int i;
+
+ /* Find a match in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+		/* Fail if no match*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Tx SA table\n");
+ return -1;
+ }
+ reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+ IXGBE_WAIT_TWRITE;
+
+ priv->tx_sa_tbl[sa_index].used = 0;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *session,
+ struct rte_mempool *mempool)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct ixgbe_crypto_session *ic_session = NULL;
+ struct rte_crypto_aead_xform *aead_xform;
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+ if (rte_mempool_get(mempool, (void **)&ic_session)) {
+ PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+ return -ENOMEM;
+ }
+
+ if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+ conf->crypto_xform->aead.algo !=
+ RTE_CRYPTO_AEAD_AES_GCM) {
+ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+ return -ENOTSUP;
+ }
+ aead_xform = &conf->crypto_xform->aead;
+
+ if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+ return -ENOTSUP;
+ }
+ } else {
+ if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+ return -ENOTSUP;
+ }
+ }
+
+ ic_session->key = aead_xform->key.data;
+ memcpy(&ic_session->salt,
+ &aead_xform->key.data[aead_xform->key.length], 4);
+ ic_session->spi = conf->ipsec.spi;
+ ic_session->dev = eth_dev;
+
+ set_sec_session_private_data(session, ic_session);
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ if (ixgbe_crypto_add_sa(ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to add SA\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+ struct rte_security_session *session)
+{
+ struct rte_eth_dev *eth_dev = device;
+ struct ixgbe_crypto_session *ic_session =
+ (struct ixgbe_crypto_session *)
+ get_sec_session_private_data(session);
+ struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+ if (eth_dev != ic_session->dev) {
+ PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+ return -ENODEV;
+ }
+
+ if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to remove session\n");
+ return -EFAULT;
+ }
+
+ rte_mempool_put(mempool, (void *)ic_session);
+
+ return 0;
+}
+
+static inline uint8_t
+ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+ if (m->nb_segs == 1) {
+ /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+ * payload padding size is stored at <pkt_len - 18>
+ */
+ uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) -
+ (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+ return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+ }
+ return 0;
+}
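
For context on the <pkt_len - 18> arithmetic: an ESP packet ends with the padding bytes, a one-byte pad-length field, a one-byte next-header field (together the 2-byte trailer) and the 16-byte AES-GCM ICV, so the pad-length byte sits ESP_TRAILER_SIZE + ESP_ICV_SIZE bytes from the end. A scalar sketch for a contiguous buffer (multi-segment mbufs return 0 above, leaving the length unknown):

#include <stdint.h>
#include <stddef.h>

#define ESP_ICV_SIZE	 16	/* AES-GCM ICV */
#define ESP_TRAILER_SIZE  2	/* pad length + next header */

/* Equivalent of ixgbe_crypto_compute_pad_len() for a flat buffer: read the
 * pad-length byte from the ESP trailer and report the total number of
 * trailing bytes (padding + trailer + ICV).
 */
static uint8_t
esp_pad_len(const uint8_t *pkt, size_t pkt_len)
{
	uint8_t pad = pkt[pkt_len - (ESP_TRAILER_SIZE + ESP_ICV_SIZE)];

	return pad + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
}
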
+
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+ struct rte_security_session *session,
+ struct rte_mbuf *m, void *params __rte_unused)
+{
+ struct ixgbe_crypto_session *ic_session =
+ get_sec_session_private_data(session);
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ union ixgbe_crypto_tx_desc_md *mdata =
+ (union ixgbe_crypto_tx_desc_md *)&m->udata64;
+ mdata->enc = 1;
+ mdata->sa_idx = ic_session->sa_index;
+ mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
+ }
+ return 0;
+}
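
Applications do not call ixgbe_crypto_update_mb() directly; it is reached through the generic rte_security metadata hook. A hedged usage sketch, assuming ctx and sess were obtained from rte_eth_dev_get_sec_ctx() and rte_security_session_create():

#include <rte_mbuf.h>
#include <rte_security.h>

/* Tag an outbound ESP packet for inline crypto before rte_eth_tx_burst().
 * For ixgbe this lands in ixgbe_crypto_update_mb(), which fills udata64.
 */
static int
prepare_inline_tx(struct rte_security_ctx *ctx,
		struct rte_security_session *sess,
		struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	return rte_security_set_pkt_metadata(ctx, sess, m, NULL);
}
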
+
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+ static const struct rte_cryptodev_capabilities
+ aes_gcm_gmac_crypto_capabilities[] = {
+ { /* AES GMAC (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ {
+ .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+ }, }
+ },
+ };
+
+ static const struct rte_security_capability
+ ixgbe_security_capabilities[] = {
+ { /* IPsec Inline Crypto ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Transport Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+ };
+
+ return ixgbe_security_capabilities;
+}
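
On the application side the table above is walked until the RTE_SECURITY_ACTION_TYPE_NONE terminator. A sketch that checks for inline-crypto ESP transport egress support, relying on that terminator convention:

#include <rte_security.h>

static int
supports_inline_esp_transport_egress(struct rte_security_ctx *ctx)
{
	const struct rte_security_capability *cap =
		rte_security_capabilities_get(ctx);

	if (cap == NULL)
		return 0;
	for (; cap->action != RTE_SECURITY_ACTION_TYPE_NONE; cap++) {
		if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		    cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC &&
		    cap->ipsec.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT &&
		    cap->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
			return 1;
	}
	return 0;
}
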
+
+
+int
+ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg;
+
+ /* sanity checks */
+ if (dev->data->dev_conf.rxmode.enable_lro) {
+ PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+ return -1;
+ }
+ if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+ return -1;
+ }
+
+
+ /* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+ /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+ * hang will occur with heavy traffic.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xFFFFFFF0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ if (reg != 0) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+ if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
+ IXGBE_SECTXCTRL_STORE_FORWARD);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+
+ ixgbe_crypto_clear_ipsec_tables(dev);
+
+ return 0;
+}
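
This function is only reached when the security offload bits are set at configure time. A minimal port configuration sketch that satisfies the sanity checks above (CRC strip on, LRO off); 17.11 is mid-transition between the legacy rxmode bitfield and the new offloads API, so both representations are set here, and queue setup plus error handling are omitted. The function name is hypothetical:

#include <rte_ethdev.h>

static int
configure_inline_ipsec_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_strip_crc = 1,	/* required by the check above */
			.enable_lro = 0,	/* RSC and IPsec are exclusive */
			.offloads = DEV_RX_OFFLOAD_SECURITY,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_SECURITY,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
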
+
+int
+ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6)
+{
+ struct ixgbe_crypto_session *ic_session
+ = get_sec_session_private_data(sess);
+
+ if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ if (is_ipv6) {
+ const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+ ic_session->src_ip.type = IPv6;
+ ic_session->dst_ip.type = IPv6;
+ rte_memcpy(ic_session->src_ip.ipv6,
+ ipv6->hdr.src_addr, 16);
+ rte_memcpy(ic_session->dst_ip.ipv6,
+ ipv6->hdr.dst_addr, 16);
+ } else {
+ const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+ ic_session->src_ip.type = IPv4;
+ ic_session->dst_ip.type = IPv4;
+ ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+ ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+ }
+ return ixgbe_crypto_add_sa(ic_session);
+ }
+
+ return 0;
+}
+
+static struct rte_security_ops ixgbe_security_ops = {
+ .session_create = ixgbe_crypto_create_session,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = ixgbe_crypto_remove_session,
+ .set_pkt_metadata = ixgbe_crypto_update_mb,
+ .capabilities_get = ixgbe_crypto_capabilities_get
+};
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+ struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx) {
+ ctx->device = (void *)dev;
+ ctx->ops = &ixgbe_security_ops;
+ ctx->sess_cnt = 0;
+ }
+ return ctx;
+}
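
The returned context is what the ethdev layer hands back to applications. A sketch of the consumer side, assuming conf (AES-GCM xform, SPI, direction) and sess_pool are prepared elsewhere:

#include <rte_ethdev.h>
#include <rte_security.h>

static struct rte_security_session *
create_inline_session(uint16_t port_id,
		struct rte_security_session_conf *conf,
		struct rte_mempool *sess_pool)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	if (ctx == NULL)	/* port does not expose a security context */
		return NULL;
	return rte_security_session_create(ctx, conf, sess_pool);
}
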
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 00000000..fb8fefc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IXGBE_IPSEC_H_
+#define IXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXIDX_RX_EN 0x00000001
+#define IPSRXIDX_TABLE_IP 0x00000002
+#define IPSRXIDX_TABLE_SPI 0x00000004
+#define IPSRXIDX_TABLE_KEY 0x00000006
+#define IPSRXIDX_WRITE 0x80000000
+#define IPSRXIDX_READ 0x40000000
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
+enum ixgbe_operation {
+ IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+ IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+ IXGBE_GCM_KEY_128,
+ IXGBE_GCM_KEY_256
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this, possibly rte_net.h.
+ */
+struct ipaddr {
+ enum ipaddr_type {
+ IPv4,
+ IPv6
+ } type;
+ /**< IP Address Type - IPv4/IPv6 */
+
+ union {
+ uint32_t ipv4;
+ uint32_t ipv6[4];
+ };
+};
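
The Rx-side lookups in ixgbe_ipsec.c compare these addresses with a CMP_IP() helper macro; an equivalent plain function over the struct above could look like the sketch below. The word-wise compare also covers IPv4, which overlays ipv6[0] through the union, provided the unused words are zero-initialized:

static inline int
ipaddr_equal(const struct ipaddr *a, const struct ipaddr *b)
{
	return a->ipv6[0] == b->ipv6[0] && a->ipv6[1] == b->ipv6[1] &&
	       a->ipv6[2] == b->ipv6[2] && a->ipv6[3] == b->ipv6[3];
}
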
+
+/** inline crypto private session structure */
+struct ixgbe_crypto_session {
+ enum ixgbe_operation op;
+ uint8_t *key;
+ uint32_t salt;
+ uint32_t sa_index;
+ uint32_t spi;
+ struct ipaddr src_ip;
+ struct ipaddr dst_ip;
+ struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+ struct ipaddr ip;
+ uint16_t ref_count;
+};
+struct ixgbe_crypto_rx_sa_table {
+ uint32_t spi;
+ uint32_t ip_index;
+ uint32_t key[4];
+ uint32_t salt;
+ uint8_t mode;
+ uint8_t used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+ uint32_t spi;
+ uint32_t key[4];
+ uint32_t salt;
+ uint8_t used;
+};
+
+union ixgbe_crypto_tx_desc_md {
+ uint64_t data;
+ struct {
+		/** SA table index */
+		uint32_t sa_idx;
+		/** ICV and ESP trailer length */
+		uint8_t pad_len;
+		/** enable encryption */
+		uint8_t enc;
+ };
+};
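
The union overlays the mbuf's 64-bit udata64 field: 32 bits of SA index, 8 bits of pad length, 8 bits of encrypt flag, with 16 bits unused. A standalone sketch of the packing, mirroring the field names above:

#include <assert.h>
#include <stdint.h>

union tx_desc_md {
	uint64_t data;		/* what gets stored in mbuf->udata64 */
	struct {
		uint32_t sa_idx;
		uint8_t pad_len;
		uint8_t enc;
	};
};

int
main(void)
{
	union tx_desc_md md = { .data = 0 };

	md.sa_idx = 42;		/* Tx SA table slot */
	md.pad_len = 18;	/* ICV + trailer, zero padding bytes */
	md.enc = 1;		/* request encryption */
	assert(sizeof(md) == sizeof(uint64_t));
	return 0;
}
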
+
+struct ixgbe_ipsec {
+ struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+ struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+ struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6);
+
+
+
+#endif /*IXGBE_IPSEC_H_*/
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index c0d86c76..676e92c7 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -627,6 +627,18 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ struct rte_eth_conf *eth_conf;
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+ u8 num_tcs;
+ struct ixgbe_hw *hw;
+ u32 vmvir;
+#define IXGBE_VMVIR_VLANA_MASK 0xC0000000
+#define IXGBE_VMVIR_VLAN_VID_MASK 0x00000FFF
+#define IXGBE_VMVIR_VLAN_UP_MASK 0x0000E000
+#define VLAN_PRIO_SHIFT 13
+ u32 vlana;
+ u32 vid;
+ u32 user_priority;
/* Verify if the PF supports the mbox APIs version or not */
switch (vfinfo[vf].api_version) {
@@ -645,10 +657,51 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
/* Notify VF of default queue */
msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
- /*
- * FIX ME if it needs fill msgbuf[IXGBE_VF_TRANS_VLAN]
- * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
- */
+ /* Notify VF of number of DCB traffic classes */
+ eth_conf = &dev->data->dev_conf;
+ switch (eth_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_NONE:
+ case ETH_MQ_TX_DCB:
+ RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
+ ", but its tx mode = %d\n", vf,
+ eth_conf->txmode.mq_mode);
+ return -1;
+
+ case ETH_MQ_TX_VMDQ_DCB:
+ vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+ case ETH_16_POOLS:
+ num_tcs = ETH_8_TCS;
+ break;
+ case ETH_32_POOLS:
+ num_tcs = ETH_4_TCS;
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case ETH_MQ_TX_VMDQ_ONLY:
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
+ vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
+ user_priority =
+ (vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
+ if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
+ ((vid != 0) || (user_priority != 0)))
+ num_tcs = 1;
+ else
+ num_tcs = 0;
+ break;
+
+ default:
+ RTE_LOG(ERR, PMD, "PF work with invalid mode = %d\n",
+ eth_conf->txmode.mq_mode);
+ return -1;
+ }
+ msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
return 0;
}
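
A scalar restatement of the ETH_MQ_TX_VMDQ_ONLY branch may help: the VF is told it has one traffic class exactly when a default port VLAN is being inserted for it, that is, when the VLANA field selects the default VLAN and either the VID or the user priority is non-zero. The IXGBE_VMVIR_VLANA_DEFAULT value below is taken from the base code and is an assumption of this sketch:

#include <stdint.h>

#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
#define IXGBE_VMVIR_VLANA_DEFAULT	0x40000000	/* assumed, per base code */
#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
#define VLAN_PRIO_SHIFT 13

static uint8_t
vmdq_only_num_tcs(uint32_t vmvir)
{
	uint32_t vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
	uint32_t vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
	uint32_t up = (vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;

	return (vlana == IXGBE_VMVIR_VLANA_DEFAULT && (vid || up)) ? 1 : 0;
}
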
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 64bff258..012d9ee8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@
PKT_TX_TCP_SEG | \
PKT_TX_MACSEC | \
PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_SEC_OFFLOAD | \
IXGBE_TX_IEEE1588_TMST)
#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -184,7 +185,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
int i;
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
@@ -207,7 +208,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
uint64_t buf_dma_addr;
uint32_t pkt_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
@@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,21 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
+#ifdef RTE_LIBRTE_SECURITY
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union ixgbe_crypto_tx_desc_md *md =
+ (union ixgbe_crypto_tx_desc_md *)mdata;
+ seqnum_seed |=
+ (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+ type_tucmd_mlhl |=
+ (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
@@ -657,6 +674,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec;
+#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
@@ -684,6 +704,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
/* If hardware offload required */
tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -695,6 +718,15 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec) {
+ union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union ixgbe_crypto_tx_desc_md *)
+ &tx_pkt->udata64;
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
@@ -855,7 +887,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload, &tx_pkt->udata64);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -873,6 +905,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
+#endif
m_seg = tx_pkt;
do {
@@ -888,7 +924,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
@@ -1447,6 +1483,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
+#ifdef RTE_LIBRTE_SECURITY
+ if (rx_status & IXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
return pkt_flags;
}
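
Receive-side consumers check these two flags together: PKT_RX_SEC_OFFLOAD says the inline engine handled the packet, PKT_RX_SEC_OFFLOAD_FAILED that it failed authentication. A short sketch:

#include <rte_mbuf.h>

/* Returns 1 when the packet was processed inline and verified. */
static int
rx_ipsec_ok(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
		return 0;	/* not handled by the inline engine */
	return !(m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED);
}
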
@@ -1589,7 +1633,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* populate the descriptors */
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
@@ -1821,7 +1865,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -1849,7 +1893,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
@@ -1940,7 +1984,7 @@ ixgbe_fill_cluster_head_buf(
head->port = rxq->port_id;
- /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
@@ -2115,7 +2159,7 @@ next_desc:
if (!bulk_alloc) {
__le64 dma =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/*
* Update RX descriptor with the physical address of the
* new data buffer of the new allocated mbuf.
@@ -2364,8 +2408,11 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
- && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+#ifdef RTE_LIBRTE_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
@@ -2535,6 +2582,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->txq_flags = tx_conf->txq_flags;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIBRTE_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
/*
* Modification to set VFTDT for virtual function if vf is detected
@@ -2548,7 +2599,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -2850,7 +2901,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
@@ -3517,12 +3568,19 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
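
The change in these four mapping loops is worth spelling out: up_to_tc_bitmap is a per-TC bitmap of user priorities, and several priorities can share one traffic class, so the loop must OR bit i (the priority) into TC j's bitmap rather than assign bit j. A standalone sketch of the corrected mapping:

#include <stdint.h>
#include <stdio.h>

#define NUM_USER_PRIORITIES 8
#define MAX_TCS 8

/* Rebuild the per-TC "which user priorities map here" bitmaps from a
 * dcb_tc[] table, the way the corrected loops above do.
 */
int
main(void)
{
	const uint8_t dcb_tc[NUM_USER_PRIORITIES] =
		{ 0, 0, 0, 0, 1, 1, 2, 3 };	/* example UP -> TC map */
	uint8_t up_to_tc_bitmap[MAX_TCS] = { 0 };
	int i;

	for (i = 0; i < NUM_USER_PRIORITIES; i++)
		up_to_tc_bitmap[dcb_tc[i]] |= (uint8_t)(1 << i);

	for (i = 0; i < MAX_TCS; i++)
		printf("TC%d: 0x%02x\n", i, up_to_tc_bitmap[i]);
	/* TC0 gets 0x0f (priorities 0-3), TC1 0x30, TC2 0x40, TC3 0x80 */
	return 0;
}
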
@@ -3544,12 +3602,18 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
@@ -3565,12 +3629,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
@@ -3586,12 +3656,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
- tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
- (uint8_t)(1 << j);
+ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
}
}
@@ -4112,7 +4188,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
@@ -4494,6 +4570,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
+#ifdef RTE_LIBRTE_SECURITY
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+#endif
}
}
@@ -4981,6 +5061,21 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
+#ifdef RTE_LIBRTE_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY)) {
+ ret = ixgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "ixgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 85feb0bd..cc7c8288 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -138,8 +138,12 @@ struct ixgbe_rx_queue {
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- uint16_t rx_using_sse;
+ uint8_t rx_using_sse;
/**< indicates that vector RX is in use */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec RX feature is in use */
+#endif
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
@@ -148,7 +152,7 @@ struct ixgbe_rx_queue {
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
@@ -183,6 +187,11 @@ union ixgbe_tx_offload {
/* fields for TX offloading of tunnels */
uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+#ifdef RTE_LIBRTE_SECURITY
+ /* inline ipsec related*/
+ uint64_t sa_idx:8; /**< TX SA database entry index */
+ uint64_t sec_pad_len:4; /**< padding length */
+#endif
};
};
@@ -237,7 +246,7 @@ struct ixgbe_tx_queue {
uint16_t tx_next_rs; /**< next desc to set RS bit */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
@@ -247,6 +256,10 @@ struct ixgbe_tx_queue {
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
const struct ixgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec TX feature is in use */
+#endif
};
struct ixgbe_txq_ops {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 44de1caa..2e87ffa0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -87,13 +87,13 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
* Data to be rearmed is 6 bytes long.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
- paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
vst1_u8((uint8_t *)&mb1->rearm_data, p);
- paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
}
@@ -126,8 +126,8 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
} vol;
const uint8x16_t pkttype_msk = {
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ PKT_RX_VLAN, PKT_RX_VLAN,
+ PKT_RX_VLAN, PKT_RX_VLAN,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00};
@@ -414,7 +414,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
{
uint64x2_t descriptor = {
- pkt->buf_physaddr + pkt->data_off,
+ pkt->buf_iova + pkt->data_off,
(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};
vst1q_u64((uint64_t *)&txdp->read, descriptor);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index e704a7f3..486239ba 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -86,8 +86,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
offsetof(struct rte_mbuf, buf_addr) + 8);
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -122,6 +122,43 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
+#ifdef RTE_LIBRTE_SECURITY
+static inline void
+desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i sterr, rearm, tmp_e, tmp_p;
+ uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2;
+ uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2;
+ uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2;
+ uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
+ const __m128i ipsec_sterr_msk =
+ _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
+			IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED);
+ const __m128i ipsec_proc_msk =
+ _mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
+ const __m128i ipsec_err_flag =
+ _mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
+ PKT_RX_SEC_OFFLOAD);
+ const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);
+
+ rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
+ sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
+ _mm_extract_epi32(descs[2], 2),
+ _mm_extract_epi32(descs[1], 2),
+ _mm_extract_epi32(descs[0], 2));
+ sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
+ tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
+ tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
+ sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
+ _mm_and_si128(tmp_p, ipsec_proc_flag));
+ rearm = _mm_or_si128(rearm, sterr);
+ *rearm0 = _mm_extract_epi32(rearm, 0);
+ *rearm1 = _mm_extract_epi32(rearm, 1);
+ *rearm2 = _mm_extract_epi32(rearm, 2);
+ *rearm3 = _mm_extract_epi32(rearm, 3);
+}
+#endif
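
The SIMD sequence above computes, for four descriptors at once, the same decision this scalar sketch makes for one status word. The FLAG_* values stand in for the real PKT_RX_SEC_OFFLOAD* bits:

#include <stdint.h>

#define STATUS_SECP		0x00020000	/* IXGBE_RXDADV_IPSEC_STATUS_SECP */
#define ERROR_AUTH_FAILED	0x18000000	/* ..._ERROR_AUTHENTICATION_FAILED */
#define FLAG_SEC_OFFLOAD	(1u << 0)	/* stand-in */
#define FLAG_SEC_OFFLOAD_FAILED	(1u << 1)	/* stand-in */

static uint32_t
ipsec_rx_flags(uint32_t sterr)
{
	sterr &= STATUS_SECP | ERROR_AUTH_FAILED;
	if (sterr == (STATUS_SECP | ERROR_AUTH_FAILED))
		return FLAG_SEC_OFFLOAD | FLAG_SEC_OFFLOAD_FAILED;
	if (sterr == STATUS_SECP)
		return FLAG_SEC_OFFLOAD;
	return 0;	/* no security processing reported */
}
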
+
static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
struct rte_mbuf **rx_pkts)
@@ -310,6 +347,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
volatile union ixgbe_adv_rx_desc *rxdp;
struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec = rxq->using_ipsec;
+#endif
int pos;
uint64_t var;
__m128i shuf_msk;
@@ -397,7 +437,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* ensure these 2 flags are in the lower 8 bits */
- RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
vlan_flags = rxq->vlan_flags & UINT8_MAX;
/* A. load 4 packet in one loop
@@ -473,6 +513,11 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* set ol_flags with vlan packet type */
desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
+#ifdef RTE_LIBRTE_SECURITY
+ if (unlikely(use_ipsec))
+ desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);
+#endif
+
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
@@ -604,7 +649,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp,
{
__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
flags | pkt->data_len,
- pkt->buf_physaddr + pkt->data_off);
+ pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)&txdp->read, descriptor);
}
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index cdcf45cb..ca4182c9 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -312,7 +312,7 @@ ixgbe_shaper_profile_add(struct rte_eth_dev *dev,
if (!shaper_profile)
return -ENOMEM;
shaper_profile->shaper_profile_id = shaper_profile_id;
- (void)rte_memcpy(&shaper_profile->profile, profile,
+ rte_memcpy(&shaper_profile->profile, profile,
sizeof(struct rte_tm_shaper_params));
TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
shaper_profile, node);
@@ -482,7 +482,7 @@ ixgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
}
static int
-ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
+ixgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
uint32_t priority, uint32_t weight,
struct rte_tm_node_params *params,
struct rte_tm_error *error)
@@ -517,8 +517,8 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
return -EINVAL;
}
- /* for root node */
- if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* for non-leaf node */
+ if (node_id >= dev->data->nb_tx_queues) {
/* check the unsupported parameters */
if (params->nonleaf.wfq_weight_mode) {
error->type =
@@ -542,7 +542,7 @@ ixgbe_node_param_check(uint32_t node_id, uint32_t parent_node_id,
return 0;
}
- /* for TC or queue node */
+ /* for leaf node */
/* check the unsupported parameters */
if (params->leaf.cman) {
error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
@@ -588,7 +588,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
enum ixgbe_tm_node_type node_type = IXGBE_TM_NODE_TYPE_MAX;
enum ixgbe_tm_node_type parent_node_type = IXGBE_TM_NODE_TYPE_MAX;
- struct ixgbe_tm_shaper_profile *shaper_profile;
+ struct ixgbe_tm_shaper_profile *shaper_profile = NULL;
struct ixgbe_tm_node *tm_node;
struct ixgbe_tm_node *parent_node;
uint8_t nb_tcs;
@@ -606,7 +606,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
return -EINVAL;
}
- ret = ixgbe_node_param_check(node_id, parent_node_id, priority, weight,
+ ret = ixgbe_node_param_check(dev, node_id, priority, weight,
params, error);
if (ret)
return ret;
@@ -619,12 +619,15 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
/* check the shaper profile id */
- shaper_profile = ixgbe_shaper_profile_search(dev,
- params->shaper_profile_id);
- if (!shaper_profile) {
- error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
- error->message = "shaper profile not exist";
- return -EINVAL;
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = ixgbe_shaper_profile_search(
+ dev, params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
}
/* root node if not have a parent */
@@ -657,12 +660,13 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->no = 0;
tm_node->parent = NULL;
tm_node->shaper_profile = shaper_profile;
- (void)rte_memcpy(&tm_node->params, params,
+ rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
tm_conf->root = tm_node;
/* increase the reference counter of the shaper profile */
- shaper_profile->reference_count++;
+ if (shaper_profile)
+ shaper_profile->reference_count++;
return 0;
}
@@ -737,7 +741,7 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->reference_count = 0;
tm_node->parent = parent_node;
tm_node->shaper_profile = shaper_profile;
- (void)rte_memcpy(&tm_node->params, params,
+ rte_memcpy(&tm_node->params, params,
sizeof(struct rte_tm_node_params));
if (parent_node_type == IXGBE_TM_NODE_TYPE_PORT) {
tm_node->no = parent_node->reference_count;
@@ -753,7 +757,8 @@ ixgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
tm_node->parent->reference_count++;
/* increase the reference counter of the shaper profile */
- shaper_profile->reference_count++;
+ if (shaper_profile)
+ shaper_profile->reference_count++;
return 0;
}
@@ -801,14 +806,16 @@ ixgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
/* root node */
if (node_type == IXGBE_TM_NODE_TYPE_PORT) {
- tm_node->shaper_profile->reference_count--;
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
rte_free(tm_node);
tm_conf->root = NULL;
return 0;
}
/* TC or queue node */
- tm_node->shaper_profile->reference_count--;
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
tm_node->parent->reference_count--;
if (node_type == IXGBE_TM_NODE_TYPE_TC) {
TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
@@ -876,15 +883,34 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->n_nodes_leaf_max = 0;
- cap->non_leaf_nodes_identical = true;
- cap->leaf_nodes_identical = true;
+ } else if (level_id == IXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != IXGBE_TM_NODE_TYPE_QUEUE) {
cap->nonleaf.shaper_private_supported = true;
cap->nonleaf.shaper_private_dual_rate_supported = false;
cap->nonleaf.shaper_private_rate_min = 0;
/* 10Gbps -> 1.25GBps */
cap->nonleaf.shaper_private_rate_max = 1250000000ull;
cap->nonleaf.shaper_shared_n_max = 0;
- cap->nonleaf.sched_n_children_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ if (level_id == IXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ IXGBE_DCB_MAX_TRAFFIC_CLASS;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
cap->nonleaf.sched_sp_n_priorities_max = 1;
cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
cap->nonleaf.sched_wfq_n_groups_max = 0;
@@ -894,21 +920,7 @@ ixgbe_level_capabilities_get(struct rte_eth_dev *dev,
return 0;
}
- /* TC or queue node */
- if (level_id == IXGBE_TM_NODE_TYPE_TC) {
- /* TC */
- cap->n_nodes_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
- cap->n_nodes_nonleaf_max = IXGBE_DCB_MAX_TRAFFIC_CLASS;
- cap->n_nodes_leaf_max = 0;
- cap->non_leaf_nodes_identical = true;
- } else {
- /* queue */
- cap->n_nodes_max = hw->mac.max_tx_queues;
- cap->n_nodes_nonleaf_max = 0;
- cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
- cap->non_leaf_nodes_identical = true;
- }
- cap->leaf_nodes_identical = true;
+ /* queue node */
cap->leaf.shaper_private_supported = true;
cap->leaf.shaper_private_dual_rate_supported = false;
cap->leaf.shaper_private_rate_min = 0;
@@ -998,7 +1010,8 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
goto done;
/* not support port max bandwidth yet */
- if (tm_conf->root->shaper_profile->profile.peak.rate) {
+ if (tm_conf->root->shaper_profile &&
+ tm_conf->root->shaper_profile->profile.peak.rate) {
error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
error->message = "no port max bandwidth";
goto fail_clear;
@@ -1006,7 +1019,8 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
/* HW not support TC max bandwidth */
TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
- if (tm_node->shaper_profile->profile.peak.rate) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
error->message = "no TC max bandwidth";
goto fail_clear;
@@ -1015,7 +1029,10 @@ ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
/* queue max bandwidth */
TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
- bw = tm_node->shaper_profile->profile.peak.rate;
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
if (bw) {
/* interpret Bps to Mbps */
bw = bw * 8 / 1000 / 1000;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index 79897ff6..f1273785 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -38,7 +38,7 @@
#include "rte_pmd_ixgbe.h"
int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr)
{
struct ixgbe_hw *hw;
@@ -73,7 +73,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
}
int
-rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
struct ixgbe_hw *hw;
struct ixgbe_vf_info *vfinfo;
@@ -105,7 +105,7 @@ rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
}
int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
struct ixgbe_mac_info *mac;
@@ -135,7 +135,7 @@ rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
struct ixgbe_mac_info *mac;
@@ -164,7 +164,7 @@ rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
struct ixgbe_hw *hw;
uint32_t ctrl;
@@ -200,7 +200,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
}
int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t ctrl;
@@ -230,7 +230,7 @@ rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
}
int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t reg_value;
@@ -260,7 +260,7 @@ rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t reg_value;
@@ -295,7 +295,7 @@ rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
@@ -342,7 +342,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on)
{
int val = 0;
@@ -389,7 +389,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
}
int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
@@ -439,7 +439,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
@@ -489,7 +489,7 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
}
int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on)
{
struct rte_eth_dev *dev;
@@ -524,7 +524,7 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
}
int
-rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *dev;
@@ -540,7 +540,7 @@ rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
}
int
-rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
@@ -623,7 +623,7 @@ rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
}
int
-rte_pmd_ixgbe_macsec_disable(uint8_t port)
+rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
@@ -687,7 +687,7 @@ rte_pmd_ixgbe_macsec_disable(uint8_t port)
}
int
-rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
@@ -712,7 +712,7 @@ rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
}
int
-rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
@@ -738,7 +738,7 @@ rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
}
int
-rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key)
{
struct ixgbe_hw *hw;
@@ -794,7 +794,7 @@ rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
}
int
-rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key)
{
struct ixgbe_hw *hw;
@@ -837,7 +837,7 @@ rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
}
int
-rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
uint8_t tc_num,
uint8_t *bw_weight)
{
@@ -911,7 +911,7 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
-rte_pmd_ixgbe_bypass_init(uint8_t port_id)
+rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -926,7 +926,7 @@ rte_pmd_ixgbe_bypass_init(uint8_t port_id)
}
int
-rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state)
+rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
struct rte_eth_dev *dev;
@@ -940,7 +940,7 @@ rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state)
}
int
-rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
struct rte_eth_dev *dev;
@@ -954,7 +954,7 @@ rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state)
}
int
-rte_pmd_ixgbe_bypass_event_show(uint8_t port_id,
+rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
uint32_t event,
uint32_t *state)
{
@@ -970,7 +970,7 @@ rte_pmd_ixgbe_bypass_event_show(uint8_t port_id,
}
int
-rte_pmd_ixgbe_bypass_event_store(uint8_t port_id,
+rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
uint32_t event,
uint32_t state)
{
@@ -986,7 +986,7 @@ rte_pmd_ixgbe_bypass_event_store(uint8_t port_id,
}
int
-rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
struct rte_eth_dev *dev;
@@ -1000,7 +1000,7 @@ rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout)
}
int
-rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
struct rte_eth_dev *dev;
@@ -1014,7 +1014,7 @@ rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver)
}
int
-rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
struct rte_eth_dev *dev;
@@ -1028,7 +1028,7 @@ rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
}
int
-rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id)
+rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index d33c285d..81b18f87 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -53,7 +53,7 @@
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* invalid.
*/
-int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
+int rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf);
/**
* Set the VF MAC address.
@@ -69,7 +69,7 @@ int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr);
/**
@@ -87,7 +87,8 @@ int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf,
+ uint8_t on);
/**
* Enable/Disable VF MAC anti spoofing.
@@ -104,7 +105,7 @@ int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable vf vlan insert
@@ -122,7 +123,7 @@ int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+int rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id);
/**
@@ -139,7 +140,7 @@ int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf,
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on);
+int rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on);
/**
* set all queues drop enable bit
@@ -155,7 +156,7 @@ int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on);
+int rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on);
/**
* set drop enable bit in the VF split rx control register
@@ -174,7 +175,7 @@ int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable vf vlan strip for all queues in a pool
@@ -194,7 +195,7 @@ int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable MACsec offload.
@@ -212,7 +213,7 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+int rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp);
/**
* Disable MACsec offload.
@@ -224,7 +225,7 @@ int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+int rte_pmd_ixgbe_macsec_disable(uint16_t port);
/**
* Configure Tx SC (Secure Connection).
@@ -238,7 +239,7 @@ int rte_pmd_ixgbe_macsec_disable(uint8_t port);
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+int rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac);
/**
* Configure Rx SC (Secure Connection).
@@ -254,7 +255,7 @@ int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+int rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi);
/**
* Enable Tx SA (Secure Association).
@@ -275,7 +276,7 @@ int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+int rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key);
/**
@@ -297,7 +298,7 @@ int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key);
/**
@@ -323,7 +324,8 @@ int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on);
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask,
+ uint8_t on);
/**
* Enable or disable a VF traffic receive of an Ethernet device.
@@ -342,7 +344,7 @@ rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable or disable a VF traffic transmit of the Ethernet device.
@@ -361,7 +363,7 @@ rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
@@ -383,7 +385,8 @@ rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on);
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
/**
* Set the rate limitation for a vf on an Ethernet device.
@@ -402,7 +405,8 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
+int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
/**
* Set all the TCs' bandwidth weight.
@@ -423,7 +427,7 @@ int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate,
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
uint8_t tc_num,
uint8_t *bw_weight);
@@ -439,7 +443,7 @@ int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_init(uint8_t port);
+int rte_pmd_ixgbe_bypass_init(uint16_t port);
/**
* Return bypass state.
@@ -456,7 +460,7 @@ int rte_pmd_ixgbe_bypass_init(uint8_t port);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state);
+int rte_pmd_ixgbe_bypass_state_show(uint16_t port, uint32_t *state);
/**
* Set bypass state
@@ -473,7 +477,7 @@ int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state);
+int rte_pmd_ixgbe_bypass_state_set(uint16_t port, uint32_t *new_state);
/**
* Return bypass state when given event occurs.
@@ -497,7 +501,7 @@ int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_event_show(uint8_t port,
+int rte_pmd_ixgbe_bypass_event_show(uint16_t port,
uint32_t event,
uint32_t *state);
@@ -523,7 +527,7 @@ int rte_pmd_ixgbe_bypass_event_show(uint8_t port,
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_event_store(uint8_t port,
+int rte_pmd_ixgbe_bypass_event_store(uint16_t port,
uint32_t event,
uint32_t state);
@@ -547,7 +551,7 @@ int rte_pmd_ixgbe_bypass_event_store(uint8_t port,
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout);
+int rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout);
/**
* Get bypass firmware version.
@@ -561,7 +565,7 @@ int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver);
+int rte_pmd_ixgbe_bypass_ver_show(uint16_t port, uint32_t *ver);
/**
* Return bypass watchdog timeout in seconds
@@ -583,7 +587,7 @@ int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
+int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
/**
* Reset bypass watchdog timer
@@ -595,7 +599,7 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port);
+int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
/**
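
Every hunk in rte_pmd_ixgbe.c and rte_pmd_ixgbe.h above is the same mechanical change: port identifiers widen from uint8_t to uint16_t, following the 17.11 ethdev work that allows more than 255 ports. Callers must widen their own port variables to match; a minimal sketch, assuming an ixgbe bypass-capable adapter has been probed:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/* Sketch: kick the bypass watchdog on every probed port.
 * Non-bypass/non-ixgbe ports simply return an error, which we ignore. */
static void
reset_all_bypass_watchdogs(void)
{
	uint16_t port;	/* a uint8_t would now truncate on > 255 ports */

	for (port = 0; port < rte_eth_dev_count(); port++)
		(void)rte_pmd_ixgbe_bypass_wd_reset(port);
}
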
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
index 46a1ad08..a3f51f92 100644
--- a/drivers/net/kni/Makefile
+++ b/drivers/net/kni/Makefile
@@ -38,6 +38,9 @@ LIB = librte_pmd_kni.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni
+LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_kni_version.map
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 72a2733b..8f269532 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -40,7 +40,7 @@
#include <rte_kni.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
/* Only single queue supported */
#define KNI_MAX_QUEUE_PER_PORT 1
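
The include swap reflects the 17.11 bus split: the virtual-device bus moved out of the EAL into drivers/bus/vdev, so PMDs now pull in <rte_bus_vdev.h> (and link -lrte_bus_vdev, per the Makefile hunk above). The registration interface itself is unchanged; a hedged, self-contained sketch with illustrative names:

#include <rte_bus_vdev.h>	/* was <rte_vdev.h> before the bus split */

/* Illustrative probe/remove stubs for a hypothetical vdev PMD. */
static int
pmd_example_probe(struct rte_vdev_device *dev)
{
	(void)dev;
	return 0;
}

static int
pmd_example_remove(struct rte_vdev_device *dev)
{
	(void)dev;
	return 0;
}

static struct rte_vdev_driver pmd_example_drv = {
	.probe = pmd_example_probe,
	.remove = pmd_example_remove,
};

RTE_PMD_REGISTER_VDEV(net_example, pmd_example_drv);
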
@@ -283,7 +283,7 @@ eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
return 0;
}
-static void
+static int
eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
@@ -320,6 +320,8 @@ eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
stats->oerrors = tx_packets_err_total;
+
+ return 0;
}
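
This is part of the 17.11 ethdev change that turns the stats_get op from void into int so drivers can report failure instead of leaving the stats undefined. Applications see it through rte_eth_stats_get(), which now also returns int; a hedged caller sketch:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: check the return value rte_eth_stats_get() gained in 17.11. */
static int
print_port_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret = rte_eth_stats_get(port_id, &stats);

	if (ret != 0)
		return ret;	/* e.g. -ENODEV for an invalid port */
	printf("port %" PRIu16 ": %" PRIu64 " ipackets, %" PRIu64 " opackets\n",
	       port_id, stats.ipackets, stats.opackets);
	return 0;
}
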
static void
@@ -356,8 +358,6 @@ static const struct eth_dev_ops eth_kni_ops = {
.stats_reset = eth_kni_stats_reset,
};
-static struct rte_vdev_driver eth_kni_drv;
-
static struct rte_eth_dev *
eth_kni_create(struct rte_vdev_device *vdev,
struct eth_kni_args *args,
@@ -395,8 +395,6 @@ eth_kni_create(struct rte_vdev_device *vdev,
eth_dev->data = data;
eth_dev->dev_ops = &eth_kni_ops;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
-
internals->no_request_thread = args->no_request_thread;
return eth_dev;
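
Dropping RTE_ETH_DEV_DETACHABLE (here and in the lio driver below) tracks the flag's removal from ethdev in 17.11: detachability is no longer opt-in, so drivers stop setting it. Detaching still goes through the same API, now with a 16-bit port id; a hedged sketch:

#include <rte_ethdev.h>

/* Sketch: detach a port. No DETACHABLE flag check applies any more;
 * the buffer size is illustrative, it only needs to hold the name. */
static int
detach_port(uint16_t port_id)
{
	char devname[64];

	return rte_eth_dev_detach(port_id, devname);
}
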
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
index 32c06f5b..5110099f 100644
--- a/drivers/net/liquidio/Makefile
+++ b/drivers/net/liquidio/Makefile
@@ -40,6 +40,9 @@ LIB = librte_pmd_lio.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/base -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_lio_version.map
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c
index e30c20dc..99780178 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.c
+++ b/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -379,25 +379,6 @@ cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
cn23xx_vf_reset_io_queues(lio_dev, num_queues);
}
-void
-cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
-{
- struct lio_mbox_cmd mbox_cmd;
-
- memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
- mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
- mbox_cmd.msg.s.resp_needed = 0;
- mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
- mbox_cmd.msg.s.len = 1;
- mbox_cmd.q_no = 0;
- mbox_cmd.recv_len = 0;
- mbox_cmd.recv_status = 0;
- mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
-
- lio_mbox_write(lio_dev, &mbox_cmd);
-}
-
static void
cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
struct lio_mbox_cmd *cmd, void *arg)
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h
index ad8db0df..83dc053a 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.h
+++ b/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -87,8 +87,6 @@ int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
-void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
-
int cn23xx_pfvf_handshake(struct lio_device *lio_dev);
int cn23xx_vf_setup_device(struct lio_device *lio_dev);
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
index de58c7cc..d4cd23ce 100644
--- a/drivers/net/liquidio/base/lio_hw_defs.h
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -43,10 +43,14 @@
#define LIO_CN23XX_VF_VID 0x9712
/* CN23xx subsystem device ids */
-#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004
-#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005
-#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006
-#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007
+#define PCI_SUBSYS_DEV_ID_CN2350_210 0x0004
+#define PCI_SUBSYS_DEV_ID_CN2360_210 0x0005
+#define PCI_SUBSYS_DEV_ID_CN2360_225 0x0006
+#define PCI_SUBSYS_DEV_ID_CN2350_225 0x0007
+#define PCI_SUBSYS_DEV_ID_CN2350_210SVPN3 0x0008
+#define PCI_SUBSYS_DEV_ID_CN2360_210SVPN3 0x0009
+#define PCI_SUBSYS_DEV_ID_CN2350_210SVPT 0x000a
+#define PCI_SUBSYS_DEV_ID_CN2360_210SVPT 0x000b
/* --------------------------CONFIG VALUES------------------------ */
@@ -106,6 +110,8 @@ enum lio_card_type {
#define LIO_FW_VERSION_LENGTH 32
+#define LIO_VF_TRUST_MIN_VERSION "1.7.1"
+
/** Tag types used by Octeon cores in its work. */
enum octeon_tag_type {
OCTEON_ORDERED_TAG = 0,
@@ -137,6 +143,7 @@ enum octeon_tag_type {
#define LIO_MAX_RX_PKTLEN (64 * 1024)
/* NIC Command types */
+#define LIO_CMD_CHANGE_MTU 0x1
#define LIO_CMD_CHANGE_DEVFLAGS 0x3
#define LIO_CMD_RX_CTL 0x4
#define LIO_CMD_CLEAR_STATS 0x6
@@ -184,6 +191,7 @@ enum octeon_tag_type {
/* Interface flags communicated between host driver and core app. */
enum lio_ifflags {
+ LIO_IFFLAG_PROMISC = 0x01,
LIO_IFFLAG_ALLMULTI = 0x02,
LIO_IFFLAG_UNICAST = 0x10
};
diff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h
index b0875d64..f1c5b8ec 100644
--- a/drivers/net/liquidio/base/lio_mbox.h
+++ b/drivers/net/liquidio/base/lio_mbox.h
@@ -43,7 +43,6 @@
#define LIO_MBOX_DATA_MAX 32
#define LIO_VF_ACTIVE 0x1
-#define LIO_VF_FLR_REQUEST 0x2
#define LIO_CORES_CRASHED 0x3
/* Macro for Read acknowledgment */
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 479936a5..4b189661 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -311,7 +311,7 @@ lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
}
/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
-static void
+static int
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *stats)
{
@@ -359,6 +359,8 @@ lio_dev_stats_get(struct rte_eth_dev *eth_dev,
stats->ibytes = bytes;
stats->ipackets = pkts;
stats->ierrors = drop;
+
+ return 0;
}
static void
@@ -403,6 +405,10 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
/* CN23xx 10G cards */
case PCI_SUBSYS_DEV_ID_CN2350_210:
case PCI_SUBSYS_DEV_ID_CN2360_210:
+ case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
+ case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
+ case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
+ case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
devinfo->speed_capa = ETH_LINK_SPEED_10G;
break;
/* CN23xx 25G cards */
@@ -411,8 +417,9 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->speed_capa = ETH_LINK_SPEED_25G;
break;
default:
+ devinfo->speed_capa = ETH_LINK_SPEED_10G;
lio_dev_err(lio_dev,
- "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
+ "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
}
devinfo->max_rx_queues = lio_dev->max_rx_queues;
@@ -446,29 +453,64 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
}
static int
-lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
+ uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
PMD_INIT_FUNC_TRACE();
if (!lio_dev->intf_open) {
- lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
+ lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
lio_dev->port_id);
return -EINVAL;
}
- /* Limit the MTU to make sure the ethernet packets are between
- * ETHER_MIN_MTU bytes and PF's MTU
+ /* Check if the VF MTU is within the allowed range.
+ * The new value should not exceed the PF MTU.
+ */
- if ((new_mtu < ETHER_MIN_MTU) ||
- (new_mtu > lio_dev->linfo.link.s.mtu)) {
- lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
- lio_dev_err(lio_dev, "Valid range %d and %d\n",
- ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
+ if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+ lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
+ ETHER_MIN_MTU, pf_mtu);
return -EINVAL;
}
+ /* Flush first to prevent command failure
+ * in case the queue is full.
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
+ ctrl_pkt.ncmd.s.param1 = mtu;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to change MTU timed out\n");
+ return -1;
+ }
+
+ if (frame_len > ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
+ eth_dev->data->mtu = mtu;
+
return 0;
}
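
lio_dev_validate_vf_mtu() grows into a full mtu_set handler: bound the request by the PF MTU, push LIO_CMD_CHANGE_MTU to firmware, then derive the frame length as mtu + ETHER_HDR_LEN + ETHER_CRC_LEN (14 + 4 bytes of overhead) to update jumbo_frame and max_rx_pkt_len. Once wired into eth_dev_ops further down, applications reach it through the generic entry point; a minimal sketch:

#include <rte_ethdev.h>

/* Sketch: change a liovf port's MTU via the generic API; the driver
 * rejects values above the PF MTU with -EINVAL, per the hunk above. */
static int
set_vf_mtu(uint16_t port_id, uint16_t mtu)
{
	return rte_eth_dev_set_mtu(port_id, mtu);
}
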
@@ -939,6 +981,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
memset(&old, 0, sizeof(old));
/* Return what we found */
@@ -1011,6 +1054,48 @@ lio_change_dev_flag(struct rte_eth_dev *eth_dev)
}
static void
+lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+ lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+ LIO_VF_TRUST_MIN_VERSION);
+ return;
+ }
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
@@ -1333,6 +1418,11 @@ lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+ if (ls->s.mtu < eth_dev->data->mtu) {
+ lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
+ ls->s.mtu);
+ eth_dev->data->mtu = ls->s.mtu;
+ }
lio_dev->linfo.link.link_status64 = ls->link_status64;
lio_dev_link_update(eth_dev, 0);
}
@@ -1404,35 +1494,22 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
if (lio_dev->linfo.link.link_status64 == 0) {
ret = -1;
- goto dev_mtu_check_error;
+ goto dev_mtu_set_error;
}
- if (eth_dev->data->dev_conf.rxmode.jumbo_frame == 1) {
- if (frame_len <= ETHER_MAX_LEN ||
- frame_len > LIO_MAX_RX_PKTLEN) {
- lio_dev_err(lio_dev, "max packet length should be >= %d and < %d when jumbo frame is enabled\n",
- ETHER_MAX_LEN, LIO_MAX_RX_PKTLEN);
- ret = -EINVAL;
- goto dev_mtu_check_error;
- }
- mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
- } else {
- /* default MTU */
- mtu = ETHER_MTU;
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
- }
+ mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
+ if (mtu < ETHER_MIN_MTU)
+ mtu = ETHER_MIN_MTU;
- if (lio_dev->linfo.link.s.mtu != mtu) {
- ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
+ if (eth_dev->data->mtu != mtu) {
+ ret = lio_dev_mtu_set(eth_dev, mtu);
if (ret)
- goto dev_mtu_check_error;
+ goto dev_mtu_set_error;
}
- eth_dev->data->mtu = mtu;
-
return 0;
-dev_mtu_check_error:
+dev_mtu_set_error:
rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
dev_lsc_handle_error:
@@ -1559,9 +1636,6 @@ lio_dev_close(struct rte_eth_dev *eth_dev)
rte_write32(pkt_count, droq->pkts_sent_reg);
}
- /* Do FLR for the VF */
- cn23xx_vf_ask_pf_to_do_flr(lio_dev);
-
/* lio_free_mbox */
lio_dev->fn_list.free_mbox(lio_dev);
@@ -1721,6 +1795,9 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
goto nic_config_fail;
}
+ snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s",
+ resp->cfg_info.lio_firmware_version);
+
lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
sizeof(struct octeon_if_cfg_info) >> 3);
@@ -1824,6 +1901,8 @@ static const struct eth_dev_ops liovf_eth_dev_ops = {
.dev_set_link_up = lio_dev_set_link_up,
.dev_set_link_down = lio_dev_set_link_down,
.dev_close = lio_dev_close,
+ .promiscuous_enable = lio_dev_promiscuous_enable,
+ .promiscuous_disable = lio_dev_promiscuous_disable,
.allmulticast_enable = lio_dev_allmulticast_enable,
.allmulticast_disable = lio_dev_allmulticast_disable,
.link_update = lio_dev_link_update,
@@ -1844,6 +1923,7 @@ static const struct eth_dev_ops liovf_eth_dev_ops = {
.rss_hash_update = lio_dev_rss_hash_update,
.udp_tunnel_port_add = lio_dev_udp_tunnel_add,
.udp_tunnel_port_del = lio_dev_udp_tunnel_del,
+ .mtu_set = lio_dev_mtu_set,
};
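
With promiscuous_enable/promiscuous_disable and mtu_set now listed in liovf_eth_dev_ops, the standard ethdev calls reach the new handlers. Note the gate added above: both promiscuous handlers bail out below LIO_VF_TRUST_MIN_VERSION ("1.7.1"). Since rte_eth_promiscuous_enable() still returns void in this release, such a refusal surfaces only in the driver log; a hedged sketch:

#include <rte_ethdev.h>

/* Sketch: toggle promiscuous mode on a liovf port. A firmware-version
 * refusal (< 1.7.1) is logged by the driver, not reported to the caller. */
static void
toggle_promisc(uint16_t port_id, int on)
{
	if (on)
		rte_eth_promiscuous_enable(port_id);
	else
		rte_eth_promiscuous_disable(port_id);
}
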
static void
@@ -1929,11 +2009,6 @@ lio_first_time_init(struct lio_device *lio_dev,
if (cn23xx_pfvf_handshake(lio_dev))
goto error;
- /* Initial reset */
- cn23xx_vf_ask_pf_to_do_flr(lio_dev);
- /* Wait for FLR for 100ms per SRIOV specification */
- rte_delay_ms(100);
-
if (cn23xx_vf_set_io_queues_off(lio_dev)) {
lio_dev_err(lio_dev, "Setting io queues off failed\n");
goto error;
@@ -2009,7 +2084,6 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pdev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
if (pdev->mem_resource[0].addr) {
lio_dev->hw_addr = pdev->mem_resource[0].addr;
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 2bbb893c..376893ac 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -172,7 +172,7 @@ lio_alloc_info_buffer(struct lio_device *lio_dev,
if (droq->info_mz == NULL)
return NULL;
- droq->info_list_dma = droq->info_mz->phys_addr;
+ droq->info_list_dma = droq->info_mz->iova;
droq->info_alloc_size = droq->info_mz->len;
droq->info_base_addr = (size_t)droq->info_mz->addr;
@@ -222,7 +222,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
return -1;
}
- droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+ droq->desc_ring_dma = droq->desc_ring_mz->iova;
droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
@@ -734,7 +734,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
return -1;
}
- iq->base_addr_dma = iq->iq_mz->phys_addr;
+ iq->base_addr_dma = iq->iq_mz->iova;
iq->base_addr = (uint8_t *)iq->iq_mz->addr;
iq->max_count = num_descs;
@@ -1298,7 +1298,7 @@ lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
- sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->dma_addr = rte_mbuf_data_iova(m);
sc->mbuf = m;
dma_addr = sc->dma_addr;
@@ -1739,12 +1739,12 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
cmdsetup.s.u.datasize = pkt_len;
lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
&cmdsetup, tag);
- ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
ndata.reqtype = LIO_REQTYPE_NORESP_NET;
} else {
struct lio_buf_free_info *finfo;
struct lio_gather *g;
- phys_addr_t phyaddr;
+ rte_iova_t phyaddr;
int i, frags;
finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
@@ -1771,7 +1771,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
&cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
- g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[0], m->data_len, 0);
pkt_len = m->data_len;
finfo->mbuf = m;
@@ -1782,7 +1782,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
m = m->next;
while (frags--) {
g->sg[(i >> 2)].ptr[(i & 3)] =
- rte_mbuf_data_dma_addr(m);
+ rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[(i >> 2)],
m->data_len, (i & 3));
pkt_len += m->data_len;
@@ -1790,8 +1790,8 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
m = m->next;
}
- phyaddr = rte_mem_virt2phy(g->sg);
- if (phyaddr == RTE_BAD_PHYS_ADDR) {
+ phyaddr = rte_mem_virt2iova(g->sg);
+ if (phyaddr == RTE_BAD_IOVA) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
}
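
All the lio_rxtx changes are the same mechanical rename from the 17.11 IOVA rework: phys_addr_t becomes rte_iova_t, the memzone field phys_addr becomes iova, and the helpers move from the *_dma_addr/virt2phy spellings to IOVA ones, since a device-visible address is no longer necessarily physical. A hedged sketch of the new spellings:

#include <rte_mbuf.h>
#include <rte_memory.h>

/* Sketch: the renamed IOVA helpers (pre-17.11 names in comments). */
static rte_iova_t
data_iova_or_bad(struct rte_mbuf *m, void *fallback_virt)
{
	if (m != NULL)
		return rte_mbuf_data_iova(m);	/* was rte_mbuf_data_dma_addr() */
	/* was rte_mem_virt2phy(); RTE_BAD_IOVA replaces RTE_BAD_PHYS_ADDR */
	return rte_mem_virt2iova(fallback_virt);
}
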
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
index 85685dc7..ef033735 100644
--- a/drivers/net/liquidio/lio_rxtx.h
+++ b/drivers/net/liquidio/lio_rxtx.h
@@ -686,9 +686,9 @@ lio_swap_8B_data(uint64_t *data, uint32_t blocks)
static inline uint64_t
lio_map_ring(void *buf)
{
- phys_addr_t dma_addr;
+ rte_iova_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+ dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
return (uint64_t)dma_addr;
}
@@ -696,7 +696,7 @@ lio_map_ring(void *buf)
static inline uint64_t
lio_map_ring_info(struct lio_droq *droq, uint32_t i)
{
- phys_addr_t dma_addr;
+ rte_iova_t dma_addr;
dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
index d9cbf000..10e3976a 100644
--- a/drivers/net/liquidio/lio_struct.h
+++ b/drivers/net/liquidio/lio_struct.h
@@ -684,6 +684,7 @@ struct lio_device {
uint8_t nb_tx_queues;
uint8_t port_configured;
struct lio_rss_ctx rss_state;
- uint8_t port_id;
+ uint16_t port_id;
+ char firmware_version[LIO_FW_VERSION_LENGTH];
};
#endif /* _LIO_STRUCT_H_ */
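
lio_device now caches the firmware version string (filled at configure time by the snprintf hunk above) so the promiscuous handlers can gate on it, and widens port_id along with the rest of the release. One caveat worth noting: the gate compares versions with strcmp(), which is lexicographic; that is sound while every component is a single digit, as in "1.7.1", but would misorder multi-digit components. A self-contained illustration:

#include <assert.h>
#include <string.h>

int
main(void)
{
	/* Single-digit components compare correctly... */
	assert(strcmp("1.7.1", "1.6.9") > 0);
	/* ...but byte-wise comparison misorders multi-digit ones:
	 * "1.10.0" < "1.9.0" because '1' < '9'. */
	assert(strcmp("1.10.0", "1.9.0") < 0);
	return 0;
}
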
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index c045bd79..f1f47c28 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,7 +1,7 @@
# BSD LICENSE
#
-# Copyright 2012-2015 6WIND S.A.
-# Copyright 2012 Mellanox.
+# Copyright 2012 6WIND S.A.
+# Copyright 2012 Mellanox
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -36,7 +36,14 @@ LIB = librte_pmd_mlx4.a
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mr.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -47,7 +54,10 @@ CFLAGS += -D_BSD_SOURCE
CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -libverbs
+LDLIBS += -libverbs -lmlx4
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
# A few warnings cannot be avoided in external headers.
CFLAGS += -Wno-error=cast-qual
@@ -68,22 +78,10 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N
-CFLAGS += -DMLX4_PMD_SGE_WR_N=$(CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N)
-endif
-
-ifdef CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE
-CFLAGS += -DMLX4_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE)
-endif
-
ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE
CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE)
endif
-ifdef CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS
-CFLAGS += -DMLX4_PMD_SOFT_COUNTERS=$(CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS)
-endif
-
ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y)
CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS
endif
@@ -103,23 +101,7 @@ mlx4_autoconf.h.new: FORCE
mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
- $Q sh -- '$<' '$@' \
- RSS_SUPPORT \
- infiniband/verbs.h \
- enum IBV_EXP_DEVICE_UD_RSS $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- INLINE_RECV \
- infiniband/verbs.h \
- enum IBV_EXP_DEVICE_ATTR_INLINE_RECV_SZ $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_EXP_QUERY_DEVICE \
- infiniband/verbs.h \
- type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \
- infiniband/verbs.h \
- enum IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \
- $(AUTOCONF_OUTPUT)
+ $Q : > '$@'
# Create mlx4_autoconf.h or update it in case it differs from the new one.
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 055de49a..f9e4f9d7 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2017 6WIND S.A.
- * Copyright 2012-2017 Mellanox.
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,95 +31,52 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * Known limitations:
- * - RSS hash key and options cannot be modified.
- * - Hardware counters aren't implemented.
+/**
+ * @file
+ * mlx4 driver initialization.
*/
-/* System headers. */
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
#include <stddef.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <limits.h>
-#include <assert.h>
-#include <arpa/inet.h>
-#include <net/if.h>
-#include <dirent.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <linux/ethtool.h>
-#include <linux/sockios.h>
-#include <fcntl.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_ethdev_pci.h>
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
#include <rte_dev.h>
-#include <rte_mbuf.h>
#include <rte_errno.h>
-#include <rte_mempool.h>
-#include <rte_prefetch.h>
-#include <rte_malloc.h>
-#include <rte_spinlock.h>
-#include <rte_atomic.h>
-#include <rte_version.h>
-#include <rte_log.h>
-#include <rte_alarm.h>
-#include <rte_memory.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ether.h>
#include <rte_flow.h>
-#include <rte_kvargs.h>
#include <rte_interrupts.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
-/* Generated configuration header. */
-#include "mlx4_autoconf.h"
-
-/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
-/* Convenience macros for accessing mbuf fields. */
-#define NEXT(m) ((m)->next)
-#define DATA_LEN(m) ((m)->data_len)
-#define PKT_LEN(m) ((m)->pkt_len)
-#define DATA_OFF(m) ((m)->data_off)
-#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
-#define NB_SEGS(m) ((m)->nb_segs)
-#define PORT(m) ((m)->port)
-
-/* Work Request ID data type (64 bit). */
-typedef union {
- struct {
- uint32_t id;
- uint16_t offset;
- } data;
- uint64_t raw;
-} wr_id_t;
-
-#define WR_ID(o) (((wr_id_t *)&(o))->data)
-
-/* Transpose flags. Useful to convert IBV to DPDK flags. */
-#define TRANSPOSE(val, from, to) \
- (((from) >= (to)) ? \
- (((val) & (from)) / ((from) / (to))) : \
- (((val) & (from)) * ((to) / (from))))
-
-/* Local storage for secondary process data. */
-struct mlx4_secondary_data {
- struct rte_eth_dev_data data; /* Local device data. */
- struct priv *primary_priv; /* Private structure from primary. */
- struct rte_eth_dev_data *shared_dev_data; /* Shared device data. */
- rte_spinlock_t lock; /* Port configuration lock. */
-} mlx4_secondary_data[RTE_MAX_ETHPORTS];
-
+/** Configuration structure for device arguments. */
struct mlx4_conf {
- uint8_t active_ports;
+ struct {
+ uint32_t present; /**< Bit-field for existing ports. */
+ uint32_t enabled; /**< Bit-field for user-enabled ports. */
+ } ports;
};
/* Available parameters list. */
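
The rewritten mlx4_conf drops the active_ports counter in favor of two 32-bit bit-fields, one bit per physical port, which is ample for MLX4 adapters. A self-contained sketch of the implied bookkeeping (the validation logic here is assumed, not lifted from the driver):

#include <stdint.h>

/* Restated from the hunk above so the sketch stands alone. */
struct mlx4_conf {
	struct {
		uint32_t present; /* bit-field for existing ports */
		uint32_t enabled; /* bit-field for user-enabled ports */
	} ports;
};

/* Assumed check: every user-enabled port must exist on the device. */
static int
mlx4_conf_ports_valid(const struct mlx4_conf *conf)
{
	return (conf->ports.enabled & ~conf->ports.present) == 0;
}
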
@@ -128,593 +85,6 @@ const char *pmd_mlx4_init_params[] = {
NULL,
};
-static int
-mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
-
-static int
-mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
-
-static int
-priv_rx_intr_vec_enable(struct priv *priv);
-
-static void
-priv_rx_intr_vec_disable(struct priv *priv);
-
-/**
- * Check if running as a secondary process.
- *
- * @return
- * Nonzero if running as a secondary process.
- */
-static inline int
-mlx4_is_secondary(void)
-{
- return rte_eal_process_type() != RTE_PROC_PRIMARY;
-}
-
-/**
- * Return private structure associated with an Ethernet device.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * Pointer to private structure.
- */
-static struct priv *
-mlx4_get_priv(struct rte_eth_dev *dev)
-{
- struct mlx4_secondary_data *sd;
-
- if (!mlx4_is_secondary())
- return dev->data->dev_private;
- sd = &mlx4_secondary_data[dev->data->port_id];
- return sd->data.dev_private;
-}
-
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- */
-void priv_lock(struct priv *priv)
-{
- rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- * Pointer to private structure.
- */
-void priv_unlock(struct priv *priv)
-{
- rte_spinlock_unlock(&priv->lock);
-}
-
-/* Allocate a buffer on the stack and fill it with a printf format string. */
-#define MKSTR(name, ...) \
- char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
- \
- snprintf(name, sizeof(name), __VA_ARGS__)
-
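
The long removal in this hunk (secondary-process plumbing, sysfs and ioctl wrappers, MTU and flag helpers, the RSS parent-queue code) does not all vanish from the driver: per the Makefile hunk above, the rewrite splits mlx4.c into mlx4_ethdev.c, mlx4_utils.c and friends, which is presumably where the surviving helpers land. The MKSTR macro just removed is worth restating, since the dry-run-snprintf-sized VLA idiom recurs in the relocated code:

#include <stdio.h>

/* MKSTR, as removed above: a dry-run snprintf(NULL, 0, ...) sizes a
 * stack VLA, then the second call formats into it. */
#define MKSTR(name, ...) \
	char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
	\
	snprintf(name, sizeof(name), __VA_ARGS__)

static void
print_sysfs_path(int port)	/* illustrative use of the idiom */
{
	MKSTR(path, "/sys/class/net/eth%d/mtu", port);
	puts(path);
}
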
-/**
- * Get interface name from private structure.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[out] ifname
- * Interface name output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
-{
- DIR *dir;
- struct dirent *dent;
- unsigned int dev_type = 0;
- unsigned int dev_port_prev = ~0u;
- char match[IF_NAMESIZE] = "";
-
- {
- MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
-
- dir = opendir(path);
- if (dir == NULL)
- return -1;
- }
- while ((dent = readdir(dir)) != NULL) {
- char *name = dent->d_name;
- FILE *file;
- unsigned int dev_port;
- int r;
-
- if ((name[0] == '.') &&
- ((name[1] == '\0') ||
- ((name[1] == '.') && (name[2] == '\0'))))
- continue;
-
- MKSTR(path, "%s/device/net/%s/%s",
- priv->ctx->device->ibdev_path, name,
- (dev_type ? "dev_id" : "dev_port"));
-
- file = fopen(path, "rb");
- if (file == NULL) {
- if (errno != ENOENT)
- continue;
- /*
- * Switch to dev_id when dev_port does not exist as
- * is the case with Linux kernel versions < 3.15.
- */
-try_dev_id:
- match[0] = '\0';
- if (dev_type)
- break;
- dev_type = 1;
- dev_port_prev = ~0u;
- rewinddir(dir);
- continue;
- }
- r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
- fclose(file);
- if (r != 1)
- continue;
- /*
- * Switch to dev_id when dev_port returns the same value for
- * all ports. May happen when using a MOFED release older than
- * 3.0 with a Linux kernel >= 3.15.
- */
- if (dev_port == dev_port_prev)
- goto try_dev_id;
- dev_port_prev = dev_port;
- if (dev_port == (priv->port - 1u))
- snprintf(match, sizeof(match), "%s", name);
- }
- closedir(dir);
- if (match[0] == '\0')
- return -1;
- strncpy(*ifname, match, sizeof(*ifname));
- return 0;
-}
-
-/**
- * Read from sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[out] buf
- * Data output buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_sysfs_read(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
- if (file == NULL)
- return -1;
- ret = fread(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) && (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
-}
-
-/**
- * Write to sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[in] buf
- * Data buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_sysfs_write(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "wb");
- if (file == NULL)
- return -1;
- ret = fwrite(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) || (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
-}
-
-/**
- * Get unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param[out] value
- * Value output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
-{
- int ret;
- unsigned long value_ret;
- char value_str[32];
-
- ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret == -1) {
- DEBUG("cannot read %s value from sysfs: %s",
- name, strerror(errno));
- return -1;
- }
- value_str[ret] = '\0';
- errno = 0;
- value_ret = strtoul(value_str, NULL, 0);
- if (errno) {
- DEBUG("invalid %s value `%s': %s", name, value_str,
- strerror(errno));
- return -1;
- }
- *value = value_ret;
- return 0;
-}
-
-/**
- * Set unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param value
- * Value to set.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
-{
- int ret;
- MKSTR(value_str, "%lu", value);
-
- ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret == -1) {
- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
- name, value_str, value, strerror(errno));
- return -1;
- }
- return 0;
-}
-
-/**
- * Perform ifreq ioctl() on associated Ethernet device.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param req
- * Request number to pass to ioctl().
- * @param[out] ifr
- * Interface request structure output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
-{
- int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
- int ret = -1;
-
- if (sock == -1)
- return ret;
- if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
- ret = ioctl(sock, req, ifr);
- close(sock);
- return ret;
-}
-
-/**
- * Get device MTU.
- *
- * @param priv
- * Pointer to private structure.
- * @param[out] mtu
- * MTU value output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_mtu(struct priv *priv, uint16_t *mtu)
-{
- unsigned long ulong_mtu;
-
- if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
- return -1;
- *mtu = ulong_mtu;
- return 0;
-}
-
-/**
- * Set device MTU.
- *
- * @param priv
- * Pointer to private structure.
- * @param mtu
- * MTU value to set.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_set_mtu(struct priv *priv, uint16_t mtu)
-{
- uint16_t new_mtu;
-
- if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
- priv_get_mtu(priv, &new_mtu))
- return -1;
- if (new_mtu == mtu)
- return 0;
- errno = EINVAL;
- return -1;
-}
-
-/**
- * Set device flags.
- *
- * @param priv
- * Pointer to private structure.
- * @param keep
- * Bitmask for flags that must remain untouched.
- * @param flags
- * Bitmask for flags to modify.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
-{
- unsigned long tmp;
-
- if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
- return -1;
- tmp &= keep;
- tmp |= (flags & (~keep));
- return priv_set_sysfs_ulong(priv, "flags", tmp);
-}
-
-/* Device configuration. */
-
-static int
-txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf);
-
-static void
-txq_cleanup(struct txq *txq);
-
-static int
-rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive,
- const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp, int children_n,
- struct rxq *rxq_parent);
-
-static void
-rxq_cleanup(struct rxq *rxq);
-
-/**
- * Create RSS parent queue.
- *
- * The new parent is inserted in front of the list in the private structure.
- *
- * @param priv
- * Pointer to private structure.
- * @param queues
- * Queues indices array, if NULL use all Rx queues.
- * @param children_n
- * The number of entries in queues[].
- *
- * @return
- * Pointer to a parent rxq structure, NULL on failure.
- */
-struct rxq *
-priv_parent_create(struct priv *priv,
- uint16_t queues[],
- uint16_t children_n)
-{
- int ret;
- uint16_t i;
- struct rxq *parent;
-
- parent = rte_zmalloc("parent queue",
- sizeof(*parent),
- RTE_CACHE_LINE_SIZE);
- if (!parent) {
- ERROR("cannot allocate memory for RSS parent queue");
- return NULL;
- }
- ret = rxq_setup(priv->dev, parent, 0, 0, 0,
- NULL, NULL, children_n, NULL);
- if (ret) {
- rte_free(parent);
- return NULL;
- }
- parent->rss.queues_n = children_n;
- if (queues) {
- for (i = 0; i < children_n; ++i)
- parent->rss.queues[i] = queues[i];
- } else {
- /* the default RSS ring case */
- assert(priv->rxqs_n == children_n);
- for (i = 0; i < priv->rxqs_n; ++i)
- parent->rss.queues[i] = i;
- }
- LIST_INSERT_HEAD(&priv->parents, parent, next);
- return parent;
-}
-
-/**
- * Clean up RX queue parent structure.
- *
- * @param parent
- * RX queue parent structure.
- */
-void
-rxq_parent_cleanup(struct rxq *parent)
-{
- LIST_REMOVE(parent, next);
- rxq_cleanup(parent);
- rte_free(parent);
-}
-
-/**
- * Clean up parent structures from the parent list.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_parent_list_cleanup(struct priv *priv)
-{
- while (!LIST_EMPTY(&priv->parents))
- rxq_parent_cleanup(LIST_FIRST(&priv->parents));
-}
-
-/**
- * Ethernet device configuration.
- *
- * Prepare the driver for a given number of TX and RX queues.
- * Allocate parent RSS queue when several RX queues are requested.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-dev_configure(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int rxqs_n = dev->data->nb_rx_queues;
- unsigned int txqs_n = dev->data->nb_tx_queues;
- unsigned int tmp;
-
- priv->rxqs = (void *)dev->data->rx_queues;
- priv->txqs = (void *)dev->data->tx_queues;
- if (txqs_n != priv->txqs_n) {
- INFO("%p: TX queues number update: %u -> %u",
- (void *)dev, priv->txqs_n, txqs_n);
- priv->txqs_n = txqs_n;
- }
- if (rxqs_n == priv->rxqs_n)
- return 0;
- if (!rte_is_power_of_2(rxqs_n) && !priv->isolated) {
- unsigned n_active;
-
- n_active = rte_align32pow2(rxqs_n + 1) >> 1;
- WARN("%p: number of RX queues must be a power"
- " of 2: %u queues among %u will be active",
- (void *)dev, n_active, rxqs_n);
- }
-
- INFO("%p: RX queues number update: %u -> %u",
- (void *)dev, priv->rxqs_n, rxqs_n);
- /* If RSS is enabled, disable it first. */
- if (priv->rss) {
- unsigned int i;
-
- /* Only if there are no remaining child RX queues. */
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] != NULL)
- return EINVAL;
- priv_parent_list_cleanup(priv);
- priv->rss = 0;
- priv->rxqs_n = 0;
- }
- if (rxqs_n <= 1) {
- /* Nothing else to do. */
- priv->rxqs_n = rxqs_n;
- return 0;
- }
- /* Allocate a new RSS parent queue if supported by hardware. */
- if (!priv->hw_rss) {
- ERROR("%p: only a single RX queue can be configured when"
- " hardware doesn't support RSS",
- (void *)dev);
- return EINVAL;
- }
- /* Fail if hardware doesn't support that many RSS queues. */
- if (rxqs_n >= priv->max_rss_tbl_sz) {
- ERROR("%p: only %u RX queues can be configured for RSS",
- (void *)dev, priv->max_rss_tbl_sz);
- return EINVAL;
- }
- priv->rss = 1;
- tmp = priv->rxqs_n;
- priv->rxqs_n = rxqs_n;
- if (priv->isolated)
- return 0;
- if (priv_parent_create(priv, NULL, priv->rxqs_n))
- return 0;
- /* Failure, rollback. */
- priv->rss = 0;
- priv->rxqs_n = tmp;
- return ENOMEM;
-}
-
/**
* DPDK callback for Ethernet device configuration.
*
@@ -722,3490 +92,78 @@ dev_configure(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
int ret;
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- ret = dev_configure(dev);
- assert(ret >= 0);
- priv_unlock(priv);
- return -ret;
-}
-
-static uint16_t mlx4_tx_burst(void *, struct rte_mbuf **, uint16_t);
-static uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
-
-/**
- * Configure secondary process queues from a private data pointer (primary
- * or secondary) and update burst callbacks. Can take place only once.
- *
- * All queues must have been previously created by the primary process to
- * avoid undefined behavior.
- *
- * @param priv
- * Private data pointer from either primary or secondary process.
- *
- * @return
- * Private data pointer from secondary process, NULL in case of error.
- */
-static struct priv *
-mlx4_secondary_data_setup(struct priv *priv)
-{
- unsigned int port_id = 0;
- struct mlx4_secondary_data *sd;
- void **tx_queues;
- void **rx_queues;
- unsigned int nb_tx_queues;
- unsigned int nb_rx_queues;
- unsigned int i;
-
- /* priv must be valid at this point. */
- assert(priv != NULL);
- /* priv->dev must also be valid but may point to local memory from
- * another process, possibly with the same address and must not
- * be dereferenced yet. */
- assert(priv->dev != NULL);
- /* Determine port ID by finding out where priv comes from. */
- while (1) {
- sd = &mlx4_secondary_data[port_id];
- rte_spinlock_lock(&sd->lock);
- /* Primary process? */
- if (sd->primary_priv == priv)
- break;
- /* Secondary process? */
- if (sd->data.dev_private == priv)
- break;
- rte_spinlock_unlock(&sd->lock);
- if (++port_id == RTE_DIM(mlx4_secondary_data))
- port_id = 0;
- }
- /* Switch to secondary private structure. If private data has already
- * been updated by another thread, there is nothing else to do. */
- priv = sd->data.dev_private;
- if (priv->dev->data == &sd->data)
- goto end;
- /* Sanity checks. Secondary private structure is supposed to point
- * to local eth_dev, itself still pointing to the shared device data
- * structure allocated by the primary process. */
- assert(sd->shared_dev_data != &sd->data);
- assert(sd->data.nb_tx_queues == 0);
- assert(sd->data.tx_queues == NULL);
- assert(sd->data.nb_rx_queues == 0);
- assert(sd->data.rx_queues == NULL);
- assert(priv != sd->primary_priv);
- assert(priv->dev->data == sd->shared_dev_data);
- assert(priv->txqs_n == 0);
- assert(priv->txqs == NULL);
- assert(priv->rxqs_n == 0);
- assert(priv->rxqs == NULL);
- nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
- nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
- /* Allocate local storage for queues. */
- tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
- sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
- RTE_CACHE_LINE_SIZE);
- rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
- sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
- RTE_CACHE_LINE_SIZE);
- if (tx_queues == NULL || rx_queues == NULL)
- goto error;
- /* Lock to prevent control operations during setup. */
- priv_lock(priv);
- /* TX queues. */
- for (i = 0; i != nb_tx_queues; ++i) {
- struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
- struct txq *txq;
-
- if (primary_txq == NULL)
- continue;
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
- primary_txq->socket);
- if (txq != NULL) {
- if (txq_setup(priv->dev,
- txq,
- primary_txq->elts_n * MLX4_PMD_SGE_WR_N,
- primary_txq->socket,
- NULL) == 0) {
- txq->stats.idx = primary_txq->stats.idx;
- tx_queues[i] = txq;
- continue;
- }
- rte_free(txq);
- }
- while (i) {
- txq = tx_queues[--i];
- txq_cleanup(txq);
- rte_free(txq);
- }
- goto error;
- }
- /* RX queues. */
- for (i = 0; i != nb_rx_queues; ++i) {
- struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];
-
- if (primary_rxq == NULL)
- continue;
- /* Not supported yet. */
- rx_queues[i] = NULL;
- }
- /* Update everything. */
- priv->txqs = (void *)tx_queues;
- priv->txqs_n = nb_tx_queues;
- priv->rxqs = (void *)rx_queues;
- priv->rxqs_n = nb_rx_queues;
- sd->data.rx_queues = rx_queues;
- sd->data.tx_queues = tx_queues;
- sd->data.nb_rx_queues = nb_rx_queues;
- sd->data.nb_tx_queues = nb_tx_queues;
- sd->data.dev_link = sd->shared_dev_data->dev_link;
- sd->data.mtu = sd->shared_dev_data->mtu;
- memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
- sizeof(sd->data.rx_queue_state));
- memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
- sizeof(sd->data.tx_queue_state));
- sd->data.dev_flags = sd->shared_dev_data->dev_flags;
- /* Use local data from now on. */
- rte_mb();
- priv->dev->data = &sd->data;
- rte_mb();
- priv->dev->tx_pkt_burst = mlx4_tx_burst;
- priv->dev->rx_pkt_burst = removed_rx_burst;
- priv_unlock(priv);
-end:
- /* More sanity checks. */
- assert(priv->dev->tx_pkt_burst == mlx4_tx_burst);
- assert(priv->dev->rx_pkt_burst == removed_rx_burst);
- assert(priv->dev->data == &sd->data);
- rte_spinlock_unlock(&sd->lock);
- return priv;
-error:
- priv_unlock(priv);
- rte_free(tx_queues);
- rte_free(rx_queues);
- rte_spinlock_unlock(&sd->lock);
- return NULL;
-}
-
-/* TX queues handling. */
-
-/**
- * Allocate TX queue elements.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param elts_n
- * Number of elements to allocate.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-txq_alloc_elts(struct txq *txq, unsigned int elts_n)
-{
- unsigned int i;
- struct txq_elt (*elts)[elts_n] =
- rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
- linear_t (*elts_linear)[elts_n] =
- rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0,
- txq->socket);
- struct ibv_mr *mr_linear = NULL;
- int ret = 0;
-
- if ((elts == NULL) || (elts_linear == NULL)) {
- ERROR("%p: can't allocate packets array", (void *)txq);
- ret = ENOMEM;
- goto error;
- }
- mr_linear =
- ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear),
- IBV_ACCESS_LOCAL_WRITE);
- if (mr_linear == NULL) {
- ERROR("%p: unable to configure MR, ibv_reg_mr() failed",
- (void *)txq);
- ret = EINVAL;
- goto error;
- }
- for (i = 0; (i != elts_n); ++i) {
- struct txq_elt *elt = &(*elts)[i];
-
- elt->buf = NULL;
- }
- DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
- txq->elts_n = elts_n;
- txq->elts = elts;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- /* Request send completion every MLX4_PMD_TX_PER_COMP_REQ packets or
- * at least 4 times per ring. */
- txq->elts_comp_cd_init =
- ((MLX4_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
- MLX4_PMD_TX_PER_COMP_REQ : (elts_n / 4));
- txq->elts_comp_cd = txq->elts_comp_cd_init;
- txq->elts_linear = elts_linear;
- txq->mr_linear = mr_linear;
- assert(ret == 0);
- return 0;
-error:
- if (mr_linear != NULL)
- claim_zero(ibv_dereg_mr(mr_linear));
-
- rte_free(elts_linear);
- rte_free(elts);
-
- DEBUG("%p: failed, freed everything", (void *)txq);
- assert(ret > 0);
- return ret;
-}
-
-/**
- * Free TX queue elements.
- *
- * @param txq
- * Pointer to TX queue structure.
- */
-static void
-txq_free_elts(struct txq *txq)
-{
- unsigned int elts_n = txq->elts_n;
- unsigned int elts_head = txq->elts_head;
- unsigned int elts_tail = txq->elts_tail;
- struct txq_elt (*elts)[elts_n] = txq->elts;
- linear_t (*elts_linear)[elts_n] = txq->elts_linear;
- struct ibv_mr *mr_linear = txq->mr_linear;
-
- DEBUG("%p: freeing WRs", (void *)txq);
- txq->elts_n = 0;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- txq->elts_comp_cd = 0;
- txq->elts_comp_cd_init = 0;
- txq->elts = NULL;
- txq->elts_linear = NULL;
- txq->mr_linear = NULL;
- if (mr_linear != NULL)
- claim_zero(ibv_dereg_mr(mr_linear));
-
- rte_free(elts_linear);
- if (elts == NULL)
- return;
- while (elts_tail != elts_head) {
- struct txq_elt *elt = &(*elts)[elts_tail];
-
- assert(elt->buf != NULL);
- rte_pktmbuf_free(elt->buf);
-#ifndef NDEBUG
- /* Poisoning. */
- memset(elt, 0x77, sizeof(*elt));
-#endif
- if (++elts_tail == elts_n)
- elts_tail = 0;
- }
- rte_free(elts);
-}
-
-
-/**
- * Clean up a TX queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param txq
- * Pointer to TX queue structure.
- */
-static void
-txq_cleanup(struct txq *txq)
-{
- struct ibv_exp_release_intf_params params;
- size_t i;
-
- DEBUG("cleaning up %p", (void *)txq);
- txq_free_elts(txq);
- if (txq->if_qp != NULL) {
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- assert(txq->qp != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq->priv->ctx,
- txq->if_qp,
- &params));
- }
- if (txq->if_cq != NULL) {
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- assert(txq->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq->priv->ctx,
- txq->if_cq,
- &params));
- }
- if (txq->qp != NULL)
- claim_zero(ibv_destroy_qp(txq->qp));
- if (txq->cq != NULL)
- claim_zero(ibv_destroy_cq(txq->cq));
- if (txq->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
- txq->rd,
- &attr));
- }
- for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
- if (txq->mp2mr[i].mp == NULL)
- break;
- assert(txq->mp2mr[i].mr != NULL);
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
- }
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * Manage TX completions.
- *
- * When sending a burst, mlx4_tx_burst() posts several WRs.
- * To improve performance, a completion event is only required once every
- * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
- * for other WRs, but this information would not be used anyway.
- *
- * @param txq
- * Pointer to TX queue structure.
- *
- * @return
- * 0 on success, -1 on failure.
- */
-static int
-txq_complete(struct txq *txq)
-{
- unsigned int elts_comp = txq->elts_comp;
- unsigned int elts_tail = txq->elts_tail;
- const unsigned int elts_n = txq->elts_n;
- int wcs_n;
-
- if (unlikely(elts_comp == 0))
- return 0;
-#ifdef DEBUG_SEND
- DEBUG("%p: processing %u work requests completions",
- (void *)txq, elts_comp);
-#endif
- wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
- if (unlikely(wcs_n == 0))
- return 0;
- if (unlikely(wcs_n < 0)) {
- DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
- (void *)txq, wcs_n);
- return -1;
- }
- elts_comp -= wcs_n;
- assert(elts_comp <= txq->elts_comp);
- /*
- * Assume WC status is successful as nothing can be done about it
- * anyway.
- */
- elts_tail += wcs_n * txq->elts_comp_cd_init;
- if (elts_tail >= elts_n)
- elts_tail -= elts_n;
- txq->elts_tail = elts_tail;
- txq->elts_comp = elts_comp;
- return 0;
-}
-
-struct mlx4_check_mempool_data {
- int ret;
- char *start;
- char *end;
-};
-
-/* Called by mlx4_check_mempool() when iterating the memory chunks. */
-static void mlx4_check_mempool_cb(struct rte_mempool *mp,
- void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned mem_idx)
-{
- struct mlx4_check_mempool_data *data = opaque;
-
- (void)mp;
- (void)mem_idx;
-
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
- return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
- return;
- }
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
- }
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
- return;
- }
-	/* Error, mempool is not virtually contiguous. */
- data->ret = -1;
-}
-
-/**
- * Check if a mempool can be used: it must be virtually contiguous.
- *
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area
- *
- * @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
- */
-static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
-{
- struct mlx4_check_mempool_data data;
-
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
-
- return data.ret;
-}
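/*
 * Illustrative, self-contained sketch (not part of the driver): the
 * contiguity check above grows a single [start, end) window as chunks are
 * reported in arbitrary order, and fails as soon as a chunk is adjacent to
 * neither end. The chunk list below is made up for the demonstration.
 */
#include <stddef.h>
#include <stdio.h>

struct chunk { char *addr; size_t len; };

int main(void)
{
	static char area[3 * 4096];
	/* One area reported as three chunks, deliberately out of order. */
	struct chunk chunks[] = {
		{ area + 4096, 4096 },
		{ area + 8192, 4096 },
		{ area, 4096 },
	};
	char *start = NULL;
	char *end = NULL;
	int ret = 0;
	unsigned int i;

	for (i = 0; (i != 3) && (ret == 0); ++i) {
		struct chunk *c = &chunks[i];

		if (start == NULL && end == NULL) {
			start = c->addr;
			end = c->addr + c->len;
		} else if (end == c->addr) {
			end += c->len; /* chunk extends the window upward */
		} else if (start == c->addr + c->len) {
			start -= c->len; /* chunk extends it downward */
		} else {
			ret = -1; /* hole: not virtually contiguous */
		}
	}
	printf("contiguous: %s, size: %zu\n",
	       (ret == 0) ? "yes" : "no", (size_t)(end - start));
	return 0;
}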
-
-/* For best performance, this function should not be inlined. */
-static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
- __rte_noinline;
-
-/**
- * Register mempool as a memory region.
- *
- * @param pd
- * Pointer to protection domain.
- * @param mp
- * Pointer to memory pool.
- *
- * @return
- * Memory region pointer, NULL in case of error.
- */
-static struct ibv_mr *
-mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
-{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
-
- if (mlx4_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
- }
-
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
-	/* Round start and end to the hugepage boundary if they fall
-	 * inside a memory segment. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
- }
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- return ibv_reg_mr(pd,
- (void *)start,
- end - start,
- IBV_ACCESS_LOCAL_WRITE);
-}
-
-/**
- * Get Memory Pool (MP) from mbuf. If the mbuf is indirect, the pool of the
- * underlying direct mbuf (where the data actually resides) is returned
- * instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-txq_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
-/**
- * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- *
- * @return
- * mr->lkey on success, (uint32_t)-1 on failure.
- */
-static uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-{
- unsigned int i;
- struct ibv_mr *mr;
-
- for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i].mp == mp) {
- assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
- }
- }
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (void *)mp);
- mr = mlx4_mp2mr(txq->priv->pd, mp);
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq);
- return (uint32_t)-1;
- }
- if (unlikely(i == elemof(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq);
- --i;
- claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
- }
- /* Store the new entry. */
- txq->mp2mr[i].mp = mp;
- txq->mp2mr[i].mr = mr;
- txq->mp2mr[i].lkey = mr->lkey;
-	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
- (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
-}
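/*
 * Illustrative, self-contained sketch (not part of the driver): the
 * "table full" branch above is drop-oldest eviction on a fixed array.
 * Slot 0 is released, the remaining slots shift down, and the new entry
 * is written at the tail. struct entry and TBL_N are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define TBL_N 4

struct entry { int key; int val; };

static void
tbl_insert_evict(struct entry (*tbl)[TBL_N], struct entry e)
{
	/* Drop (*tbl)[0], the oldest entry, and shift the rest down. */
	memmove(&(*tbl)[0], &(*tbl)[1], sizeof(*tbl) - sizeof((*tbl)[0]));
	(*tbl)[TBL_N - 1] = e;
}

int main(void)
{
	struct entry tbl[TBL_N] = { {1, 0}, {2, 0}, {3, 0}, {4, 0} };
	unsigned int i;

	tbl_insert_evict(&tbl, (struct entry){ 5, 0 });
	for (i = 0; i != TBL_N; ++i)
		printf("%d ", tbl[i].key); /* prints: 2 3 4 5 */
	printf("\n");
	return 0;
}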
-
-struct txq_mp2mr_mbuf_check_data {
- int ret;
-};
-
-/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like an mbuf.
- *
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
- */
-static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
-{
- struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
-
- /* Check whether mbuf structure fits element size and whether mempool
- * pointer is valid. */
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
-}
-
-/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
- *
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to TX queue structure.
- */
-static void
-txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
-{
- struct txq *txq = arg;
- struct txq_mp2mr_mbuf_check_data data = {
- .ret = 0,
- };
-
-	/* Register mempool only if its objects look like mbufs. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- txq_mp2mr(txq, mp);
-}
-
-#if MLX4_PMD_SGE_WR_N > 1
-
-/**
- * Copy scattered mbuf contents to a single linear buffer.
- *
- * @param[out] linear
- * Linear output buffer.
- * @param[in] buf
- * Scattered input buffer.
- *
- * @return
- * Number of bytes copied to the output buffer or 0 if not large enough.
- */
-static unsigned int
-linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
-{
- unsigned int size = 0;
- unsigned int offset;
-
- do {
- unsigned int len = DATA_LEN(buf);
-
- offset = size;
- size += len;
- if (unlikely(size > sizeof(*linear)))
- return 0;
- memcpy(&(*linear)[offset],
- rte_pktmbuf_mtod(buf, uint8_t *),
- len);
- buf = NEXT(buf);
- } while (buf != NULL);
- return size;
-}
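/*
 * Illustrative, self-contained sketch (not part of the driver): segment
 * chain linearization with a plain linked list standing in for the mbuf
 * chain. As in linearize_mbuf(), the copy bails out with 0 when the total
 * would overflow the flat output buffer.
 */
#include <stdio.h>
#include <string.h>

struct seg {
	const char *data;
	unsigned int len;
	struct seg *next;
};

static unsigned int
linearize(char *dst, unsigned int cap, const struct seg *s)
{
	unsigned int size = 0;

	do {
		if (size + s->len > cap)
			return 0; /* would not fit */
		memcpy(dst + size, s->data, s->len);
		size += s->len;
		s = s->next;
	} while (s != NULL);
	return size;
}

int main(void)
{
	struct seg c = { "!", 1, NULL };
	struct seg b = { "world", 5, &c };
	struct seg a = { "hello ", 6, &b };
	char buf[16];
	unsigned int n = linearize(buf, sizeof(buf), &a);

	printf("%u bytes: %.*s\n", n, (int)n, buf);
	return 0;
}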
-
-/**
- * Handle scattered buffers for mlx4_tx_burst().
- *
- * @param txq
- * TX queue structure.
- * @param segs
- * Number of segments in buf.
- * @param elt
- * TX queue element to fill.
- * @param[in] buf
- * Buffer to process.
- * @param elts_head
- * Index of the linear buffer to use if necessary (normally txq->elts_head).
- * @param[out] sges
- * Array filled with SGEs on success.
- *
- * @return
- * A structure containing the processed packet size in bytes and the
- * number of SGEs. Both fields are set to (unsigned int)-1 in case of
- * failure.
- */
-static struct tx_burst_sg_ret {
- unsigned int length;
- unsigned int num;
-}
-tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
- struct rte_mbuf *buf, unsigned int elts_head,
- struct ibv_sge (*sges)[MLX4_PMD_SGE_WR_N])
-{
- unsigned int sent_size = 0;
- unsigned int j;
- int linearize = 0;
-
- /* When there are too many segments, extra segments are
- * linearized in the last SGE. */
- if (unlikely(segs > elemof(*sges))) {
- segs = (elemof(*sges) - 1);
- linearize = 1;
- }
- /* Update element. */
- elt->buf = buf;
- /* Register segments as SGEs. */
- for (j = 0; (j != segs); ++j) {
- struct ibv_sge *sge = &(*sges)[j];
- uint32_t lkey;
-
- /* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, txq_mb2mp(buf));
- if (unlikely(lkey == (uint32_t)-1)) {
- /* MR does not exist. */
- DEBUG("%p: unable to get MP <-> MR association",
- (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
- }
- /* Update SGE. */
- sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
- if (txq->priv->vf)
- rte_prefetch0((volatile void *)
- (uintptr_t)sge->addr);
- sge->length = DATA_LEN(buf);
- sge->lkey = lkey;
- sent_size += sge->length;
- buf = NEXT(buf);
- }
- /* If buf is not NULL here and is not going to be linearized,
- * nb_segs is not valid. */
- assert(j == segs);
- assert((buf == NULL) || (linearize));
- /* Linearize extra segments. */
- if (linearize) {
- struct ibv_sge *sge = &(*sges)[segs];
- linear_t *linear = &(*txq->elts_linear)[elts_head];
- unsigned int size = linearize_mbuf(linear, buf);
-
- assert(segs == (elemof(*sges) - 1));
- if (size == 0) {
- /* Invalid packet. */
- DEBUG("%p: packet too large to be linearized.",
- (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
- }
- /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately. */
- if (elemof(*sges) == 1) {
- do {
- struct rte_mbuf *next = NEXT(buf);
-
- rte_pktmbuf_free_seg(buf);
- buf = next;
- } while (buf != NULL);
- elt->buf = NULL;
- }
- /* Update SGE. */
- sge->addr = (uintptr_t)&(*linear)[0];
- sge->length = size;
- sge->lkey = txq->mr_linear->lkey;
- sent_size += size;
- /* Include last segment. */
- segs++;
- }
- return (struct tx_burst_sg_ret){
- .length = sent_size,
- .num = segs,
- };
-stop:
- return (struct tx_burst_sg_ret){
- .length = -1,
- .num = -1,
- };
-}
-
-#endif /* MLX4_PMD_SGE_WR_N > 1 */
-
-/**
- * DPDK callback for TX.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static uint16_t
-mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct txq *txq = (struct txq *)dpdk_txq;
- unsigned int elts_head = txq->elts_head;
- const unsigned int elts_n = txq->elts_n;
- unsigned int elts_comp_cd = txq->elts_comp_cd;
- unsigned int elts_comp = 0;
- unsigned int i;
- unsigned int max;
- int err;
-
- assert(elts_comp_cd != 0);
- txq_complete(txq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
- assert(max >= 1);
- assert(max <= elts_n);
- /* Always leave one free entry in the ring. */
- --max;
- if (max == 0)
- return 0;
- if (max > pkts_n)
- max = pkts_n;
- for (i = 0; (i != max); ++i) {
- struct rte_mbuf *buf = pkts[i];
- unsigned int elts_head_next =
- (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
- struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
- struct txq_elt *elt = &(*txq->elts)[elts_head];
- unsigned int segs = NB_SEGS(buf);
-#ifdef MLX4_PMD_SOFT_COUNTERS
- unsigned int sent_size = 0;
-#endif
- uint32_t send_flags = 0;
-
- /* Clean up old buffer. */
- if (likely(elt->buf != NULL)) {
- struct rte_mbuf *tmp = elt->buf;
-
-#ifndef NDEBUG
- /* Poisoning. */
- memset(elt, 0x66, sizeof(*elt));
-#endif
- /* Faster than rte_pktmbuf_free(). */
- do {
- struct rte_mbuf *next = NEXT(tmp);
-
- rte_pktmbuf_free_seg(tmp);
- tmp = next;
- } while (tmp != NULL);
- }
- /* Request TX completion. */
- if (unlikely(--elts_comp_cd == 0)) {
- elts_comp_cd = txq->elts_comp_cd_init;
- ++elts_comp;
- send_flags |= IBV_EXP_QP_BURST_SIGNALED;
- }
-		/* Enable HW checksum offload if requested. */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
- /* HW does not support checksum offloads at arbitrary
- * offsets but automatically recognizes the packet
- * type. For inner L3/L4 checksums, only VXLAN (UDP)
- * tunnels are currently supported. */
- if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
- send_flags |= IBV_EXP_QP_BURST_TUNNEL;
- }
- if (likely(segs == 1)) {
- uintptr_t addr;
- uint32_t length;
- uint32_t lkey;
-
- /* Retrieve buffer information. */
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- length = DATA_LEN(buf);
- /* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, txq_mb2mp(buf));
- if (unlikely(lkey == (uint32_t)-1)) {
- /* MR does not exist. */
- DEBUG("%p: unable to get MP <-> MR"
- " association", (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
- }
- /* Update element. */
- elt->buf = buf;
- if (txq->priv->vf)
- rte_prefetch0((volatile void *)
- (uintptr_t)addr);
- RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
- /* Put packet into send queue. */
-#if MLX4_PMD_MAX_INLINE > 0
- if (length <= txq->max_inline)
- err = txq->if_qp->send_pending_inline
- (txq->qp,
- (void *)addr,
- length,
- send_flags);
- else
-#endif
- err = txq->if_qp->send_pending
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags);
- if (unlikely(err))
- goto stop;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- sent_size += length;
-#endif
- } else {
-#if MLX4_PMD_SGE_WR_N > 1
- struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
- struct tx_burst_sg_ret ret;
-
- ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
- &sges);
- if (ret.length == (unsigned int)-1)
- goto stop;
- RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
- /* Put SG list into send queue. */
- err = txq->if_qp->send_pending_sg_list
- (txq->qp,
- sges,
- ret.num,
- send_flags);
- if (unlikely(err))
- goto stop;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- sent_size += ret.length;
-#endif
-#else /* MLX4_PMD_SGE_WR_N > 1 */
- DEBUG("%p: TX scattered buffers support not"
- " compiled in", (void *)txq);
- goto stop;
-#endif /* MLX4_PMD_SGE_WR_N > 1 */
- }
- elts_head = elts_head_next;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increment sent bytes counter. */
- txq->stats.obytes += sent_size;
-#endif
- }
-stop:
- /* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
- return 0;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increment sent packets counter. */
- txq->stats.opackets += i;
-#endif
- /* Ring QP doorbell. */
- err = txq->if_qp->send_flush(txq->qp);
- if (unlikely(err)) {
- /* A nonzero value is not supposed to be returned.
- * Nothing can be done about it. */
- DEBUG("%p: send_flush() failed with error %d",
- (void *)txq, err);
- }
- txq->elts_head = elts_head;
- txq->elts_comp += elts_comp;
- txq->elts_comp_cd = elts_comp_cd;
- return i;
-}
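/*
 * Illustrative, self-contained sketch (not part of the driver): the free
 * slot computation at the top of mlx4_tx_burst() relies on unsigned
 * wraparound so that (head - tail) is correct even when tail > head, and
 * one entry is always kept free so head == tail can only mean "empty".
 */
#include <assert.h>
#include <stdio.h>

static unsigned int
ring_room(unsigned int head, unsigned int tail, unsigned int ring_n)
{
	unsigned int max = ring_n - (head - tail);

	if (max > ring_n) /* (head - tail) wrapped below zero */
		max -= ring_n;
	assert(max >= 1 && max <= ring_n);
	return max - 1; /* always leave one free entry */
}

int main(void)
{
	printf("%u\n", ring_room(10, 50, 64)); /* 39 usable slots */
	printf("%u\n", ring_room(50, 10, 64)); /* 23 usable slots */
	return 0;
}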
-
-/**
- * DPDK callback for TX in secondary processes.
- *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal TX burst callback.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static uint16_t
-mlx4_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct txq *txq = dpdk_txq;
- struct priv *priv = mlx4_secondary_data_setup(txq->priv);
- struct priv *primary_priv;
- unsigned int index;
-
- if (priv == NULL)
- return 0;
- primary_priv =
- mlx4_secondary_data[priv->dev->data->port_id].primary_priv;
- /* Look for queue index in both private structures. */
- for (index = 0; index != priv->txqs_n; ++index)
- if (((*primary_priv->txqs)[index] == txq) ||
- ((*priv->txqs)[index] == txq))
- break;
- if (index == priv->txqs_n)
- return 0;
- txq = (*priv->txqs)[index];
- return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
-}
-
-/**
- * Configure a TX queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param txq
- * Pointer to TX queue structure.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
-{
- struct priv *priv = mlx4_get_priv(dev);
- struct txq tmpl = {
- .priv = priv,
- .socket = socket
- };
- union {
- struct ibv_exp_query_intf_params params;
- struct ibv_exp_qp_init_attr init;
- struct ibv_exp_res_domain_init_attr rd;
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_qp_attr mod;
- } attr;
- enum ibv_exp_query_intf_status status;
- int ret = 0;
-
- (void)conf; /* Thresholds configuration (ignored). */
- if (priv == NULL)
- return EINVAL;
- if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
- ERROR("%p: invalid number of TX descriptors (must be a"
- " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
- return EINVAL;
- }
- desc /= MLX4_PMD_SGE_WR_N;
- /* MRs will be registered in mp2mr[] later. */
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
- };
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
- if (tmpl.cq == NULL) {
- ret = ENOMEM;
- ERROR("%p: CQ creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- attr.init = (struct ibv_exp_qp_init_attr){
- /* CQ to be associated with the send queue. */
- .send_cq = tmpl.cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = tmpl.cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_send_sge = ((priv->device_attr.max_sge <
- MLX4_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX4_PMD_SGE_WR_N),
-#if MLX4_PMD_MAX_INLINE > 0
- .max_inline_data = MLX4_PMD_MAX_INLINE,
-#endif
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- /* Do *NOT* enable this, completions events are managed per
- * TX burst. */
- .sq_sig_all = 0,
- .pd = priv->pd,
- .res_domain = tmpl.rd,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
- };
- tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
- if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
-#if MLX4_PMD_MAX_INLINE > 0
- /* ibv_create_qp() updates this value. */
- tmpl.max_inline = attr.init.cap.max_inline_data;
-#endif
- attr.mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
- (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- ret = txq_alloc_elts(&tmpl, desc);
- if (ret) {
- ERROR("%p: TXQ allocation failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- attr.mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .obj = tmpl.qp,
-#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK
- /* MC loopback must be disabled when not using a VF. */
- .family_flags =
- (!priv->vf ?
- IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK :
- 0),
-#endif
- };
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ERROR("%p: QP interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- /* Clean up txq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
- txq_cleanup(txq);
- *txq = tmpl;
- DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
- /* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq);
- assert(ret == 0);
- return 0;
-error:
- txq_cleanup(&tmpl);
- assert(ret > 0);
- return ret;
-}
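/*
 * Illustrative sketch (not part of the driver): the same QP bring-up with
 * plain libibverbs calls. A freshly created QP starts in RESET and, for a
 * raw packet QP, must pass through INIT and RTR before reaching RTS, the
 * only state that allows posting sends. Error handling is elided.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static int
qp_to_rts(struct ibv_qp *qp, uint8_t port)
{
	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_INIT,
		.port_num = port,
	};
	int ret;

	/* RESET -> INIT: the port must be specified at this step. */
	ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);
	if (ret)
		return ret;
	/* INIT -> RTR: ready to receive. */
	attr.qp_state = IBV_QPS_RTR;
	ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE);
	if (ret)
		return ret;
	/* RTR -> RTS: ready to send. */
	attr.qp_state = IBV_QPS_RTS;
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}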
-
-/**
- * DPDK callback to configure a TX queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * TX queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
-{
- struct priv *priv = dev->data->dev_private;
- struct txq *txq = (*priv->txqs)[idx];
- int ret;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
- }
- if (txq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)txq);
- if (priv->started) {
- priv_unlock(priv);
- return -EEXIST;
- }
- (*priv->txqs)[idx] = NULL;
- txq_cleanup(txq);
- } else {
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
- if (txq == NULL) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
- }
- ret = txq_setup(dev, txq, desc, socket, conf);
- if (ret)
- rte_free(txq);
- else {
- txq->stats.idx = idx;
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq);
- (*priv->txqs)[idx] = txq;
- /* Update send callback. */
- dev->tx_pkt_burst = mlx4_tx_burst;
- }
- priv_unlock(priv);
- return -ret;
-}
-
-/**
- * DPDK callback to release a TX queue.
- *
- * @param dpdk_txq
- * Generic TX queue pointer.
- */
-static void
-mlx4_tx_queue_release(void *dpdk_txq)
-{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct priv *priv;
- unsigned int i;
-
- if (mlx4_is_secondary())
- return;
- if (txq == NULL)
- return;
- priv = txq->priv;
- priv_lock(priv);
- for (i = 0; (i != priv->txqs_n); ++i)
- if ((*priv->txqs)[i] == txq) {
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq);
- (*priv->txqs)[i] = NULL;
- break;
- }
- txq_cleanup(txq);
- rte_free(txq);
- priv_unlock(priv);
-}
-
-/* RX queues handling. */
-
-/**
- * Allocate RX queue elements with scattered packets support.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param elts_n
- * Number of elements to allocate.
- * @param[in] pool
- * If not NULL, fetch buffers from this array instead of allocating them
- * with rte_pktmbuf_alloc().
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n,
- struct rte_mbuf **pool)
-{
- unsigned int i;
- struct rxq_elt_sp (*elts)[elts_n] =
- rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
- rxq->socket);
- int ret = 0;
-
- if (elts == NULL) {
- ERROR("%p: can't allocate packets array", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- /* For each WR (packet). */
- for (i = 0; (i != elts_n); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
- struct ibv_recv_wr *wr = &elt->wr;
- struct ibv_sge (*sges)[(elemof(elt->sges))] = &elt->sges;
-
- /* These two arrays must have the same size. */
- assert(elemof(elt->sges) == elemof(elt->bufs));
- /* Configure WR. */
- wr->wr_id = i;
- wr->next = &(*elts)[(i + 1)].wr;
- wr->sg_list = &(*sges)[0];
- wr->num_sge = elemof(*sges);
- /* For each SGE (segment). */
- for (j = 0; (j != elemof(elt->bufs)); ++j) {
- struct ibv_sge *sge = &(*sges)[j];
- struct rte_mbuf *buf;
-
- if (pool != NULL) {
- buf = *(pool++);
- assert(buf != NULL);
- rte_pktmbuf_reset(buf);
- } else
- buf = rte_pktmbuf_alloc(rxq->mp);
- if (buf == NULL) {
- assert(pool == NULL);
- ERROR("%p: empty mbuf pool", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- elt->bufs[j] = buf;
- /* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
- /* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
- /* sge->addr must be able to store a pointer. */
- assert(sizeof(sge->addr) >= sizeof(uintptr_t));
- if (j == 0) {
- /* The first SGE keeps its headroom. */
- sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
- sge->length = (buf->buf_len -
- RTE_PKTMBUF_HEADROOM);
- } else {
- /* Subsequent SGEs lose theirs. */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
- SET_DATA_OFF(buf, 0);
- sge->addr = (uintptr_t)buf->buf_addr;
- sge->length = buf->buf_len;
- }
- sge->lkey = rxq->mr->lkey;
- /* Redundant check for tailroom. */
- assert(sge->length == rte_pktmbuf_tailroom(buf));
- }
- }
- /* The last WR pointer must be NULL. */
- (*elts)[(i - 1)].wr.next = NULL;
- DEBUG("%p: allocated and configured %u WRs (%zu segments)",
- (void *)rxq, elts_n, (elts_n * elemof((*elts)[0].sges)));
- rxq->elts_n = elts_n;
- rxq->elts_head = 0;
- rxq->elts.sp = elts;
- assert(ret == 0);
- return 0;
-error:
- if (elts != NULL) {
- assert(pool == NULL);
- for (i = 0; (i != elemof(*elts)); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
-
- for (j = 0; (j != elemof(elt->bufs)); ++j) {
- struct rte_mbuf *buf = elt->bufs[j];
-
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
- }
- }
- rte_free(elts);
- }
- DEBUG("%p: failed, freed everything", (void *)rxq);
- assert(ret > 0);
- return ret;
-}
-
-/**
- * Free RX queue elements with scattered packets support.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_free_elts_sp(struct rxq *rxq)
-{
- unsigned int i;
- unsigned int elts_n = rxq->elts_n;
- struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp;
-
- DEBUG("%p: freeing WRs", (void *)rxq);
- rxq->elts_n = 0;
- rxq->elts.sp = NULL;
- if (elts == NULL)
- return;
- for (i = 0; (i != elemof(*elts)); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
-
- for (j = 0; (j != elemof(elt->bufs)); ++j) {
- struct rte_mbuf *buf = elt->bufs[j];
-
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
- }
- }
- rte_free(elts);
-}
-
-/**
- * Allocate RX queue elements.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param elts_n
- * Number of elements to allocate.
- * @param[in] pool
- * If not NULL, fetch buffers from this array instead of allocating them
- * with rte_pktmbuf_alloc().
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool)
-{
- unsigned int i;
- struct rxq_elt (*elts)[elts_n] =
- rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
- rxq->socket);
- int ret = 0;
-
- if (elts == NULL) {
- ERROR("%p: can't allocate packets array", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- /* For each WR (packet). */
- for (i = 0; (i != elts_n); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct ibv_recv_wr *wr = &elt->wr;
- struct ibv_sge *sge = &(*elts)[i].sge;
- struct rte_mbuf *buf;
-
- if (pool != NULL) {
- buf = *(pool++);
- assert(buf != NULL);
- rte_pktmbuf_reset(buf);
- } else
- buf = rte_pktmbuf_alloc(rxq->mp);
- if (buf == NULL) {
- assert(pool == NULL);
- ERROR("%p: empty mbuf pool", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- /* Configure WR. Work request ID contains its own index in
- * the elts array and the offset between SGE buffer header and
- * its data. */
- WR_ID(wr->wr_id).id = i;
- WR_ID(wr->wr_id).offset =
- (((uintptr_t)buf->buf_addr + RTE_PKTMBUF_HEADROOM) -
- (uintptr_t)buf);
- wr->next = &(*elts)[(i + 1)].wr;
- wr->sg_list = sge;
- wr->num_sge = 1;
- /* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
- /* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
- /* sge->addr must be able to store a pointer. */
- assert(sizeof(sge->addr) >= sizeof(uintptr_t));
- /* SGE keeps its headroom. */
- sge->addr = (uintptr_t)
- ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
- sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
- sge->lkey = rxq->mr->lkey;
- /* Redundant check for tailroom. */
- assert(sge->length == rte_pktmbuf_tailroom(buf));
- /* Make sure elts index and SGE mbuf pointer can be deduced
- * from WR ID. */
- if ((WR_ID(wr->wr_id).id != i) ||
- ((void *)((uintptr_t)sge->addr -
- WR_ID(wr->wr_id).offset) != buf)) {
- ERROR("%p: cannot store index and offset in WR ID",
- (void *)rxq);
- sge->addr = 0;
- rte_pktmbuf_free(buf);
- ret = EOVERFLOW;
- goto error;
- }
- }
- /* The last WR pointer must be NULL. */
- (*elts)[(i - 1)].wr.next = NULL;
- DEBUG("%p: allocated and configured %u single-segment WRs",
- (void *)rxq, elts_n);
- rxq->elts_n = elts_n;
- rxq->elts_head = 0;
- rxq->elts.no_sp = elts;
- assert(ret == 0);
- return 0;
-error:
- if (elts != NULL) {
- assert(pool == NULL);
- for (i = 0; (i != elemof(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf;
-
- if (elt->sge.addr == 0)
- continue;
- assert(WR_ID(elt->wr.wr_id).id == i);
- buf = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(elt->wr.wr_id).offset);
- rte_pktmbuf_free_seg(buf);
- }
- rte_free(elts);
- }
- DEBUG("%p: failed, freed everything", (void *)rxq);
- assert(ret > 0);
- return ret;
-}
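/*
 * Illustrative, self-contained sketch (not part of the driver): one way
 * to pack an element index and a buffer offset into the 64-bit wr_id, as
 * the WR_ID() accessor used above presumably does. The exact layout is
 * hypothetical; the point is that the mbuf address can be recovered from
 * the SGE address and the stored offset alone.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union wr_id {
	uint64_t raw;
	struct {
		uint32_t id;     /* index into the elts array */
		uint32_t offset; /* SGE address - mbuf address */
	} data;
};

int main(void)
{
	static char mbuf[2048]; /* stand-in for an rte_mbuf */
	uintptr_t sge_addr = (uintptr_t)mbuf + 128; /* data past headroom */
	union wr_id w;

	w.data.id = 42;
	w.data.offset = (uint32_t)(sge_addr - (uintptr_t)mbuf);
	/* Later, given only w and the SGE address: */
	assert((void *)(sge_addr - w.data.offset) == (void *)mbuf);
	printf("elt %u, offset %u\n", w.data.id, w.data.offset);
	return 0;
}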
-
-/**
- * Free RX queue elements.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_free_elts(struct rxq *rxq)
-{
- unsigned int i;
- unsigned int elts_n = rxq->elts_n;
- struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp;
-
- DEBUG("%p: freeing WRs", (void *)rxq);
- rxq->elts_n = 0;
- rxq->elts.no_sp = NULL;
- if (elts == NULL)
- return;
- for (i = 0; (i != elemof(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf;
-
- if (elt->sge.addr == 0)
- continue;
- assert(WR_ID(elt->wr.wr_id).id == i);
- buf = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(elt->wr.wr_id).offset);
- rte_pktmbuf_free_seg(buf);
- }
- rte_free(elts);
-}
-
-/**
- * Delete flow steering rule.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param mac_index
- * MAC address index.
- * @param vlan_index
- * VLAN index.
- */
-static void
-rxq_del_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
-{
-#ifndef NDEBUG
- struct priv *priv = rxq->priv;
- const uint8_t (*mac)[ETHER_ADDR_LEN] =
- (const uint8_t (*)[ETHER_ADDR_LEN])
- priv->mac[mac_index].addr_bytes;
-#endif
- assert(rxq->mac_flow[mac_index][vlan_index] != NULL);
- DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
- " (VLAN ID %" PRIu16 ")",
- (void *)rxq,
- (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
- mac_index, priv->vlan_filter[vlan_index].id);
- claim_zero(ibv_destroy_flow(rxq->mac_flow[mac_index][vlan_index]));
- rxq->mac_flow[mac_index][vlan_index] = NULL;
-}
-
-/**
- * Unregister a MAC address from a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param mac_index
- * MAC address index.
- */
-static void
-rxq_mac_addr_del(struct rxq *rxq, unsigned int mac_index)
-{
- struct priv *priv = rxq->priv;
- unsigned int i;
- unsigned int vlans = 0;
-
- assert(mac_index < elemof(priv->mac));
- if (!BITFIELD_ISSET(rxq->mac_configured, mac_index))
- return;
- for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
- if (!priv->vlan_filter[i].enabled)
- continue;
- rxq_del_flow(rxq, mac_index, i);
- vlans++;
- }
-	if (!vlans)
-		rxq_del_flow(rxq, mac_index, 0);
- BITFIELD_RESET(rxq->mac_configured, mac_index);
-}
-
-/**
- * Unregister all MAC addresses from a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_mac_addrs_del(struct rxq *rxq)
-{
- struct priv *priv = rxq->priv;
- unsigned int i;
-
- for (i = 0; (i != elemof(priv->mac)); ++i)
- rxq_mac_addr_del(rxq, i);
-}
-
-static int rxq_promiscuous_enable(struct rxq *);
-static void rxq_promiscuous_disable(struct rxq *);
-
-/**
- * Add single flow steering rule.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param mac_index
- * MAC address index to register.
- * @param vlan_index
- * VLAN index. Use -1 for a flow without VLAN.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_add_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
-{
- struct ibv_flow *flow;
- struct priv *priv = rxq->priv;
- const uint8_t (*mac)[ETHER_ADDR_LEN] =
- (const uint8_t (*)[ETHER_ADDR_LEN])
- priv->mac[mac_index].addr_bytes;
-
- /* Allocate flow specification on the stack. */
- struct __attribute__((packed)) {
- struct ibv_flow_attr attr;
- struct ibv_flow_spec_eth spec;
- } data;
- struct ibv_flow_attr *attr = &data.attr;
- struct ibv_flow_spec_eth *spec = &data.spec;
-
- assert(mac_index < elemof(priv->mac));
- assert((vlan_index < elemof(priv->vlan_filter)) || (vlan_index == -1u));
- /*
- * No padding must be inserted by the compiler between attr and spec.
- * This layout is expected by libibverbs.
- */
- assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
- *attr = (struct ibv_flow_attr){
- .type = IBV_FLOW_ATTR_NORMAL,
- .priority = 3,
- .num_of_specs = 1,
- .port = priv->port,
- .flags = 0
- };
- *spec = (struct ibv_flow_spec_eth){
- .type = IBV_FLOW_SPEC_ETH,
- .size = sizeof(*spec),
- .val = {
- .dst_mac = {
- (*mac)[0], (*mac)[1], (*mac)[2],
- (*mac)[3], (*mac)[4], (*mac)[5]
- },
- .vlan_tag = ((vlan_index != -1u) ?
- htons(priv->vlan_filter[vlan_index].id) :
- 0),
- },
- .mask = {
- .dst_mac = "\xff\xff\xff\xff\xff\xff",
- .vlan_tag = ((vlan_index != -1u) ? htons(0xfff) : 0),
- }
- };
- DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
- " (VLAN %s %" PRIu16 ")",
- (void *)rxq,
- (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
- mac_index,
- ((vlan_index != -1u) ? "ID" : "index"),
- ((vlan_index != -1u) ? priv->vlan_filter[vlan_index].id : -1u));
- /* Create related flow. */
- errno = 0;
- flow = ibv_create_flow(rxq->qp, attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow configuration failed, errno=%d: %s",
- (void *)rxq, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
- if (vlan_index == -1u)
- vlan_index = 0;
- assert(rxq->mac_flow[mac_index][vlan_index] == NULL);
- rxq->mac_flow[mac_index][vlan_index] = flow;
- return 0;
-}
-
-/**
- * Register a MAC address in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param mac_index
- * MAC address index to register.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_mac_addr_add(struct rxq *rxq, unsigned int mac_index)
-{
- struct priv *priv = rxq->priv;
- unsigned int i;
- unsigned int vlans = 0;
- int ret;
-
- assert(mac_index < elemof(priv->mac));
- if (BITFIELD_ISSET(rxq->mac_configured, mac_index))
- rxq_mac_addr_del(rxq, mac_index);
- /* Fill VLAN specifications. */
- for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
- if (!priv->vlan_filter[i].enabled)
- continue;
- /* Create related flow. */
- ret = rxq_add_flow(rxq, mac_index, i);
- if (!ret) {
- vlans++;
- continue;
- }
- /* Failure, rollback. */
- while (i != 0)
- if (priv->vlan_filter[--i].enabled)
- rxq_del_flow(rxq, mac_index, i);
- assert(ret > 0);
- return ret;
- }
- /* In case there is no VLAN filter. */
- if (!vlans) {
- ret = rxq_add_flow(rxq, mac_index, -1);
- if (ret)
- return ret;
- }
- BITFIELD_SET(rxq->mac_configured, mac_index);
- return 0;
-}
-
-/**
- * Register all MAC addresses in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_mac_addrs_add(struct rxq *rxq)
-{
- struct priv *priv = rxq->priv;
- unsigned int i;
- int ret;
-
- for (i = 0; (i != elemof(priv->mac)); ++i) {
- if (!BITFIELD_ISSET(priv->mac_configured, i))
- continue;
- ret = rxq_mac_addr_add(rxq, i);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- rxq_mac_addr_del(rxq, --i);
- assert(ret > 0);
- return ret;
- }
- return 0;
-}
-
-/**
- * Unregister a MAC address.
- *
- * In RSS mode, the MAC address is unregistered from the parent queue,
- * otherwise it is unregistered from each queue directly.
- *
- * @param priv
- * Pointer to private structure.
- * @param mac_index
- * MAC address index.
- */
-static void
-priv_mac_addr_del(struct priv *priv, unsigned int mac_index)
-{
- unsigned int i;
-
- assert(!priv->isolated);
- assert(mac_index < elemof(priv->mac));
- if (!BITFIELD_ISSET(priv->mac_configured, mac_index))
- return;
- if (priv->rss) {
- rxq_mac_addr_del(LIST_FIRST(&priv->parents), mac_index);
- goto end;
- }
- for (i = 0; (i != priv->dev->data->nb_rx_queues); ++i)
- rxq_mac_addr_del((*priv->rxqs)[i], mac_index);
-end:
- BITFIELD_RESET(priv->mac_configured, mac_index);
-}
-
-/**
- * Register a MAC address.
- *
- * In RSS mode, the MAC address is registered in the parent queue,
- * otherwise it is registered in each queue directly.
- *
- * @param priv
- * Pointer to private structure.
- * @param mac_index
- * MAC address index to use.
- * @param mac
- * MAC address to register.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_mac_addr_add(struct priv *priv, unsigned int mac_index,
- const uint8_t (*mac)[ETHER_ADDR_LEN])
-{
- unsigned int i;
- int ret;
-
- assert(mac_index < elemof(priv->mac));
- /* First, make sure this address isn't already configured. */
- for (i = 0; (i != elemof(priv->mac)); ++i) {
- /* Skip this index, it's going to be reconfigured. */
- if (i == mac_index)
- continue;
- if (!BITFIELD_ISSET(priv->mac_configured, i))
- continue;
- if (memcmp(priv->mac[i].addr_bytes, *mac, sizeof(*mac)))
- continue;
- /* Address already configured elsewhere, return with error. */
- return EADDRINUSE;
- }
- if (BITFIELD_ISSET(priv->mac_configured, mac_index))
- priv_mac_addr_del(priv, mac_index);
- priv->mac[mac_index] = (struct ether_addr){
- {
- (*mac)[0], (*mac)[1], (*mac)[2],
- (*mac)[3], (*mac)[4], (*mac)[5]
- }
- };
- /* If device isn't started, this is all we need to do. */
- if (!priv->started) {
-#ifndef NDEBUG
- /* Verify that all queues have this index disabled. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- assert(!BITFIELD_ISSET
- ((*priv->rxqs)[i]->mac_configured, mac_index));
- }
-#endif
- goto end;
- }
- if (priv->rss) {
- ret = rxq_mac_addr_add(LIST_FIRST(&priv->parents), mac_index);
- if (ret)
- return ret;
- goto end;
- }
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- ret = rxq_mac_addr_add((*priv->rxqs)[i], mac_index);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- if ((*priv->rxqs)[(--i)] != NULL)
- rxq_mac_addr_del((*priv->rxqs)[i], mac_index);
- return ret;
- }
-end:
- BITFIELD_SET(priv->mac_configured, mac_index);
- return 0;
-}
-
-/**
- * Enable allmulti mode in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_allmulticast_enable(struct rxq *rxq)
-{
- struct ibv_flow *flow;
- struct ibv_flow_attr attr = {
- .type = IBV_FLOW_ATTR_MC_DEFAULT,
- .num_of_specs = 0,
- .port = rxq->priv->port,
- .flags = 0
- };
-
- DEBUG("%p: enabling allmulticast mode", (void *)rxq);
- if (rxq->allmulti_flow != NULL)
- return EBUSY;
- errno = 0;
- flow = ibv_create_flow(rxq->qp, &attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow configuration failed, errno=%d: %s",
- (void *)rxq, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
- rxq->allmulti_flow = flow;
- DEBUG("%p: allmulticast mode enabled", (void *)rxq);
- return 0;
-}
-
-/**
- * Disable allmulti mode in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_allmulticast_disable(struct rxq *rxq)
-{
- DEBUG("%p: disabling allmulticast mode", (void *)rxq);
- if (rxq->allmulti_flow == NULL)
- return;
- claim_zero(ibv_destroy_flow(rxq->allmulti_flow));
- rxq->allmulti_flow = NULL;
- DEBUG("%p: allmulticast mode disabled", (void *)rxq);
-}
-
-/**
- * Enable promiscuous mode in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_promiscuous_enable(struct rxq *rxq)
-{
- struct ibv_flow *flow;
- struct ibv_flow_attr attr = {
- .type = IBV_FLOW_ATTR_ALL_DEFAULT,
- .num_of_specs = 0,
- .port = rxq->priv->port,
- .flags = 0
- };
-
- if (rxq->priv->vf)
- return 0;
- DEBUG("%p: enabling promiscuous mode", (void *)rxq);
- if (rxq->promisc_flow != NULL)
- return EBUSY;
- errno = 0;
- flow = ibv_create_flow(rxq->qp, &attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow configuration failed, errno=%d: %s",
- (void *)rxq, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
- rxq->promisc_flow = flow;
- DEBUG("%p: promiscuous mode enabled", (void *)rxq);
- return 0;
-}
-
-/**
- * Disable promiscuous mode in a RX queue.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_promiscuous_disable(struct rxq *rxq)
-{
- if (rxq->priv->vf)
- return;
- DEBUG("%p: disabling promiscuous mode", (void *)rxq);
- if (rxq->promisc_flow == NULL)
- return;
- claim_zero(ibv_destroy_flow(rxq->promisc_flow));
- rxq->promisc_flow = NULL;
- DEBUG("%p: promiscuous mode disabled", (void *)rxq);
-}
-
-/**
- * Clean up a RX queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_cleanup(struct rxq *rxq)
-{
- struct ibv_exp_release_intf_params params;
-
- DEBUG("cleaning up %p", (void *)rxq);
- if (rxq->sp)
- rxq_free_elts_sp(rxq);
- else
- rxq_free_elts(rxq);
- if (rxq->if_qp != NULL) {
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- assert(rxq->qp != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
- rxq->if_qp,
- &params));
- }
- if (rxq->if_cq != NULL) {
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- assert(rxq->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
- rxq->if_cq,
- &params));
- }
- if (rxq->qp != NULL && !rxq->priv->isolated) {
- rxq_promiscuous_disable(rxq);
- rxq_allmulticast_disable(rxq);
- rxq_mac_addrs_del(rxq);
- }
- if (rxq->qp != NULL)
- claim_zero(ibv_destroy_qp(rxq->qp));
- if (rxq->cq != NULL)
- claim_zero(ibv_destroy_cq(rxq->cq));
- if (rxq->channel != NULL)
- claim_zero(ibv_destroy_comp_channel(rxq->channel));
- if (rxq->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
- rxq->rd,
- &attr));
- }
- if (rxq->mr != NULL)
- claim_zero(ibv_dereg_mr(rxq->mr));
- memset(rxq, 0, sizeof(*rxq));
-}
-
-/**
- * Translate RX completion flags to packet type.
- *
- * @param flags
- * RX completion flags returned by poll_length_flags().
- *
- * @note: Keep mlx4_dev_supported_ptypes_get() in sync with any change here.
- *
- * @return
- * Packet type for struct rte_mbuf.
- */
-static inline uint32_t
-rxq_cq_to_pkt_type(uint32_t flags)
-{
- uint32_t pkt_type;
-
- if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
- pkt_type =
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV4_PACKET,
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV6_PACKET,
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
- else
- pkt_type =
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV4_PACKET,
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV6_PACKET,
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN);
- return pkt_type;
-}
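/*
 * Illustrative, self-contained sketch (not part of this hunk): TRANSPOSE()
 * moves the bits selected by one mask to the position of another, which is
 * how the completion flags above become rte_mbuf packet-type bits. A
 * definition along these lines (scaling by the ratio of the two masks)
 * matches the usage; it assumes single-bit or aligned masks.
 */
#include <assert.h>
#include <stdint.h>

#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

int main(void)
{
	/* Move a flag from bit 12 down to bit 2, and back up. */
	assert(TRANSPOSE(UINT32_C(1) << 12, 1u << 12, 1u << 2) == 1u << 2);
	assert(TRANSPOSE(UINT32_C(1) << 2, 1u << 2, 1u << 12) == 1u << 12);
	/* An absent flag stays absent. */
	assert(TRANSPOSE(0, 1u << 12, 1u << 2) == 0);
	return 0;
}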
-
-/**
- * Translate RX completion flags to offload flags.
- *
- * @param[in] rxq
- * Pointer to RX queue structure.
- * @param flags
- * RX completion flags returned by poll_length_flags().
- *
- * @return
- * Offload flags (ol_flags) for struct rte_mbuf.
- */
-static inline uint32_t
-rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
-{
- uint32_t ol_flags = 0;
-
- if (rxq->csum)
- ol_flags |=
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_GOOD);
- if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
- ol_flags |=
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_GOOD);
- return ol_flags;
-}
-
-static uint16_t
-mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
-
-/**
- * DPDK callback for RX with scattered packets support.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-static uint16_t
-mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
- const unsigned int elts_n = rxq->elts_n;
- unsigned int elts_head = rxq->elts_head;
- struct ibv_recv_wr head;
- struct ibv_recv_wr **next = &head.next;
- struct ibv_recv_wr *bad_wr;
- unsigned int i;
- unsigned int pkts_ret = 0;
- int ret;
-
- if (unlikely(!rxq->sp))
- return mlx4_rx_burst(dpdk_rxq, pkts, pkts_n);
- if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
- return 0;
- for (i = 0; (i != pkts_n); ++i) {
- struct rxq_elt_sp *elt = &(*elts)[elts_head];
- struct ibv_recv_wr *wr = &elt->wr;
- uint64_t wr_id = wr->wr_id;
- unsigned int len;
- unsigned int pkt_buf_len;
- struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
- struct rte_mbuf **pkt_buf_next = &pkt_buf;
- unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
- unsigned int j = 0;
- uint32_t flags;
-
- /* Sanity checks. */
-#ifdef NDEBUG
- (void)wr_id;
-#endif
- assert(wr_id < rxq->elts_n);
- assert(wr->sg_list == elt->sges);
- assert(wr->num_sge == elemof(elt->sges));
- assert(elts_head < rxq->elts_n);
- assert(rxq->elts_head < rxq->elts_n);
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
- if (unlikely(ret < 0)) {
- struct ibv_wc wc;
- int wcs_n;
-
- DEBUG("rxq=%p, poll_length() failed (ret=%d)",
- (void *)rxq, ret);
- /* ibv_poll_cq() must be used in case of failure. */
- wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
- if (unlikely(wcs_n == 0))
- break;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
- (void *)rxq, wcs_n);
- break;
- }
- assert(wcs_n == 1);
- if (unlikely(wc.status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
- " completion status (%d): %s",
- (void *)rxq, wc.wr_id, wc.status,
- ibv_wc_status_str(wc.status));
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increment dropped packets counter. */
- ++rxq->stats.idropped;
-#endif
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
- goto repost;
- }
- ret = wc.byte_len;
- }
- if (ret == 0)
- break;
- len = ret;
- pkt_buf_len = len;
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
- /*
- * Replace spent segments with new ones, concatenate and
- * return them as pkt_buf.
- */
- while (1) {
- struct ibv_sge *sge = &elt->sges[j];
- struct rte_mbuf *seg = elt->bufs[j];
- struct rte_mbuf *rep;
- unsigned int seg_tailroom;
-
- /*
- * Fetch initial bytes of packet descriptor into a
- * cacheline while allocating rep.
- */
- rte_prefetch0(seg);
- rep = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(rep == NULL)) {
- /*
- * Unable to allocate a replacement mbuf,
- * repost WR.
- */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ":"
- " can't allocate a new mbuf",
- (void *)rxq, wr_id);
- if (pkt_buf != NULL) {
- *pkt_buf_next = NULL;
- rte_pktmbuf_free(pkt_buf);
- }
- /* Increase out of memory counters. */
- ++rxq->stats.rx_nombuf;
- ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
- goto repost;
- }
-#ifndef NDEBUG
- /* Poison user-modifiable fields in rep. */
- NEXT(rep) = (void *)((uintptr_t)-1);
- SET_DATA_OFF(rep, 0xdead);
- DATA_LEN(rep) = 0xd00d;
- PKT_LEN(rep) = 0xdeadd00d;
- NB_SEGS(rep) = 0x2a;
- PORT(rep) = 0x2a;
- rep->ol_flags = -1;
- /*
- * Clear special flags in mbuf to avoid
- * crashing while freeing.
- */
- rep->ol_flags &=
- ~(uint64_t)(IND_ATTACHED_MBUF |
- CTRL_MBUF_FLAG);
-#endif
- assert(rep->buf_len == seg->buf_len);
- /* Reconfigure sge to use rep instead of seg. */
- assert(sge->lkey == rxq->mr->lkey);
- sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
- elt->bufs[j] = rep;
- ++j;
- /* Update pkt_buf if it's the first segment, or link
- * seg to the previous one and update pkt_buf_next. */
- *pkt_buf_next = seg;
- pkt_buf_next = &NEXT(seg);
- /* Update seg information. */
- seg_tailroom = (seg->buf_len - seg_headroom);
- assert(sge->length == seg_tailroom);
- SET_DATA_OFF(seg, seg_headroom);
- if (likely(len <= seg_tailroom)) {
- /* Last segment. */
- DATA_LEN(seg) = len;
- PKT_LEN(seg) = len;
- /* Sanity check. */
- assert(rte_pktmbuf_headroom(seg) ==
- seg_headroom);
- assert(rte_pktmbuf_tailroom(seg) ==
- (seg_tailroom - len));
- break;
- }
- DATA_LEN(seg) = seg_tailroom;
- PKT_LEN(seg) = seg_tailroom;
- /* Sanity check. */
- assert(rte_pktmbuf_headroom(seg) == seg_headroom);
- assert(rte_pktmbuf_tailroom(seg) == 0);
- /* Fix len and clear headroom for next segments. */
- len -= seg_tailroom;
- seg_headroom = 0;
- }
- /* Update head and tail segments. */
- *pkt_buf_next = NULL;
- assert(pkt_buf != NULL);
- assert(j != 0);
- NB_SEGS(pkt_buf) = j;
- PORT(pkt_buf) = rxq->port_id;
- PKT_LEN(pkt_buf) = pkt_buf_len;
- pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
- pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
-
- /* Return packet. */
- *(pkts++) = pkt_buf;
- ++pkts_ret;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase bytes counter. */
- rxq->stats.ibytes += pkt_buf_len;
-#endif
-repost:
- if (++elts_head >= elts_n)
- elts_head = 0;
- continue;
- }
- if (unlikely(i == 0))
- return 0;
- *next = NULL;
- /* Repost WRs. */
-#ifdef DEBUG_RECV
-	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
-#endif
- ret = ibv_post_recv(rxq->qp, head.next, &bad_wr);
- if (unlikely(ret)) {
- /* Inability to repost WRs is fatal. */
- DEBUG("%p: ibv_post_recv(): failed for WR %p: %s",
- (void *)rxq->priv,
- (void *)bad_wr,
- strerror(ret));
- abort();
- }
- rxq->elts_head = elts_head;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase packets counter. */
- rxq->stats.ipackets += pkts_ret;
-#endif
- return pkts_ret;
-}
-
-/**
- * DPDK callback for RX.
- *
- * The following function is the same as mlx4_rx_burst_sp(), except it doesn't
- * manage scattered packets. This improves performance when the MRU is smaller
- * than the size of the first segment.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-static uint16_t
-mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
- const unsigned int elts_n = rxq->elts_n;
- unsigned int elts_head = rxq->elts_head;
- struct ibv_sge sges[pkts_n];
- unsigned int i;
- unsigned int pkts_ret = 0;
- int ret;
-
- if (unlikely(rxq->sp))
- return mlx4_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
- for (i = 0; (i != pkts_n); ++i) {
- struct rxq_elt *elt = &(*elts)[elts_head];
- struct ibv_recv_wr *wr = &elt->wr;
- uint64_t wr_id = wr->wr_id;
- unsigned int len;
- struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(wr_id).offset);
- struct rte_mbuf *rep;
- uint32_t flags;
-
- /* Sanity checks. */
- assert(WR_ID(wr_id).id < rxq->elts_n);
- assert(wr->sg_list == &elt->sge);
- assert(wr->num_sge == 1);
- assert(elts_head < rxq->elts_n);
- assert(rxq->elts_head < rxq->elts_n);
- /*
- * Fetch initial bytes of packet descriptor into a
- * cacheline while allocating rep.
- */
- rte_mbuf_prefetch_part1(seg);
- rte_mbuf_prefetch_part2(seg);
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
- if (unlikely(ret < 0)) {
- struct ibv_wc wc;
- int wcs_n;
-
- DEBUG("rxq=%p, poll_length() failed (ret=%d)",
- (void *)rxq, ret);
- /* ibv_poll_cq() must be used in case of failure. */
- wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
- if (unlikely(wcs_n == 0))
- break;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
- (void *)rxq, wcs_n);
- break;
- }
- assert(wcs_n == 1);
- if (unlikely(wc.status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
- " completion status (%d): %s",
- (void *)rxq, wc.wr_id, wc.status,
- ibv_wc_status_str(wc.status));
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increment dropped packets counter. */
- ++rxq->stats.idropped;
-#endif
- /* Add SGE to array for repost. */
- sges[i] = elt->sge;
- goto repost;
- }
- ret = wc.byte_len;
- }
- if (ret == 0)
- break;
- len = ret;
- rep = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(rep == NULL)) {
- /*
- * Unable to allocate a replacement mbuf,
- * repost WR.
- */
- DEBUG("rxq=%p, wr_id=%" PRIu32 ":"
- " can't allocate a new mbuf",
- (void *)rxq, WR_ID(wr_id).id);
- /* Increase out of memory counters. */
- ++rxq->stats.rx_nombuf;
- ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
- /* Add SGE to array for repost. */
- sges[i] = elt->sge;
- goto repost;
- }
-
- /* Reconfigure sge to use rep instead of seg. */
- elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
- assert(elt->sge.lkey == rxq->mr->lkey);
- WR_ID(wr->wr_id).offset =
- (((uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM) -
- (uintptr_t)rep);
- assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id);
-
- /* Add SGE to array for repost. */
- sges[i] = elt->sge;
-
- /* Update seg information. */
- SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
- NB_SEGS(seg) = 1;
- PORT(seg) = rxq->port_id;
- NEXT(seg) = NULL;
- PKT_LEN(seg) = len;
- DATA_LEN(seg) = len;
- seg->packet_type = rxq_cq_to_pkt_type(flags);
- seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
-
- /* Return packet. */
- *(pkts++) = seg;
- ++pkts_ret;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase bytes counter. */
- rxq->stats.ibytes += len;
-#endif
-repost:
- if (++elts_head >= elts_n)
- elts_head = 0;
- continue;
- }
- if (unlikely(i == 0))
- return 0;
- /* Repost WRs. */
-#ifdef DEBUG_RECV
- DEBUG("%p: reposting %u WRs", (void *)rxq, i);
-#endif
- ret = rxq->if_qp->recv_burst(rxq->qp, sges, i);
- if (unlikely(ret)) {
- /* Inability to repost WRs is fatal. */
- DEBUG("%p: recv_burst(): failed (ret=%d)",
- (void *)rxq->priv,
- ret);
- abort();
- }
- rxq->elts_head = elts_head;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase packets counter. */
- rxq->stats.ipackets += pkts_ret;
-#endif
- return pkts_ret;
-}
-
-/**
- * DPDK callback for RX in secondary processes.
- *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal RX burst callback.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-static uint16_t
-mlx4_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct rxq *rxq = dpdk_rxq;
- struct priv *priv = mlx4_secondary_data_setup(rxq->priv);
- struct priv *primary_priv;
- unsigned int index;
-
- if (priv == NULL)
- return 0;
- primary_priv =
- mlx4_secondary_data[priv->dev->data->port_id].primary_priv;
- /* Look for queue index in both private structures. */
- for (index = 0; index != priv->rxqs_n; ++index)
- if (((*primary_priv->rxqs)[index] == rxq) ||
- ((*priv->rxqs)[index] == rxq))
- break;
- if (index == priv->rxqs_n)
- return 0;
- rxq = (*priv->rxqs)[index];
- return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
-}
-
-/**
- * Allocate a Queue Pair.
- * Optionally setup inline receive if supported.
- *
- * @param priv
- * Pointer to private structure.
- * @param cq
- * Completion queue to associate with QP.
- * @param desc
- * Number of descriptors in QP (hint only).
- *
- * @return
- * QP pointer or NULL in case of error.
- */
-static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- struct ibv_exp_res_domain *rd)
-{
- struct ibv_exp_qp_init_attr attr = {
- /* CQ to be associated with the send queue. */
- .send_cq = cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX4_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX4_PMD_SGE_WR_N),
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
- .pd = priv->pd,
- .res_domain = rd,
- };
-
-#ifdef INLINE_RECV
- attr.max_inl_recv = priv->inl_recv_size;
- attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
-#endif
- return ibv_exp_create_qp(priv->ctx, &attr);
-}
-
-#ifdef RSS_SUPPORT
-
-/**
- * Allocate a RSS Queue Pair.
- * Optionally setup inline receive if supported.
- *
- * @param priv
- * Pointer to private structure.
- * @param cq
- * Completion queue to associate with QP.
- * @param desc
- * Number of descriptors in QP (hint only).
- * @param children_n
- *   Number of children for a parent QP, zero for a child.
- * @param rxq_parent
- *   Pointer to the parent RX queue when creating a child, NULL otherwise.
- *
- * @return
- * QP pointer or NULL in case of error.
- */
-static struct ibv_qp *
-rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- int children_n, struct ibv_exp_res_domain *rd,
- struct rxq *rxq_parent)
-{
- struct ibv_exp_qp_init_attr attr = {
- /* CQ to be associated with the send queue. */
- .send_cq = cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX4_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX4_PMD_SGE_WR_N),
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
- IBV_EXP_QP_INIT_ATTR_QPG),
- .pd = priv->pd,
- .res_domain = rd,
- };
-
-#ifdef INLINE_RECV
-	attr.max_inl_recv = priv->inl_recv_size;
- attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
-#endif
- if (children_n > 0) {
- attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
- /* TSS isn't necessary. */
- attr.qpg.parent_attrib.tss_child_count = 0;
- attr.qpg.parent_attrib.rss_child_count =
- rte_align32pow2(children_n + 1) >> 1;
- DEBUG("initializing parent RSS queue");
- } else {
- attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
- attr.qpg.qpg_parent = rxq_parent->qp;
- DEBUG("initializing child RSS queue");
- }
- return ibv_exp_create_qp(priv->ctx, &attr);
-}
-
-#endif /* RSS_SUPPORT */
-
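
/*
 * Illustrative sketch: the rss_child_count computation above relies on
 * rte_align32pow2(), which rounds its argument up to the next power of two;
 * halving the rounded (children_n + 1) therefore yields the largest power
 * of two less than or equal to children_n, as required for the RSS QP group.
 */
#include <stdint.h>
#include <rte_common.h>

static inline uint32_t
rss_child_count(uint32_t children_n)
{
	return rte_align32pow2(children_n + 1) >> 1;
}

/* rss_child_count(4) == 4, rss_child_count(6) == 4, rss_child_count(8) == 8 */
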
-/**
- * Reconfigure a RX queue with new parameters.
- *
- * rxq_rehash() does not allocate mbufs: allocating them from the wrong
- * thread (i.e. any thread other than a control thread) may corrupt the
- * pool.
- * In case of failure, the queue is left untouched.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rxq
- * RX queue pointer.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
-{
- struct priv *priv = rxq->priv;
- struct rxq tmpl = *rxq;
- unsigned int mbuf_n;
- unsigned int desc_n;
- struct rte_mbuf **pool;
- unsigned int i, k;
- struct ibv_exp_qp_attr mod;
- struct ibv_recv_wr *bad_wr;
- unsigned int mb_len;
- int err;
-
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
- /* Number of descriptors and mbufs currently allocated. */
- desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
- mbuf_n = desc_n;
- /* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum) {
- tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- rxq->csum = tmpl.csum;
- }
- if (priv->hw_csum_l2tun) {
- tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- rxq->csum_l2tun = tmpl.csum_l2tun;
- }
- /* Enable scattered packets support for this queue if necessary. */
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.enable_scatter &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (mb_len - RTE_PKTMBUF_HEADROOM))) {
- tmpl.sp = 1;
- desc_n /= MLX4_PMD_SGE_WR_N;
- } else
- tmpl.sp = 0;
- DEBUG("%p: %s scattered packets support (%u WRs)",
- (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n);
- /* If scatter mode is the same as before, nothing to do. */
- if (tmpl.sp == rxq->sp) {
- DEBUG("%p: nothing to do", (void *)dev);
- return 0;
- }
- /* Remove attached flows if RSS is disabled (no parent queue). */
- if (!priv->rss && !priv->isolated) {
- rxq_allmulticast_disable(&tmpl);
- rxq_promiscuous_disable(&tmpl);
- rxq_mac_addrs_del(&tmpl);
- /* Update original queue in case of failure. */
- rxq->allmulti_flow = tmpl.allmulti_flow;
- rxq->promisc_flow = tmpl.promisc_flow;
- memcpy(rxq->mac_configured, tmpl.mac_configured,
- sizeof(rxq->mac_configured));
- memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
- }
- /* From now on, any failure will render the queue unusable.
- * Reinitialize QP. */
- if (!tmpl.qp)
- goto skip_init;
- mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RESET };
- err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
- if (err) {
- ERROR("%p: cannot reset QP: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
- mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- err = ibv_exp_modify_qp(tmpl.qp, &mod,
- (IBV_EXP_QP_STATE |
- IBV_EXP_QP_PORT));
- if (err) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(err));
- assert(err > 0);
- return err;
-	}
-skip_init:
- err = ibv_resize_cq(tmpl.cq, desc_n);
- if (err) {
- ERROR("%p: cannot resize CQ: %s", (void *)dev, strerror(err));
- assert(err > 0);
- return err;
- }
- /* Reconfigure flows. Do not care for errors. */
- if (!priv->rss && !priv->isolated) {
- rxq_mac_addrs_add(&tmpl);
- if (priv->promisc)
- rxq_promiscuous_enable(&tmpl);
- if (priv->allmulti)
- rxq_allmulticast_enable(&tmpl);
- /* Update original queue in case of failure. */
- rxq->allmulti_flow = tmpl.allmulti_flow;
- rxq->promisc_flow = tmpl.promisc_flow;
- memcpy(rxq->mac_configured, tmpl.mac_configured,
- sizeof(rxq->mac_configured));
- memcpy(rxq->mac_flow, tmpl.mac_flow, sizeof(rxq->mac_flow));
- }
- /* Allocate pool. */
- pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
- if (pool == NULL) {
- ERROR("%p: cannot allocate memory", (void *)dev);
- return ENOBUFS;
- }
- /* Snatch mbufs from original queue. */
- k = 0;
- if (rxq->sp) {
- struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
-
- for (i = 0; (i != elemof(*elts)); ++i) {
- struct rxq_elt_sp *elt = &(*elts)[i];
- unsigned int j;
-
- for (j = 0; (j != elemof(elt->bufs)); ++j) {
- assert(elt->bufs[j] != NULL);
- pool[k++] = elt->bufs[j];
- }
- }
- } else {
- struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
-
- for (i = 0; (i != elemof(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf = (void *)
- ((uintptr_t)elt->sge.addr -
- WR_ID(elt->wr.wr_id).offset);
-
- assert(WR_ID(elt->wr.wr_id).id == i);
- pool[k++] = buf;
- }
- }
- assert(k == mbuf_n);
- tmpl.elts_n = 0;
- tmpl.elts.sp = NULL;
- assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp);
- err = ((tmpl.sp) ?
- rxq_alloc_elts_sp(&tmpl, desc_n, pool) :
- rxq_alloc_elts(&tmpl, desc_n, pool));
- if (err) {
- ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
- rte_free(pool);
- assert(err > 0);
- return err;
- }
- assert(tmpl.elts_n == desc_n);
- assert(tmpl.elts.sp != NULL);
- rte_free(pool);
- /* Clean up original data. */
- rxq->elts_n = 0;
- rte_free(rxq->elts.sp);
- rxq->elts.sp = NULL;
- if (!tmpl.qp)
- goto skip_rtr;
- /* Post WRs. */
- err = ibv_post_recv(tmpl.qp,
- (tmpl.sp ?
- &(*tmpl.elts.sp)[0].wr :
- &(*tmpl.elts.no_sp)[0].wr),
- &bad_wr);
- if (err) {
- ERROR("%p: ibv_post_recv() failed for WR %p: %s",
- (void *)dev,
- (void *)bad_wr,
- strerror(err));
- goto skip_rtr;
- }
- mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- err = ibv_exp_modify_qp(tmpl.qp, &mod, IBV_EXP_QP_STATE);
- if (err)
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(err));
-skip_rtr:
- *rxq = tmpl;
- assert(err >= 0);
- return err;
-}
-
-/**
- * Create verbs QP resources associated with a rxq.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param inactive
- *   If true, the queue is disabled because its index is greater than or
- *   equal to the real number of queues, which must be a power of 2.
- * @param children_n
- *   Number of children for a parent, zero for a child.
- * @param rxq_parent
- *   Pointer to the parent RX structure for a child in the RSS case,
- *   NULL for a parent.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-rxq_create_qp(struct rxq *rxq,
- uint16_t desc,
- int inactive,
- int children_n,
- struct rxq *rxq_parent)
-{
- int ret;
- struct ibv_exp_qp_attr mod;
- struct ibv_exp_query_intf_params params;
- enum ibv_exp_query_intf_status status;
- struct ibv_recv_wr *bad_wr;
- int parent = (children_n > 0);
- struct priv *priv = rxq->priv;
-
-#ifdef RSS_SUPPORT
- if (priv->rss && !inactive && (rxq_parent || parent))
- rxq->qp = rxq_setup_qp_rss(priv, rxq->cq, desc,
- children_n, rxq->rd,
- rxq_parent);
- else
-#endif /* RSS_SUPPORT */
- rxq->qp = rxq_setup_qp(priv, rxq->cq, desc, rxq->rd);
- if (rxq->qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("QP creation failure: %s",
- strerror(ret));
- return ret;
- }
- mod = (struct ibv_exp_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_exp_modify_qp(rxq->qp, &mod,
- (IBV_EXP_QP_STATE |
-#ifdef RSS_SUPPORT
- (parent ? IBV_EXP_QP_GROUP_RSS : 0) |
-#endif /* RSS_SUPPORT */
- IBV_EXP_QP_PORT));
- if (ret) {
- ERROR("QP state to IBV_QPS_INIT failed: %s",
- strerror(ret));
- return ret;
- }
- if (!priv->isolated && (parent || !priv->rss)) {
- /* Configure MAC and broadcast addresses. */
- ret = rxq_mac_addrs_add(rxq);
- if (ret) {
- ERROR("QP flow attachment failed: %s",
- strerror(ret));
- return ret;
- }
- }
- if (!parent) {
- ret = ibv_post_recv(rxq->qp,
- (rxq->sp ?
- &(*rxq->elts.sp)[0].wr :
- &(*rxq->elts.no_sp)[0].wr),
- &bad_wr);
- if (ret) {
- ERROR("ibv_post_recv() failed for WR %p: %s",
- (void *)bad_wr,
- strerror(ret));
- return ret;
- }
- }
- mod = (struct ibv_exp_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_exp_modify_qp(rxq->qp, &mod, IBV_EXP_QP_STATE);
- if (ret) {
- ERROR("QP state to IBV_QPS_RTR failed: %s",
- strerror(ret));
- return ret;
- }
- params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .obj = rxq->qp,
- };
- rxq->if_qp = ibv_exp_query_intf(priv->ctx, &params, &status);
- if (rxq->if_qp == NULL) {
- ERROR("QP interface family query failed with status %d",
- status);
- return errno;
- }
- return 0;
-}
-
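
/*
 * Illustrative sketch using plain libibverbs (the function above relies on
 * the experimental ibv_exp_* API): a receive-only raw packet QP needs only
 * the RESET -> INIT -> RTR transitions performed above; RTS is required
 * for sending only.
 */
#include <infiniband/verbs.h>

static int
qp_to_rtr(struct ibv_qp *qp, uint8_t port_num)
{
	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_INIT,
		.port_num = port_num,
	};
	int ret = ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);

	if (ret)
		return ret;
	attr = (struct ibv_qp_attr){ .qp_state = IBV_QPS_RTR };
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}
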
-/**
- * Configure a RX queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rxq
- * Pointer to RX queue structure.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param inactive
- *   If true, the queue is disabled because its index is greater than or
- *   equal to the real number of queues, which must be a power of 2.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
- * @param children_n
- *   Number of children for a parent, zero for a child.
- * @param rxq_parent
- *   Pointer to the parent RX structure for a child in the RSS case,
- *   NULL otherwise.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, int inactive,
- const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp, int children_n,
- struct rxq *rxq_parent)
-{
- struct priv *priv = dev->data->dev_private;
- struct rxq tmpl = {
- .priv = priv,
- .mp = mp,
- .socket = socket
- };
- union {
- struct ibv_exp_query_intf_params params;
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_res_domain_init_attr rd;
- } attr;
- enum ibv_exp_query_intf_status status;
- unsigned int mb_len;
- int ret = 0;
- int parent = (children_n > 0);
-
- (void)conf; /* Thresholds configuration (ignored). */
- /*
- * If this is a parent queue, hardware must support RSS and
- * RSS must be enabled.
- */
- assert((!parent) || ((priv->hw_rss) && (priv->rss)));
- if (parent) {
- /* Even if unused, ibv_create_cq() requires at least one
- * descriptor. */
- desc = 1;
- goto skip_mr;
- }
- mb_len = rte_pktmbuf_data_room_size(mp);
- if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
- ERROR("%p: invalid number of RX descriptors (must be a"
- " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
- return EINVAL;
- }
- /* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
- tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
- tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- /* Enable scattered packets support for this queue if necessary. */
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
- tmpl.sp = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
- tmpl.sp = 1;
- desc /= MLX4_PMD_SGE_WR_N;
- } else {
- WARN("%p: the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered"
- " mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
- }
- DEBUG("%p: %s scattered packets support (%u WRs)",
- (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
- /* Use the entire RX mempool as the memory region. */
- tmpl.mr = mlx4_mp2mr(priv->pd, mp);
- if (tmpl.mr == NULL) {
- ret = EINVAL;
- ERROR("%p: MR creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
-skip_mr:
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- if (dev->data->dev_conf.intr_conf.rxq) {
- tmpl.channel = ibv_create_comp_channel(priv->ctx);
- if (tmpl.channel == NULL) {
- ret = ENOMEM;
- ERROR("%p: Rx interrupt completion channel creation"
- " failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- }
- attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
- };
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0,
- &attr.cq);
- if (tmpl.cq == NULL) {
- ret = ENOMEM;
- ERROR("%p: CQ creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- /* Allocate descriptors for RX queues, except for the RSS parent. */
- if (parent)
- goto skip_alloc;
- if (tmpl.sp)
- ret = rxq_alloc_elts_sp(&tmpl, desc, NULL);
- else
- ret = rxq_alloc_elts(&tmpl, desc, NULL);
+ /* Prepare internal flow rules. */
+ ret = mlx4_flow_sync(priv, &error);
if (ret) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(ret));
- return ret;
+ ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
}
-skip_alloc:
- if (parent || rxq_parent || !priv->rss) {
- ret = rxq_create_qp(&tmpl, desc, inactive,
- children_n, rxq_parent);
- if (ret)
- goto error;
- }
- /* Save port ID. */
- tmpl.port_id = dev->data->port_id;
- DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ret = EINVAL;
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- /* Clean up rxq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
- rxq_cleanup(rxq);
- *rxq = tmpl;
- DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
- assert(ret == 0);
- return 0;
-error:
- rxq_cleanup(&tmpl);
- assert(ret > 0);
return ret;
}
/**
- * DPDK callback to configure a RX queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * RX queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
-{
- struct rxq *parent;
- struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = (*priv->rxqs)[idx];
- int inactive = 0;
- int ret;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= priv->rxqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->rxqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
- }
- if (rxq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)rxq);
- if (priv->started) {
- priv_unlock(priv);
- return -EEXIST;
- }
- (*priv->rxqs)[idx] = NULL;
- rxq_cleanup(rxq);
- } else {
- rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
- if (rxq == NULL) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
- }
- if (priv->rss && !priv->isolated) {
- /* The list consists of the single default one. */
- parent = LIST_FIRST(&priv->parents);
- if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
- inactive = 1;
- } else {
- parent = NULL;
- }
- ret = rxq_setup(dev, rxq, desc, socket,
- inactive, conf, mp, 0, parent);
- if (ret)
- rte_free(rxq);
- else {
- rxq->stats.idx = idx;
- DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq);
- (*priv->rxqs)[idx] = rxq;
- /* Update receive callback. */
- if (rxq->sp)
- dev->rx_pkt_burst = mlx4_rx_burst_sp;
- else
- dev->rx_pkt_burst = mlx4_rx_burst;
- }
- priv_unlock(priv);
- return -ret;
-}
-
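
/*
 * Illustrative sketch: the callback above backs rte_eth_rx_queue_setup(),
 * which an application calls roughly as follows; "mbuf_pool" and the
 * descriptor count are placeholders.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
app_setup_rxq(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	/* Queue 0, 512 descriptors, default thresholds, local NUMA socket. */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mbuf_pool);
}
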
-/**
- * DPDK callback to release a RX queue.
- *
- * @param dpdk_rxq
- * Generic RX queue pointer.
- */
-static void
-mlx4_rx_queue_release(void *dpdk_rxq)
-{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct priv *priv;
- unsigned int i;
-
- if (mlx4_is_secondary())
- return;
- if (rxq == NULL)
- return;
- priv = rxq->priv;
- priv_lock(priv);
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] == rxq) {
- DEBUG("%p: removing RX queue %p from list",
- (void *)priv->dev, (void *)rxq);
- (*priv->rxqs)[i] = NULL;
- break;
- }
- rxq_cleanup(rxq);
- rte_free(rxq);
- priv_unlock(priv);
-}
-
-static int
-priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
-
-static int
-priv_dev_removal_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
-
-static int
-priv_dev_link_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
-
-/**
* DPDK callback to start the device.
*
- * Simulate device start by attaching all configured flows.
+ * Simulate device start by initializing common RSS resources and attaching
+ * all configured flows.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i = 0;
- unsigned int r;
- struct rxq *rxq;
+ struct rte_flow_error error;
int ret;
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- if (priv->started) {
- priv_unlock(priv);
+ if (priv->started)
return 0;
- }
DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
priv->started = 1;
- if (priv->isolated) {
- rxq = NULL;
- r = 1;
- } else if (priv->rss) {
- rxq = LIST_FIRST(&priv->parents);
- r = 1;
- } else {
- rxq = (*priv->rxqs)[0];
- r = priv->rxqs_n;
- }
- /* Iterate only once when RSS is enabled. */
- do {
- /* Ignore nonexistent RX queues. */
- if (rxq == NULL)
- continue;
- ret = rxq_mac_addrs_add(rxq);
- if (!ret && priv->promisc)
- ret = rxq_promiscuous_enable(rxq);
- if (!ret && priv->allmulti)
- ret = rxq_allmulticast_enable(rxq);
- if (!ret)
- continue;
- WARN("%p: QP flow attachment failed: %s",
- (void *)dev, strerror(ret));
- goto err;
- } while ((--r) && ((rxq = (*priv->rxqs)[++i]), i));
- ret = priv_dev_link_interrupt_handler_install(priv, dev);
+ ret = mlx4_rss_init(priv);
if (ret) {
- ERROR("%p: LSC handler install failed",
- (void *)dev);
+ ERROR("%p: cannot initialize RSS resources: %s",
+ (void *)dev, strerror(-ret));
goto err;
}
- ret = priv_dev_removal_interrupt_handler_install(priv, dev);
+ ret = mlx4_intr_install(priv);
if (ret) {
- ERROR("%p: RMV handler install failed",
+ ERROR("%p: interrupt handler installation failed",
(void *)dev);
goto err;
}
- ret = priv_rx_intr_vec_enable(priv);
- if (ret) {
- ERROR("%p: Rx interrupt vector creation failed",
- (void *)dev);
- goto err;
- }
- ret = mlx4_priv_flow_start(priv);
+ ret = mlx4_flow_sync(priv, &error);
if (ret) {
- ERROR("%p: flow start failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ (void *)dev,
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
goto err;
}
- priv_unlock(priv);
+ rte_wmb();
+ dev->tx_pkt_burst = mlx4_tx_burst;
+ dev->rx_pkt_burst = mlx4_rx_burst;
return 0;
err:
/* Rollback. */
- while (i != 0) {
- rxq = (*priv->rxqs)[i--];
- if (rxq != NULL) {
- rxq_allmulticast_disable(rxq);
- rxq_promiscuous_disable(rxq);
- rxq_mac_addrs_del(rxq);
- }
- }
priv->started = 0;
- priv_unlock(priv);
- return -ret;
+ return ret;
}
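
/*
 * Illustrative sketch: the application-side configure/start sequence that
 * eventually reaches this callback. Error handling is omitted; the port ID,
 * queue counts and descriptor numbers are placeholders.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static void
app_start_port(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf conf = { .rxmode = { .hw_ip_checksum = 1 } };

	rte_eth_dev_configure(port_id, 1, 1, &conf);
	rte_eth_rx_queue_setup(port_id, 0, 512,
			       rte_eth_dev_socket_id(port_id),
			       NULL, mbuf_pool);
	rte_eth_tx_queue_setup(port_id, 0, 512,
			       rte_eth_dev_socket_id(port_id), NULL);
	rte_eth_dev_start(port_id); /* ends up in the PMD's dev_start hook */
}
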
/**
@@ -4220,102 +178,19 @@ static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i = 0;
- unsigned int r;
- struct rxq *rxq;
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (!priv->started) {
- priv_unlock(priv);
+ if (!priv->started)
return;
- }
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
- if (priv->isolated) {
- rxq = NULL;
- r = 1;
- } else if (priv->rss) {
- rxq = LIST_FIRST(&priv->parents);
- r = 1;
- } else {
- rxq = (*priv->rxqs)[0];
- r = priv->rxqs_n;
- }
- mlx4_priv_flow_stop(priv);
- /* Iterate only once when RSS is enabled. */
- do {
- /* Ignore nonexistent RX queues. */
- if (rxq == NULL)
- continue;
- rxq_allmulticast_disable(rxq);
- rxq_promiscuous_disable(rxq);
- rxq_mac_addrs_del(rxq);
- } while ((--r) && ((rxq = (*priv->rxqs)[++i]), i));
- priv_unlock(priv);
-}
-
-/**
- * Dummy DPDK callback for TX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
- return 0;
-}
-
-/**
- * Dummy DPDK callback for RX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-static uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
- return 0;
+ dev->tx_pkt_burst = mlx4_tx_burst_removed;
+ dev->rx_pkt_burst = mlx4_rx_burst_removed;
+ rte_wmb();
+ mlx4_flow_sync(priv, NULL);
+ mlx4_intr_uninstall(priv);
+ mlx4_rss_deinit(priv);
}
-static int
-priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
-
-static int
-priv_dev_removal_interrupt_handler_uninstall(struct priv *,
- struct rte_eth_dev *);
-
-static int
-priv_dev_link_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
-
/**
* DPDK callback to close the device.
*
@@ -4327,1047 +202,58 @@ priv_dev_link_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = mlx4_get_priv(dev);
- void *tmp;
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- if (priv == NULL)
- return;
- priv_lock(priv);
DEBUG("%p: closing device \"%s\"",
(void *)dev,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
- /* Prevent crashes when queues are still in use. This is unfortunately
- * still required for DPDK 1.3 because some programs (such as testpmd)
- * never release them before closing the device. */
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
- if (priv->rxqs != NULL) {
- /* XXX race condition if mlx4_rx_burst() is still running. */
- usleep(1000);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- tmp = (*priv->rxqs)[i];
- if (tmp == NULL)
- continue;
- (*priv->rxqs)[i] = NULL;
- rxq_cleanup(tmp);
- rte_free(tmp);
- }
- priv->rxqs_n = 0;
- priv->rxqs = NULL;
- }
- if (priv->txqs != NULL) {
- /* XXX race condition if mlx4_tx_burst() is still running. */
- usleep(1000);
- for (i = 0; (i != priv->txqs_n); ++i) {
- tmp = (*priv->txqs)[i];
- if (tmp == NULL)
- continue;
- (*priv->txqs)[i] = NULL;
- txq_cleanup(tmp);
- rte_free(tmp);
- }
- priv->txqs_n = 0;
- priv->txqs = NULL;
- }
- if (priv->rss)
- priv_parent_list_cleanup(priv);
+ dev->rx_pkt_burst = mlx4_rx_burst_removed;
+ dev->tx_pkt_burst = mlx4_tx_burst_removed;
+ rte_wmb();
+ mlx4_flow_clean(priv);
+ for (i = 0; i != dev->data->nb_rx_queues; ++i)
+ mlx4_rx_queue_release(dev->data->rx_queues[i]);
+ for (i = 0; i != dev->data->nb_tx_queues; ++i)
+ mlx4_tx_queue_release(dev->data->tx_queues[i]);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(ibv_dealloc_pd(priv->pd));
claim_zero(ibv_close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
- priv_dev_removal_interrupt_handler_uninstall(priv, dev);
- priv_dev_link_interrupt_handler_uninstall(priv, dev);
- priv_rx_intr_vec_disable(priv);
- priv_unlock(priv);
+ mlx4_intr_uninstall(priv);
memset(priv, 0, sizeof(*priv));
}
-/**
- * Change the link state (UP / DOWN).
- *
- * @param priv
- * Pointer to Ethernet device private data.
- * @param up
- * Nonzero for link up, otherwise link down.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_set_link(struct priv *priv, int up)
-{
- struct rte_eth_dev *dev = priv->dev;
- int err;
- unsigned int i;
-
- if (up) {
- err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
- if (err)
- return err;
- for (i = 0; i < priv->rxqs_n; i++)
- if ((*priv->rxqs)[i]->sp)
- break;
- /* Check if an sp queue exists.
- * Note: Some old frames might be received.
- */
- if (i == priv->rxqs_n)
- dev->rx_pkt_burst = mlx4_rx_burst;
- else
- dev->rx_pkt_burst = mlx4_rx_burst_sp;
- dev->tx_pkt_burst = mlx4_tx_burst;
- } else {
- err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
- if (err)
- return err;
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
- }
- return 0;
-}
-
-/**
- * DPDK callback to bring the link DOWN.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-mlx4_set_link_down(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_set_link(priv, 0);
- priv_unlock(priv);
- return err;
-}
-
-/**
- * DPDK callback to bring the link UP.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-mlx4_set_link_up(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_set_link(priv, 1);
- priv_unlock(priv);
- return err;
-}
-/**
- * DPDK callback to get information about the device.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] info
- * Info structure output buffer.
- */
-static void
-mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
-{
- struct priv *priv = mlx4_get_priv(dev);
- unsigned int max;
- char ifname[IF_NAMESIZE];
-
- info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
- if (priv == NULL)
- return;
- priv_lock(priv);
- /* FIXME: we should ask the device for these values. */
- info->min_rx_bufsize = 32;
- info->max_rx_pktlen = 65536;
- /*
-	 * Since we need one CQ per QP, the limit is the minimum of the
-	 * two values.
- */
- max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
- priv->device_attr.max_qp : priv->device_attr.max_cq);
-	/* Clamp to 65535: max_rx_queues is uint16_t and a larger value
-	 * would wrap around. */
- if (max >= 65535)
- max = 65535;
- info->max_rx_queues = max;
- info->max_tx_queues = max;
- /* Last array entry is reserved for broadcast. */
- info->max_mac_addrs = (elemof(priv->mac) - 1);
- info->rx_offload_capa =
- (priv->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0);
- info->tx_offload_capa =
- (priv->hw_csum ?
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM) :
- 0);
- if (priv_get_ifname(priv, &ifname) == 0)
- info->if_index = if_nametoindex(ifname);
- info->speed_capa =
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G |
- ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_56G;
- priv_unlock(priv);
-}
-
-static const uint32_t *
-mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
-{
- static const uint32_t ptypes[] = {
- /* refers to rxq_cq_to_pkt_type() */
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_INNER_L3_IPV4,
- RTE_PTYPE_INNER_L3_IPV6,
- RTE_PTYPE_UNKNOWN
- };
-
- if (dev->rx_pkt_burst == mlx4_rx_burst ||
- dev->rx_pkt_burst == mlx4_rx_burst_sp)
- return ptypes;
- return NULL;
-}
-
-/**
- * DPDK callback to get device statistics.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] stats
- * Stats structure output buffer.
- */
-static void
-mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct priv *priv = mlx4_get_priv(dev);
- struct rte_eth_stats tmp = {0};
- unsigned int i;
- unsigned int idx;
-
- if (priv == NULL)
- return;
- priv_lock(priv);
- /* Add software counters. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
-
- if (rxq == NULL)
- continue;
- idx = rxq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX4_PMD_SOFT_COUNTERS
- tmp.q_ipackets[idx] += rxq->stats.ipackets;
- tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
- tmp.q_errors[idx] += (rxq->stats.idropped +
- rxq->stats.rx_nombuf);
- }
-#ifdef MLX4_PMD_SOFT_COUNTERS
- tmp.ipackets += rxq->stats.ipackets;
- tmp.ibytes += rxq->stats.ibytes;
-#endif
- tmp.ierrors += rxq->stats.idropped;
- tmp.rx_nombuf += rxq->stats.rx_nombuf;
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
-
- if (txq == NULL)
- continue;
- idx = txq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX4_PMD_SOFT_COUNTERS
- tmp.q_opackets[idx] += txq->stats.opackets;
- tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
- tmp.q_errors[idx] += txq->stats.odropped;
- }
-#ifdef MLX4_PMD_SOFT_COUNTERS
- tmp.opackets += txq->stats.opackets;
- tmp.obytes += txq->stats.obytes;
-#endif
- tmp.oerrors += txq->stats.odropped;
- }
-#ifndef MLX4_PMD_SOFT_COUNTERS
- /* FIXME: retrieve and add hardware counters. */
-#endif
- *stats = tmp;
- priv_unlock(priv);
-}
-
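
/*
 * Illustrative sketch: reading the counters aggregated above through the
 * stable ethdev API.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
app_print_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %" PRIu16 ": %" PRIu64 " ipackets, %" PRIu64
		       " ierrors, %" PRIu64 " rx_nombuf\n", port_id,
		       stats.ipackets, stats.ierrors, stats.rx_nombuf);
}
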
-/**
- * DPDK callback to clear device statistics.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- */
-static void
-mlx4_stats_reset(struct rte_eth_dev *dev)
-{
- struct priv *priv = mlx4_get_priv(dev);
- unsigned int i;
- unsigned int idx;
-
- if (priv == NULL)
- return;
- priv_lock(priv);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- idx = (*priv->rxqs)[i]->stats.idx;
- (*priv->rxqs)[i]->stats =
- (struct mlx4_rxq_stats){ .idx = idx };
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- if ((*priv->txqs)[i] == NULL)
- continue;
- idx = (*priv->txqs)[i]->stats.idx;
- (*priv->txqs)[i]->stats =
- (struct mlx4_txq_stats){ .idx = idx };
- }
-#ifndef MLX4_PMD_SOFT_COUNTERS
- /* FIXME: reset hardware counters. */
-#endif
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to remove a MAC address.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param index
- * MAC address index.
- */
-static void
-mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
-{
- struct priv *priv = dev->data->dev_private;
-
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (priv->isolated)
- goto end;
- DEBUG("%p: removing MAC address from index %" PRIu32,
- (void *)dev, index);
- /* Last array entry is reserved for broadcast. */
- if (index >= (elemof(priv->mac) - 1))
- goto end;
- priv_mac_addr_del(priv, index);
-end:
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to add a MAC address.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param mac_addr
- * MAC address to register.
- * @param index
- * MAC address index.
- * @param vmdq
- * VMDq pool index to associate address with (ignored).
- */
-static int
-mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t vmdq)
-{
- struct priv *priv = dev->data->dev_private;
- int re;
-
- if (mlx4_is_secondary())
- return -ENOTSUP;
- (void)vmdq;
- priv_lock(priv);
- if (priv->isolated) {
- DEBUG("%p: cannot add MAC address, "
- "device is in isolated mode", (void *)dev);
- re = EPERM;
- goto end;
- }
- DEBUG("%p: adding MAC address at index %" PRIu32,
- (void *)dev, index);
- /* Last array entry is reserved for broadcast. */
- if (index >= (elemof(priv->mac) - 1)) {
- re = EINVAL;
- goto end;
- }
- re = priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
-end:
- priv_unlock(priv);
- return -re;
-}
-
-/**
- * DPDK callback to set the primary MAC address.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param mac_addr
- * MAC address to register.
- */
-static void
-mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
-{
- DEBUG("%p: setting primary MAC address", (void *)dev);
- mlx4_mac_addr_remove(dev, 0);
- mlx4_mac_addr_add(dev, mac_addr, 0, 0);
-}
-
-/**
- * DPDK callback to enable promiscuous mode.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- */
-static void
-mlx4_promiscuous_enable(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- int ret;
-
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (priv->isolated) {
- DEBUG("%p: cannot enable promiscuous, "
- "device is in isolated mode", (void *)dev);
- priv_unlock(priv);
- return;
- }
- if (priv->promisc) {
- priv_unlock(priv);
- return;
- }
- /* If device isn't started, this is all we need to do. */
- if (!priv->started)
- goto end;
- if (priv->rss) {
- ret = rxq_promiscuous_enable(LIST_FIRST(&priv->parents));
- if (ret) {
- priv_unlock(priv);
- return;
- }
- goto end;
- }
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- ret = rxq_promiscuous_enable((*priv->rxqs)[i]);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- if ((*priv->rxqs)[--i] != NULL)
- rxq_promiscuous_disable((*priv->rxqs)[i]);
- priv_unlock(priv);
- return;
- }
-end:
- priv->promisc = 1;
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to disable promiscuous mode.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- */
-static void
-mlx4_promiscuous_disable(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
-
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (!priv->promisc || priv->isolated) {
- priv_unlock(priv);
- return;
- }
- if (priv->rss) {
- rxq_promiscuous_disable(LIST_FIRST(&priv->parents));
- goto end;
- }
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] != NULL)
- rxq_promiscuous_disable((*priv->rxqs)[i]);
-end:
- priv->promisc = 0;
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to enable allmulti mode.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- */
-static void
-mlx4_allmulticast_enable(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- int ret;
-
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (priv->isolated) {
- DEBUG("%p: cannot enable allmulticast, "
- "device is in isolated mode", (void *)dev);
- priv_unlock(priv);
- return;
- }
- if (priv->allmulti) {
- priv_unlock(priv);
- return;
- }
- /* If device isn't started, this is all we need to do. */
- if (!priv->started)
- goto end;
- if (priv->rss) {
- ret = rxq_allmulticast_enable(LIST_FIRST(&priv->parents));
- if (ret) {
- priv_unlock(priv);
- return;
- }
- goto end;
- }
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- ret = rxq_allmulticast_enable((*priv->rxqs)[i]);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- if ((*priv->rxqs)[--i] != NULL)
- rxq_allmulticast_disable((*priv->rxqs)[i]);
- priv_unlock(priv);
- return;
- }
-end:
- priv->allmulti = 1;
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to disable allmulti mode.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- */
-static void
-mlx4_allmulticast_disable(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
-
- if (mlx4_is_secondary())
- return;
- priv_lock(priv);
- if (!priv->allmulti || priv->isolated) {
- priv_unlock(priv);
- return;
- }
- if (priv->rss) {
- rxq_allmulticast_disable(LIST_FIRST(&priv->parents));
- goto end;
- }
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] != NULL)
- rxq_allmulticast_disable((*priv->rxqs)[i]);
-end:
- priv->allmulti = 0;
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to retrieve physical link information.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-static int
-mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- const struct priv *priv = mlx4_get_priv(dev);
- struct ethtool_cmd edata = {
- .cmd = ETHTOOL_GSET
- };
- struct ifreq ifr;
- struct rte_eth_link dev_link;
- int link_speed = 0;
-
- /* priv_lock() is not taken to allow concurrent calls. */
-
- if (priv == NULL)
- return -EINVAL;
- (void)wait_to_complete;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
- }
- memset(&dev_link, 0, sizeof(dev_link));
- dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
- (ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- strerror(errno));
- return -1;
- }
- link_speed = ethtool_cmd_speed(&edata);
- if (link_speed == -1)
- dev_link.link_speed = 0;
- else
- dev_link.link_speed = link_speed;
- dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
- ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
- dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_FIXED);
- if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
- /* Link status changed. */
- dev->data->dev_link = dev_link;
- return 0;
- }
- /* Link status is still the same. */
- return -1;
-}
-
-static int
-mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
- struct rte_pci_addr *pci_addr);
-
-/**
- * DPDK callback to change the MTU.
- *
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param mtu
- * New MTU.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
-{
- struct priv *priv = dev->data->dev_private;
- int ret = 0;
- unsigned int i;
- uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
- mlx4_rx_burst;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- /* Set kernel interface MTU first. */
- if (priv_set_mtu(priv, mtu)) {
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
- goto out;
- } else
- DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
- priv->mtu = mtu;
- /* Temporarily replace RX handler with a fake one, assuming it has not
- * been copied elsewhere. */
- dev->rx_pkt_burst = removed_rx_burst;
- /* Make sure everyone has left mlx4_rx_burst() and uses
- * removed_rx_burst() instead. */
- rte_wmb();
- usleep(1000);
- /* Reconfigure each RX queue. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int max_frame_len;
-
- if (rxq == NULL)
- continue;
- /* Calculate new maximum frame length according to MTU. */
- max_frame_len = (priv->mtu + ETHER_HDR_LEN +
- (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- /* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame =
- (max_frame_len > ETHER_MAX_LEN);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- ret = rxq_rehash(dev, rxq);
- if (ret) {
- /* Force SP RX if that queue requires it and abort. */
- if (rxq->sp)
- rx_func = mlx4_rx_burst_sp;
- break;
- }
- /* Reenable non-RSS queue attributes. No need to check
- * for errors at this stage. */
- if (!priv->rss && !priv->isolated) {
- rxq_mac_addrs_add(rxq);
- if (priv->promisc)
- rxq_promiscuous_enable(rxq);
- if (priv->allmulti)
- rxq_allmulticast_enable(rxq);
- }
- /* Scattered burst function takes priority. */
- if (rxq->sp)
- rx_func = mlx4_rx_burst_sp;
- }
- /* Burst functions can now be called again. */
- rte_wmb();
- dev->rx_pkt_burst = rx_func;
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
-}
-
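
/*
 * Illustrative sketch of the max_frame_len computation above: with the
 * standard constants (ETHER_HDR_LEN = 14, ETHER_MAX_VLAN_FRAME_LEN = 1522,
 * ETHER_MAX_LEN = 1518), the per-frame overhead is 14 + (1522 - 1518) = 18
 * bytes.
 */
#include <stdint.h>
#include <rte_ether.h>

static inline unsigned int
mtu_to_max_frame_len(uint16_t mtu)
{
	return mtu + ETHER_HDR_LEN +
	       (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN);
}

/* mtu_to_max_frame_len(1500) == 1518, mtu_to_max_frame_len(9000) == 9018 */
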
-/**
- * DPDK callback to get flow control status.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] fc_conf
- * Flow control output buffer.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
- struct priv *priv = dev->data->dev_private;
- struct ifreq ifr;
- struct ethtool_pauseparam ethpause = {
- .cmd = ETHTOOL_GPAUSEPARAM
- };
- int ret;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- ifr.ifr_data = (void *)&ethpause;
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
- }
-
- fc_conf->autoneg = ethpause.autoneg;
- if (ethpause.rx_pause && ethpause.tx_pause)
- fc_conf->mode = RTE_FC_FULL;
- else if (ethpause.rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
- else if (ethpause.tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
- else
- fc_conf->mode = RTE_FC_NONE;
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
-}
-
-/**
- * DPDK callback to modify flow control parameters.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[in] fc_conf
- * Flow control parameters.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
-{
- struct priv *priv = dev->data->dev_private;
- struct ifreq ifr;
- struct ethtool_pauseparam ethpause = {
- .cmd = ETHTOOL_SPAUSEPARAM
- };
- int ret;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- ifr.ifr_data = (void *)&ethpause;
- ethpause.autoneg = fc_conf->autoneg;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
- ethpause.rx_pause = 1;
- else
- ethpause.rx_pause = 0;
-
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
- ethpause.tx_pause = 1;
- else
- ethpause.tx_pause = 0;
-
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
- }
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
-}
-
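
/*
 * Illustrative sketch: the mapping between rte_eth_fc_conf.mode and the
 * ethtool pause flags used by the two callbacks above. RTE_FC_FULL is the
 * combination of the Rx and Tx pause bits.
 */
#include <rte_ethdev.h>

static void
fc_mode_to_pause(enum rte_eth_fc_mode mode, int *rx_pause, int *tx_pause)
{
	*rx_pause = (mode == RTE_FC_FULL || mode == RTE_FC_RX_PAUSE);
	*tx_pause = (mode == RTE_FC_FULL || mode == RTE_FC_TX_PAUSE);
}
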
-/**
- * Configure a VLAN filter.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param vlan_id
- * VLAN ID to filter.
- * @param on
- * Toggle filter.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int j = -1;
-
- DEBUG("%p: %s VLAN filter ID %" PRIu16,
- (void *)dev, (on ? "enable" : "disable"), vlan_id);
- for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
- if (!priv->vlan_filter[i].enabled) {
- /* Unused index, remember it. */
- j = i;
- continue;
- }
- if (priv->vlan_filter[i].id != vlan_id)
- continue;
- /* This VLAN ID is already known, use its index. */
- j = i;
- break;
- }
- /* Check if there's room for another VLAN filter. */
- if (j == (unsigned int)-1)
- return ENOMEM;
- /*
-	 * VLAN filters apply to all configured MAC addresses; flow
-	 * specifications must be reconfigured accordingly.
- */
- priv->vlan_filter[j].id = vlan_id;
- if ((on) && (!priv->vlan_filter[j].enabled)) {
- /*
- * Filter is disabled, enable it.
- * Rehashing flows in all RX queues is necessary.
- */
- if (priv->rss)
- rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
- else
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] != NULL)
- rxq_mac_addrs_del((*priv->rxqs)[i]);
- priv->vlan_filter[j].enabled = 1;
- if (priv->started) {
- if (priv->rss)
- rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
- else
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- rxq_mac_addrs_add((*priv->rxqs)[i]);
- }
- }
- } else if ((!on) && (priv->vlan_filter[j].enabled)) {
- /*
- * Filter is enabled, disable it.
- * Rehashing flows in all RX queues is necessary.
- */
- if (priv->rss)
- rxq_mac_addrs_del(LIST_FIRST(&priv->parents));
- else
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] != NULL)
- rxq_mac_addrs_del((*priv->rxqs)[i]);
- priv->vlan_filter[j].enabled = 0;
- if (priv->started) {
- if (priv->rss)
- rxq_mac_addrs_add(LIST_FIRST(&priv->parents));
- else
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- rxq_mac_addrs_add((*priv->rxqs)[i]);
- }
- }
- }
- return 0;
-}
-
-/**
- * DPDK callback to configure a VLAN filter.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param vlan_id
- * VLAN ID to filter.
- * @param on
- * Toggle filter.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
- if (mlx4_is_secondary())
- return -E_RTE_SECONDARY;
- priv_lock(priv);
- if (priv->isolated) {
- DEBUG("%p: cannot set vlan filter, "
- "device is in isolated mode", (void *)dev);
- priv_unlock(priv);
- return -EINVAL;
- }
- ret = vlan_filter_set(dev, vlan_id, on);
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
-}
-
-const struct rte_flow_ops mlx4_flow_ops = {
- .validate = mlx4_flow_validate,
- .create = mlx4_flow_create,
- .destroy = mlx4_flow_destroy,
- .flush = mlx4_flow_flush,
- .query = NULL,
- .isolate = mlx4_flow_isolate,
-};
-
-/**
- * Manage filter operations.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
- * Pointer to operation-specific structure.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-mlx4_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- int ret = EINVAL;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &mlx4_flow_ops;
- return 0;
- case RTE_ETH_FILTER_FDIR:
- DEBUG("%p: filter type FDIR is not supported by this PMD",
- (void *)dev);
- break;
- default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- break;
- }
- return -ret;
-}
-
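
/*
 * Illustrative sketch: rte_flow reaches the PMD through the callback above;
 * the rte_flow_*() entry points internally issue RTE_ETH_FILTER_GENERIC +
 * RTE_ETH_FILTER_GET to obtain mlx4_flow_ops.
 */
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static const struct rte_flow_ops *
app_get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}
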
static const struct eth_dev_ops mlx4_dev_ops = {
.dev_configure = mlx4_dev_configure,
.dev_start = mlx4_dev_start,
.dev_stop = mlx4_dev_stop,
- .dev_set_link_down = mlx4_set_link_down,
- .dev_set_link_up = mlx4_set_link_up,
+ .dev_set_link_down = mlx4_dev_set_link_down,
+ .dev_set_link_up = mlx4_dev_set_link_up,
.dev_close = mlx4_dev_close,
+ .link_update = mlx4_link_update,
.promiscuous_enable = mlx4_promiscuous_enable,
.promiscuous_disable = mlx4_promiscuous_disable,
.allmulticast_enable = mlx4_allmulticast_enable,
.allmulticast_disable = mlx4_allmulticast_disable,
- .link_update = mlx4_link_update,
+ .mac_addr_remove = mlx4_mac_addr_remove,
+ .mac_addr_add = mlx4_mac_addr_add,
+ .mac_addr_set = mlx4_mac_addr_set,
.stats_get = mlx4_stats_get,
.stats_reset = mlx4_stats_reset,
- .queue_stats_mapping_set = NULL,
.dev_infos_get = mlx4_dev_infos_get,
.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
.vlan_filter_set = mlx4_vlan_filter_set,
- .vlan_tpid_set = NULL,
- .vlan_strip_queue_set = NULL,
- .vlan_offload_set = NULL,
.rx_queue_setup = mlx4_rx_queue_setup,
.tx_queue_setup = mlx4_tx_queue_setup,
.rx_queue_release = mlx4_rx_queue_release,
.tx_queue_release = mlx4_tx_queue_release,
- .dev_led_on = NULL,
- .dev_led_off = NULL,
- .flow_ctrl_get = mlx4_dev_get_flow_ctrl,
- .flow_ctrl_set = mlx4_dev_set_flow_ctrl,
- .priority_flow_ctrl_set = NULL,
- .mac_addr_remove = mlx4_mac_addr_remove,
- .mac_addr_add = mlx4_mac_addr_add,
- .mac_addr_set = mlx4_mac_addr_set,
- .mtu_set = mlx4_dev_set_mtu,
- .filter_ctrl = mlx4_dev_filter_ctrl,
+ .flow_ctrl_get = mlx4_flow_ctrl_get,
+ .flow_ctrl_set = mlx4_flow_ctrl_set,
+ .mtu_set = mlx4_mtu_set,
+ .filter_ctrl = mlx4_filter_ctrl,
.rx_queue_intr_enable = mlx4_rx_intr_enable,
.rx_queue_intr_disable = mlx4_rx_intr_disable,
};
@@ -5381,7 +267,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
* PCI bus address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -5392,8 +278,10 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
MKSTR(path, "%s/device/uevent", device->ibdev_path);
file = fopen(path, "rb");
- if (file == NULL)
- return -1;
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
while (fgets(line, sizeof(line), file) == line) {
size_t len = strlen(line);
int ret;
@@ -5423,572 +311,48 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
}
/**
- * Get MAC address by querying netdevice.
- *
- * @param[in] priv
- * struct priv for the requested device.
- * @param[out] mac
- * MAC address output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
-{
- struct ifreq request;
-
- if (priv_ifreq(priv, SIOCGIFHWADDR, &request))
- return -1;
- memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
- return 0;
-}
-
-/* Support up to 32 adapters. */
-static struct {
- struct rte_pci_addr pci_addr; /* associated PCI address */
- uint32_t ports; /* physical ports bitfield. */
-} mlx4_dev[32];
-
-/**
- * Get device index in mlx4_dev[] from PCI bus address.
- *
- * @param[in] pci_addr
- * PCI bus address to look for.
- *
- * @return
- * mlx4_dev[] index on success, -1 on failure.
- */
-static int
-mlx4_dev_idx(struct rte_pci_addr *pci_addr)
-{
- unsigned int i;
- int ret = -1;
-
- assert(pci_addr != NULL);
- for (i = 0; (i != elemof(mlx4_dev)); ++i) {
- if ((mlx4_dev[i].pci_addr.domain == pci_addr->domain) &&
- (mlx4_dev[i].pci_addr.bus == pci_addr->bus) &&
- (mlx4_dev[i].pci_addr.devid == pci_addr->devid) &&
- (mlx4_dev[i].pci_addr.function == pci_addr->function))
- return i;
- if ((mlx4_dev[i].ports == 0) && (ret == -1))
- ret = i;
- }
- return ret;
-}
-
-/**
- * Retrieve integer value from environment variable.
- *
- * @param[in] name
- * Environment variable name.
- *
- * @return
- * Integer value, 0 if the variable is not set.
- */
-static int
-mlx4_getenv_int(const char *name)
-{
- const char *val = getenv(name);
-
- if (val == NULL)
- return 0;
- return atoi(val);
-}
-
-static void
-mlx4_dev_link_status_handler(void *);
-static void
-mlx4_dev_interrupt_handler(void *);
-
-/**
- * Link/device status handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @param events
- * Pointer to event flags holder.
- *
- * @return
- *   Number of events.
- */
-static int
-priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
- uint32_t *events)
-{
- struct ibv_async_event event;
- int port_change = 0;
- struct rte_eth_link *link = &dev->data->dev_link;
- int ret = 0;
-
- *events = 0;
-	/* Read all messages and acknowledge them. */
- for (;;) {
- if (ibv_get_async_event(priv->ctx, &event))
- break;
- if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
- event.event_type == IBV_EVENT_PORT_ERR) &&
- (priv->intr_conf.lsc == 1)) {
- port_change = 1;
- ret++;
- } else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
- priv->intr_conf.rmv == 1) {
- *events |= (1 << RTE_ETH_EVENT_INTR_RMV);
- ret++;
- } else
- DEBUG("event type %d on port %d not handled",
- event.event_type, event.element.port_num);
- ibv_ack_async_event(&event);
- }
- if (!port_change)
- return ret;
- mlx4_link_update(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
- if (!priv->pending_alarm) {
- /* Inconsistent status, check again later. */
- priv->pending_alarm = 1;
- rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
- mlx4_dev_link_status_handler,
- dev);
- }
- } else {
- *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
- }
- return ret;
-}
-
-/**
- * Handle delayed link status event.
- *
- * @param arg
- * Registered argument.
- */
-static void
-mlx4_dev_link_status_handler(void *arg)
-{
- struct rte_eth_dev *dev = arg;
- struct priv *priv = dev->data->dev_private;
- uint32_t events;
- int ret;
-
- priv_lock(priv);
- assert(priv->pending_alarm == 1);
- priv->pending_alarm = 0;
- ret = priv_dev_status_handler(priv, dev, &events);
- priv_unlock(priv);
- if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
- NULL);
-}
-
-/**
- * Handle interrupts from the NIC.
- *
- * @param[in] intr_handle
- * Interrupt handler.
- * @param cb_arg
- * Callback argument.
- */
-static void
-mlx4_dev_interrupt_handler(void *cb_arg)
-{
- struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
- int ret;
- uint32_t ev;
- int i;
-
- priv_lock(priv);
- ret = priv_dev_status_handler(priv, dev, &ev);
- priv_unlock(priv);
- if (ret > 0) {
- for (i = RTE_ETH_EVENT_UNKNOWN;
- i < RTE_ETH_EVENT_MAX;
- i++) {
- if (ev & (1 << i)) {
- ev &= ~(1 << i);
- _rte_eth_dev_callback_process(dev, i, NULL,
- NULL);
- ret--;
- }
- }
- if (ret)
- WARN("%d event%s not processed", ret,
- (ret > 1 ? "s were" : " was"));
- }
-}
-
-/**
- * Uninstall interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
-{
- int ret;
-
- if (priv->intr_conf.lsc ||
- priv->intr_conf.rmv)
- return 0;
- ret = rte_intr_callback_unregister(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
- if (ret < 0) {
- ERROR("rte_intr_callback_unregister failed with %d"
- "%s%s%s", ret,
- (errno ? " (errno: " : ""),
- (errno ? strerror(errno) : ""),
- (errno ? ")" : ""));
- }
- priv->intr_handle.fd = 0;
- priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
- return ret;
-}
-
-/**
- * Install interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative errno value on failure.
- */
-static int
-priv_dev_interrupt_handler_install(struct priv *priv,
- struct rte_eth_dev *dev)
-{
- int flags;
- int rc;
-
- /* Check whether the interrupt handler has already been installed
- * for either type of interrupt
- */
- if (priv->intr_conf.lsc &&
- priv->intr_conf.rmv &&
- priv->intr_handle.fd)
- return 0;
- assert(priv->ctx->async_fd > 0);
- flags = fcntl(priv->ctx->async_fd, F_GETFL);
- rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (rc < 0) {
- INFO("failed to change file descriptor async event queue");
- dev->data->dev_conf.intr_conf.lsc = 0;
- dev->data->dev_conf.intr_conf.rmv = 0;
- return -errno;
- } else {
- priv->intr_handle.fd = priv->ctx->async_fd;
- priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
- rc = rte_intr_callback_register(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
- if (rc) {
- ERROR("rte_intr_callback_register failed "
- " (errno: %s)", strerror(errno));
- return rc;
- }
- }
- return 0;
-}
-
-/**
- * Uninstall interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative value on error.
- */
-static int
-priv_dev_removal_interrupt_handler_uninstall(struct priv *priv,
- struct rte_eth_dev *dev)
-{
- if (dev->data->dev_conf.intr_conf.rmv) {
- priv->intr_conf.rmv = 0;
- return priv_dev_interrupt_handler_uninstall(priv, dev);
- }
- return 0;
-}
-
-/**
- * Uninstall interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative value on error,
- */
-static int
-priv_dev_link_interrupt_handler_uninstall(struct priv *priv,
- struct rte_eth_dev *dev)
-{
- int ret = 0;
-
- if (dev->data->dev_conf.intr_conf.lsc) {
- priv->intr_conf.lsc = 0;
- ret = priv_dev_interrupt_handler_uninstall(priv, dev);
- if (ret)
- return ret;
- }
- if (priv->pending_alarm)
- if (rte_eal_alarm_cancel(mlx4_dev_link_status_handler,
- dev)) {
- ERROR("rte_eal_alarm_cancel failed "
- " (errno: %s)", strerror(rte_errno));
- return -rte_errno;
- }
- priv->pending_alarm = 0;
- return 0;
-}
-
-/**
- * Install link interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative value on error.
- */
-static int
-priv_dev_link_interrupt_handler_install(struct priv *priv,
- struct rte_eth_dev *dev)
-{
- int ret;
-
- if (dev->data->dev_conf.intr_conf.lsc) {
- ret = priv_dev_interrupt_handler_install(priv, dev);
- if (ret)
- return ret;
- priv->intr_conf.lsc = 1;
- }
- return 0;
-}
-
-/**
- * Install removal interrupt handler.
- *
- * @param priv
- * Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
- * @return
- * 0 on success, negative value on error.
- */
-static int
-priv_dev_removal_interrupt_handler_install(struct priv *priv,
- struct rte_eth_dev *dev)
-{
- int ret;
-
- if (dev->data->dev_conf.intr_conf.rmv) {
- ret = priv_dev_interrupt_handler_install(priv, dev);
- if (ret)
- return ret;
- priv->intr_conf.rmv = 1;
- }
- return 0;
-}
-
-/**
- * Allocate queue vector and fill epoll fd list for Rx interrupts.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, negative on failure.
- */
-static int
-priv_rx_intr_vec_enable(struct priv *priv)
-{
- unsigned int i;
- unsigned int rxqs_n = priv->rxqs_n;
- unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- unsigned int count = 0;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
-
- if (!priv->dev->data->dev_conf.intr_conf.rxq)
- return 0;
- priv_rx_intr_vec_disable(priv);
- intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
- if (intr_handle->intr_vec == NULL) {
- ERROR("failed to allocate memory for interrupt vector,"
- " Rx interrupts will not be supported");
- return -ENOMEM;
- }
- intr_handle->type = RTE_INTR_HANDLE_EXT;
- for (i = 0; i != n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- int fd;
- int flags;
- int rc;
-
- /* Skip queues that cannot request interrupts. */
- if (!rxq || !rxq->channel) {
- /* Use invalid intr_vec[] index to disable entry. */
- intr_handle->intr_vec[i] =
- RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID;
- continue;
- }
- if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
- ERROR("too many Rx queues for interrupt vector size"
- " (%d), Rx interrupts cannot be enabled",
- RTE_MAX_RXTX_INTR_VEC_ID);
- priv_rx_intr_vec_disable(priv);
- return -1;
- }
- fd = rxq->channel->fd;
- flags = fcntl(fd, F_GETFL);
- rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
- if (rc < 0) {
- ERROR("failed to make Rx interrupt file descriptor"
- " %d non-blocking for queue index %d", fd, i);
- priv_rx_intr_vec_disable(priv);
- return rc;
- }
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = fd;
- count++;
- }
- if (!count)
- priv_rx_intr_vec_disable(priv);
- else
- intr_handle->nb_efd = count;
- return 0;
-}
-
-/**
- * Clean up Rx interrupts handler.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_rx_intr_vec_disable(struct priv *priv)
-{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
-
- rte_intr_free_epoll_fd(intr_handle);
- free(intr_handle->intr_vec);
- intr_handle->nb_efd = 0;
- intr_handle->intr_vec = NULL;
-}
-
-/**
- * DPDK callback for Rx queue interrupt enable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * Rx queue index.
- *
- * @return
- * 0 on success, negative on failure.
- */
-static int
-mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = (*priv->rxqs)[idx];
- int ret;
-
- if (!rxq || !rxq->channel)
- ret = EINVAL;
- else
- ret = ibv_req_notify_cq(rxq->cq, 0);
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", idx);
- return -ret;
-}
-
-/**
- * DPDK callback for Rx queue interrupt disable.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * Rx queue index.
- *
- * @return
- * 0 on success, negative on failure.
- */
-static int
-mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = (*priv->rxqs)[idx];
- struct ibv_cq *ev_cq;
- void *ev_ctx;
- int ret;
-
- if (!rxq || !rxq->channel) {
- ret = EINVAL;
- } else {
- ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != rxq->cq)
- ret = EINVAL;
- }
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- idx);
- else
- ibv_ack_cq_events(rxq->cq, 1);
- return -ret;
-}
-
-/**
* Verify and store value for device argument.
*
* @param[in] key
* Key argument to verify.
* @param[in] val
* Value associated with key.
- * @param out
- * User data.
+ * @param[in, out] conf
+ * Shared configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_arg_parse(const char *key, const char *val, void *out)
+mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
- struct mlx4_conf *conf = out;
unsigned long tmp;
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
+ rte_errno = errno;
WARN("%s: \"%s\" is not a valid integer", key, val);
- return -errno;
+ return -rte_errno;
}
if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
- if (tmp >= MLX4_PMD_MAX_PHYS_PORTS) {
- ERROR("invalid port index %lu (max: %u)",
- tmp, MLX4_PMD_MAX_PHYS_PORTS - 1);
+ uint32_t ports = rte_log2_u32(conf->ports.present);
+
+ if (tmp >= ports) {
+ ERROR("port index %lu outside range [0,%" PRIu32 ")",
+ tmp, ports);
return -EINVAL;
}
- conf->active_ports |= 1 << tmp;
+ if (!(conf->ports.present & (1 << tmp))) {
+ rte_errno = EINVAL;
+ ERROR("invalid port index %lu", tmp);
+ return -rte_errno;
+ }
+ conf->ports.enabled |= 1 << tmp;
} else {
+ rte_errno = EINVAL;
WARN("%s: unknown parameter", key);
- return -EINVAL;
+ return -rte_errno;
}
return 0;
}
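The interplay between the present and enabled port bitmasks above can be illustrated standalone. A minimal sketch under assumed values (dual-port adapter, user passing port=1), not part of the patch:

/* Sketch of the present/enabled port bitmasks (assumed values). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned int phys_port_cnt = 2; /* e.g. a dual-port adapter */
	uint32_t present = (UINT32_C(1) << phys_port_cnt) - 1; /* 0x3 */
	uint32_t enabled = 0;
	unsigned long tmp = 1; /* "port=1" devargs value */

	if (present & (1 << tmp))
		enabled |= 1 << tmp; /* only the second port is probed */
	printf("present=%#" PRIx32 " enabled=%#" PRIx32 "\n",
	       present, enabled);
	return 0;
}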
@@ -6000,7 +364,7 @@ mlx4_arg_parse(const char *key, const char *val, void *out)
* Device arguments structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
@@ -6014,15 +378,21 @@ mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
return 0;
kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
if (kvlist == NULL) {
+ rte_errno = EINVAL;
ERROR("failed to parse kvargs");
- return -EINVAL;
+ return -rte_errno;
}
/* Process parameters. */
for (i = 0; pmd_mlx4_init_params[i]; ++i) {
arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
while (arg_count-- > 0) {
- ret = rte_kvargs_process(kvlist, MLX4_PMD_PORT_KVARG,
- mlx4_arg_parse, conf);
+ ret = rte_kvargs_process(kvlist,
+ MLX4_PMD_PORT_KVARG,
+ (int (*)(const char *,
+ const char *,
+ void *))
+ mlx4_arg_parse,
+ conf);
if (ret != 0)
goto free_kvlist;
}
@@ -6046,7 +416,7 @@ static struct rte_pci_driver mlx4_driver;
* PCI device information.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
@@ -6057,30 +427,20 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
struct mlx4_conf conf = {
- .active_ports = 0,
+ .ports.present = 0,
};
unsigned int vf;
- int idx;
int i;
(void)pci_drv;
assert(pci_drv == &mlx4_driver);
- /* Get mlx4_dev[] index. */
- idx = mlx4_dev_idx(&pci_dev->addr);
- if (idx == -1) {
- ERROR("this driver cannot support any more adapters");
- return -ENOMEM;
- }
- DEBUG("using driver device index %d", idx);
-
- /* Save PCI address. */
- mlx4_dev[idx].pci_addr = pci_dev->addr;
list = ibv_get_device_list(&i);
if (list == NULL) {
- assert(errno);
- if (errno == ENOSYS)
+ rte_errno = errno;
+ assert(rte_errno);
+ if (rte_errno == ENOSYS)
ERROR("cannot list devices, is ib_uverbs loaded?");
- return -errno;
+ return -rte_errno;
}
assert(i >= 0);
/*
@@ -6111,190 +471,112 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
+ rte_errno = ENODEV;
ERROR("cannot access device, is mlx4_ib loaded?");
- return -ENODEV;
+ return -rte_errno;
case EINVAL:
+ rte_errno = EINVAL;
ERROR("cannot use device, are drivers up to date?");
- return -EINVAL;
+ return -rte_errno;
}
assert(err > 0);
- return -err;
+ rte_errno = err;
+ return -rte_errno;
}
ibv_dev = list[i];
-
DEBUG("device opened");
if (ibv_query_device(attr_ctx, &device_attr)) {
- err = ENODEV;
+ rte_errno = ENODEV;
goto error;
}
INFO("%u port(s) detected", device_attr.phys_port_cnt);
-
+ conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
- err = EINVAL;
+ rte_errno = EINVAL;
goto error;
}
	/* Use all ports when none are defined. */
- if (conf.active_ports == 0) {
- for (i = 0; i < MLX4_PMD_MAX_PHYS_PORTS; i++)
- conf.active_ports |= 1 << i;
- }
+ if (!conf.ports.enabled)
+ conf.ports.enabled = conf.ports.present;
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
- uint32_t test = (1 << i);
struct ibv_context *ctx = NULL;
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
struct rte_eth_dev *eth_dev = NULL;
-#ifdef HAVE_EXP_QUERY_DEVICE
- struct ibv_exp_device_attr exp_device_attr;
-#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
- /* If port is not active, skip. */
- if (!(conf.active_ports & (1 << i)))
+ /* If port is not enabled, skip. */
+ if (!(conf.ports.enabled & (1 << i)))
continue;
-#ifdef HAVE_EXP_QUERY_DEVICE
- exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
-#ifdef RSS_SUPPORT
- exp_device_attr.comp_mask |= IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ;
-#endif /* RSS_SUPPORT */
-#endif /* HAVE_EXP_QUERY_DEVICE */
-
- DEBUG("using port %u (%08" PRIx32 ")", port, test);
-
+ DEBUG("using port %u", port);
ctx = ibv_open_device(ibv_dev);
if (ctx == NULL) {
- err = ENODEV;
+ rte_errno = ENODEV;
goto port_error;
}
-
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
- ERROR("port query failed: %s", strerror(err));
- err = ENODEV;
+ rte_errno = err;
+ ERROR("port query failed: %s", strerror(rte_errno));
goto port_error;
}
-
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ rte_errno = ENOTSUP;
ERROR("port %d is not configured in Ethernet mode",
port);
- err = EINVAL;
goto port_error;
}
-
if (port_attr.state != IBV_PORT_ACTIVE)
DEBUG("port %d is not active: \"%s\" (%d)",
port, ibv_port_state_str(port_attr.state),
port_attr.state);
-
+ /* Make asynchronous FD non-blocking to handle interrupts. */
+ if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+ ERROR("cannot make asynchronous FD non-blocking: %s",
+ strerror(rte_errno));
+ goto port_error;
+ }
/* Allocate protection domain. */
pd = ibv_alloc_pd(ctx);
if (pd == NULL) {
+ rte_errno = ENOMEM;
ERROR("PD allocation failure");
- err = ENOMEM;
goto port_error;
}
-
- mlx4_dev[idx].ports |= test;
-
/* from rte_ethdev.c */
priv = rte_zmalloc("ethdev private structure",
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
+ rte_errno = ENOMEM;
ERROR("priv allocation failure");
- err = ENOMEM;
goto port_error;
}
-
priv->ctx = ctx;
priv->device_attr = device_attr;
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
-#ifdef HAVE_EXP_QUERY_DEVICE
- if (ibv_exp_query_device(ctx, &exp_device_attr)) {
- ERROR("ibv_exp_query_device() failed");
- err = ENODEV;
- goto port_error;
- }
-#ifdef RSS_SUPPORT
- if ((exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_QPG) &&
- (exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_UD_RSS) &&
- (exp_device_attr.comp_mask &
- IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ) &&
- (exp_device_attr.max_rss_tbl_sz > 0)) {
- priv->hw_qpg = 1;
- priv->hw_rss = 1;
- priv->max_rss_tbl_sz = exp_device_attr.max_rss_tbl_sz;
- } else {
- priv->hw_qpg = 0;
- priv->hw_rss = 0;
- priv->max_rss_tbl_sz = 0;
- }
- priv->hw_tss = !!(exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_UD_TSS);
- DEBUG("device flags: %s%s%s",
- (priv->hw_qpg ? "IBV_DEVICE_QPG " : ""),
- (priv->hw_tss ? "IBV_DEVICE_TSS " : ""),
- (priv->hw_rss ? "IBV_DEVICE_RSS " : ""));
- if (priv->hw_rss)
- DEBUG("maximum RSS indirection table size: %u",
- exp_device_attr.max_rss_tbl_sz);
-#endif /* RSS_SUPPORT */
-
- priv->hw_csum =
- ((exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
- (exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
+ priv->vf = vf;
+ priv->hw_csum = !!(device_attr.device_cap_flags &
+ IBV_DEVICE_RAW_IP_CSUM);
DEBUG("checksum offloading is %ssupported",
(priv->hw_csum ? "" : "not "));
-
- priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_VXLAN_SUPPORT);
+ /* Only ConnectX-3 Pro supports tunneling. */
+ priv->hw_csum_l2tun =
+ priv->hw_csum &&
+ (device_attr.vendor_part_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
DEBUG("L2 tunnel checksum offloads are %ssupported",
(priv->hw_csum_l2tun ? "" : "not "));
-
-#ifdef INLINE_RECV
- priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE");
-
- if (priv->inl_recv_size) {
- exp_device_attr.comp_mask =
- IBV_EXP_DEVICE_ATTR_INLINE_RECV_SZ;
- if (ibv_exp_query_device(ctx, &exp_device_attr)) {
- INFO("Couldn't query device for inline-receive"
- " capabilities.");
- priv->inl_recv_size = 0;
- } else {
- if ((unsigned)exp_device_attr.inline_recv_sz <
- priv->inl_recv_size) {
- INFO("Max inline-receive (%d) <"
- " requested inline-receive (%u)",
- exp_device_attr.inline_recv_sz,
- priv->inl_recv_size);
- priv->inl_recv_size =
- exp_device_attr.inline_recv_sz;
- }
- }
- INFO("Set inline receive size to %u",
- priv->inl_recv_size);
- }
-#endif /* INLINE_RECV */
-#endif /* HAVE_EXP_QUERY_DEVICE */
-
- (void)mlx4_getenv_int;
- priv->vf = vf;
/* Configure the first MAC address by default. */
- if (priv_get_mac(priv, &mac.addr_bytes)) {
+ if (mlx4_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
- " (errno: %s)", strerror(errno));
- err = ENODEV;
+ " (rte_errno: %s)", strerror(rte_errno));
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -6302,18 +584,13 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
mac.addr_bytes[0], mac.addr_bytes[1],
mac.addr_bytes[2], mac.addr_bytes[3],
mac.addr_bytes[4], mac.addr_bytes[5]);
- /* Register MAC and broadcast addresses. */
- claim_zero(priv_mac_addr_add(priv, 0,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac.addr_bytes));
- claim_zero(priv_mac_addr_add(priv, (elemof(priv->mac) - 1),
- &(const uint8_t [ETHER_ADDR_LEN])
- { "\xff\xff\xff\xff\xff\xff" }));
+ /* Register MAC address. */
+ priv->mac[0] = mac;
#ifndef NDEBUG
{
char ifname[IF_NAMESIZE];
- if (priv_get_ifname(priv, &ifname) == 0)
+ if (mlx4_get_ifname(priv, &ifname) == 0)
DEBUG("port %u ifname is \"%s\"",
priv->port, ifname);
else
@@ -6321,9 +598,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
#endif
/* Get actual MTU if possible. */
- priv_get_mtu(priv, &priv->mtu);
+ mlx4_mtu_get(priv, &priv->mtu);
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
-
/* from rte_ethdev.c */
{
char name[RTE_ETH_NAME_MAX_LEN];
@@ -6334,67 +610,41 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
if (eth_dev == NULL) {
ERROR("can not allocate rte ethdev");
- err = ENOMEM;
+ rte_errno = ENOMEM;
goto port_error;
}
-
- /* Secondary processes have to use local storage for their
- * private data as well as a copy of eth_dev->data, but this
- * pointer must not be modified before burst functions are
- * actually called. */
- if (mlx4_is_secondary()) {
- struct mlx4_secondary_data *sd =
- &mlx4_secondary_data[eth_dev->data->port_id];
-
- sd->primary_priv = eth_dev->data->dev_private;
- if (sd->primary_priv == NULL) {
- ERROR("no private data for port %u",
- eth_dev->data->port_id);
- err = EINVAL;
- goto port_error;
- }
- sd->shared_dev_data = eth_dev->data;
- rte_spinlock_init(&sd->lock);
- memcpy(sd->data.name, sd->shared_dev_data->name,
- sizeof(sd->data.name));
- sd->data.dev_private = priv;
- sd->data.rx_mbuf_alloc_failed = 0;
- sd->data.mtu = ETHER_MTU;
- sd->data.port_id = sd->shared_dev_data->port_id;
- sd->data.mac_addrs = priv->mac;
- eth_dev->tx_pkt_burst = mlx4_tx_burst_secondary_setup;
- eth_dev->rx_pkt_burst = mlx4_rx_burst_secondary_setup;
- } else {
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = priv->mac;
- }
+ eth_dev->data->dev_private = priv;
+ eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
-
eth_dev->device->driver = &mlx4_driver.driver;
-
+ /* Initialize local interrupt handle for current port. */
+ priv->intr_handle = (struct rte_intr_handle){
+ .fd = -1,
+ .type = RTE_INTR_HANDLE_EXT,
+ };
/*
- * Copy and override interrupt handle to prevent it from
- * being shared between all ethdev instances of a given PCI
- * device. This is required to properly handle Rx interrupts
- * on all ports.
+ * Override ethdev interrupt handle pointer with private
+ * handle instead of that of the parent PCI device used by
+ * default. This prevents it from being shared between all
+ * ports of the same PCI device since each of them is
+		 * associated with its own Verbs context.
+ *
+ * Rx interrupts in particular require this as the PMD has
+ * no control over the registration of queue interrupts
+		 * besides setting up eth_dev->intr_handle; the rest is
+ * handled by rte_intr_rx_ctl().
*/
- priv->intr_handle_dev = *eth_dev->intr_handle;
- eth_dev->intr_handle = &priv->intr_handle_dev;
-
+ eth_dev->intr_handle = &priv->intr_handle;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
-
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
- priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ mlx4_dev_set_link_up(priv->dev);
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
continue;
-
port_error:
rte_free(priv);
if (pd)
@@ -6405,27 +655,21 @@ port_error:
rte_eth_dev_release_port(eth_dev);
break;
}
-
+ if (i == device_attr.phys_port_cnt)
+ return 0;
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
* long as the dpdk does not provide a way to deallocate a ethdev and a
* way to enumerate the registered ethdevs to free the previous ones.
*/
-
- /* no port found, complain */
- if (!mlx4_dev[idx].ports) {
- err = ENODEV;
- goto error;
- }
-
error:
if (attr_ctx)
claim_zero(ibv_close_device(attr_ctx));
if (list)
ibv_free_device_list(list);
- assert(err >= 0);
- return -err;
+ assert(rte_errno >= 0);
+ return -rte_errno;
}
static const struct rte_pci_id mlx4_pci_id_map[] = {
@@ -6463,7 +707,6 @@ RTE_INIT(rte_mlx4_pmd_init);
static void
rte_mlx4_pmd_init(void)
{
- RTE_BUILD_BUG_ON(sizeof(wr_id_t) != sizeof(uint64_t));
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c0ade4f1..3aeef87e 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2017 6WIND S.A.
- * Copyright 2012-2017 Mellanox.
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,29 +34,11 @@
#ifndef RTE_PMD_MLX4_H_
#define RTE_PMD_MLX4_H_
-#include <stddef.h>
+#include <net/if.h>
#include <stdint.h>
-#include <limits.h>
+#include <sys/queue.h>
-/*
- * Runtime logging through RTE_LOG() is enabled when not in debugging mode.
- * Intermediate LOG_*() macros add the required end-of-line characters.
- */
-#ifndef NDEBUG
-#define INFO(...) DEBUG(__VA_ARGS__)
-#define WARN(...) DEBUG(__VA_ARGS__)
-#define ERROR(...) DEBUG(__VA_ARGS__)
-#else
-#define LOG__(level, m, ...) \
- RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
-#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
-#define INFO(...) LOG_(INFO, __VA_ARGS__)
-#define WARN(...) LOG_(WARNING, __VA_ARGS__)
-#define ERROR(...) LOG_(ERR, __VA_ARGS__)
-#endif
-
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
@@ -65,36 +47,25 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/*
- * Maximum number of simultaneous MAC addresses supported.
- *
- * According to ConnectX's Programmer Reference Manual:
- * The L2 Address Match is implemented by comparing a MAC/VLAN combination
- * of 128 MAC addresses and 127 VLAN values, comprising 128x127 possible
- * L2 addresses.
- */
-#define MLX4_MAX_MAC_ADDRESSES 128
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
-/* Maximum number of simultaneous VLAN filters supported. See above. */
-#define MLX4_MAX_VLAN_IDS 127
+/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
+#define MLX4_MAX_MAC_ADDRESSES 128
-/* Request send completion once in every 64 sends, might be less. */
+/** Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64
-/* Maximum number of physical ports. */
-#define MLX4_PMD_MAX_PHYS_PORTS 2
-
-/* Maximum number of Scatter/Gather Elements per Work Request. */
-#ifndef MLX4_PMD_SGE_WR_N
-#define MLX4_PMD_SGE_WR_N 4
-#endif
-
-/* Maximum size for inline data. */
-#ifndef MLX4_PMD_MAX_INLINE
+/** Maximum size for inline data. */
#define MLX4_PMD_MAX_INLINE 0
-#endif
-/*
+/** Fixed RSS hash key size in bytes. Cannot be modified. */
+#define MLX4_RSS_HASH_KEY_SIZE 40
+
+/**
* Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
* from which buffers are to be transmitted will have to be mapped by this
* driver to their own Memory Region (MR). This is a slow operation.
@@ -105,18 +76,10 @@
#define MLX4_PMD_TX_MP_CACHE 8
#endif
-/*
- * If defined, only use software counters. The PMD will never ask the hardware
- * for these, and many of them won't be available.
- */
-#ifndef MLX4_PMD_SOFT_COUNTERS
-#define MLX4_PMD_SOFT_COUNTERS 1
-#endif
-
-/* Alarm timeout. */
-#define MLX4_ALARM_TIMEOUT_US 100000
+/** Interrupt alarm timeout value in microseconds. */
+#define MLX4_INTR_ALARM_TIMEOUT 100000
-/* Port parameter. */
+/** Port parameter. */
#define MLX4_PMD_PORT_KVARG "port"
enum {
@@ -129,258 +92,92 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
};
+/** Driver name reported to lower layers and used in log output. */
#define MLX4_DRIVER_NAME "net_mlx4"
-/* Bit-field manipulation. */
-#define BITFIELD_DECLARE(bf, type, size) \
- type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
- !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
-#define BITFIELD_DEFINE(bf, type, size) \
- BITFIELD_DECLARE((bf), type, (size)) = { 0 }
-#define BITFIELD_SET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_RESET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
- ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_ISSET(bf, b) \
- (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
- !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
- ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
-
-/* Number of elements in array. */
-#define elemof(a) (sizeof(a) / sizeof((a)[0]))
-
-/* Cast pointer p to structure member m to its parent structure of type t. */
-#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m)))
-
-/* Branch prediction helpers. */
-#ifndef likely
-#define likely(c) __builtin_expect(!!(c), 1)
-#endif
-#ifndef unlikely
-#define unlikely(c) __builtin_expect(!!(c), 0)
-#endif
-
-/* Debugging */
-#ifndef NDEBUG
-#include <stdio.h>
-#define DEBUG__(m, ...) \
- (fprintf(stderr, "%s:%d: %s(): " m "%c", \
- __FILE__, __LINE__, __func__, __VA_ARGS__), \
- fflush(stderr), \
- (void)0)
-/*
- * Save/restore errno around DEBUG__().
- * XXX somewhat undefined behavior, but works.
- */
-#define DEBUG_(...) \
- (errno = ((int []){ \
- *(volatile int *)&errno, \
- (DEBUG__(__VA_ARGS__), 0) \
- })[0])
-#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
-#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
-#define claim_zero(...) \
- (void)(((__VA_ARGS__) == 0) || \
- DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
-#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
-#define claim_positive(...) assert((__VA_ARGS__) >= 0)
-#else /* NDEBUG */
-/* No-ops. */
-#define DEBUG(...) (void)0
-#define claim_zero(...) (__VA_ARGS__)
-#define claim_nonzero(...) (__VA_ARGS__)
-#define claim_positive(...) (__VA_ARGS__)
-#endif /* NDEBUG */
-
-struct mlx4_rxq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t ipackets; /**< Total of successfully received packets. */
- uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
- uint64_t idropped; /**< Total of packets dropped when RX ring full. */
- uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
-};
-
-/* RX element (scattered packets). */
-struct rxq_elt_sp {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
- struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
-};
-
-/* RX element. */
-struct rxq_elt {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sge; /* Scatter/Gather Element. */
- /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
-};
-
-/* RX queue descriptor. */
-struct rxq {
- LIST_ENTRY(rxq) next; /* Used by parent queue only */
- struct priv *priv; /* Back pointer to private data. */
- struct rte_mempool *mp; /* Memory Pool for allocations. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
- struct ibv_comp_channel *channel;
- /*
- * Each VLAN ID requires a separate flow steering rule.
- */
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
- struct ibv_flow *promisc_flow; /* Promiscuous flow. */
- struct ibv_flow *allmulti_flow; /* Multicast flow. */
- unsigned int port_id; /* Port ID for incoming packets. */
- unsigned int elts_n; /* (*elts)[] length. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- union {
- struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
- struct rxq_elt (*no_sp)[]; /* RX elements. */
- } elts;
- unsigned int sp:1; /* Use scattered RX elements. */
- unsigned int csum:1; /* Enable checksum offloading. */
- unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
- struct mlx4_rxq_stats stats; /* RX queue counters. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
- struct {
- uint16_t queues_n;
- uint16_t queues[RTE_MAX_QUEUES_PER_PORT];
- } rss;
-};
-
-/* TX element. */
-struct txq_elt {
- struct rte_mbuf *buf;
-};
-
-struct mlx4_txq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t opackets; /**< Total of successfully sent packets. */
- uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
- uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/*
- * Linear buffer type. It is used when transmitting buffers with too many
- * segments that do not fit the hardware queue (see max_send_sge).
- * Extra segments are copied (linearized) in such buffers, replacing the
- * last SGE during TX.
- * The size is arbitrary but large enough to hold a jumbo frame with
- * 8 segments considering mbuf.buf_len is about 2048 bytes.
- */
-typedef uint8_t linear_t[16384];
+struct mlx4_drop;
+struct mlx4_rss;
+struct rxq;
+struct txq;
+struct rte_flow;
-/* TX queue descriptor. */
-struct txq {
- struct priv *priv; /* Back pointer to private data. */
- struct {
- const struct rte_mempool *mp; /* Cached Memory Pool. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* mr->lkey */
- } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX4_PMD_MAX_INLINE > 0
- uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
-#endif
- unsigned int elts_n; /* (*elts)[] length. */
- struct txq_elt (*elts)[]; /* TX elements. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- unsigned int elts_tail; /* First element awaiting completion. */
- unsigned int elts_comp; /* Number of completion requests. */
- unsigned int elts_comp_cd; /* Countdown for next completion request. */
- unsigned int elts_comp_cd_init; /* Initial value for countdown. */
- struct mlx4_txq_stats stats; /* TX queue counters. */
- linear_t (*elts_linear)[]; /* Linearized buffers. */
- struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
+/** Memory region descriptor. */
+struct mlx4_mr {
+ LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */
+ uintptr_t start; /**< Base address for memory region. */
+ uintptr_t end; /**< End address for memory region. */
+ uint32_t lkey; /**< L_Key extracted from @p mr. */
+ uint32_t refcnt; /**< Reference count for this object. */
+ struct priv *priv; /**< Back pointer to private data. */
+ struct ibv_mr *mr; /**< Memory region associated with @p mp. */
+ struct rte_mempool *mp; /**< Target memory pool (mempool). */
};
-struct rte_flow;
-
+/** Private data structure. */
struct priv {
- struct rte_eth_dev *dev; /* Ethernet device. */
- struct ibv_context *ctx; /* Verbs context. */
- struct ibv_device_attr device_attr; /* Device properties. */
- struct ibv_pd *pd; /* Protection Domain. */
- /*
- * MAC addresses array and configuration bit-field.
- * An extra entry that cannot be modified by the DPDK is reserved
- * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
- */
- struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- /* VLAN filters. */
- struct {
- unsigned int enabled:1; /* If enabled. */
- unsigned int id:12; /* VLAN ID (0-4095). */
- } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
+ struct rte_eth_dev *dev; /**< Ethernet device. */
+ struct ibv_context *ctx; /**< Verbs context. */
+ struct ibv_device_attr device_attr; /**< Device properties. */
+ struct ibv_pd *pd; /**< Protection Domain. */
/* Device properties. */
- uint16_t mtu; /* Configured MTU. */
- uint8_t port; /* Physical port number. */
- unsigned int started:1; /* Device started, flows enabled. */
- unsigned int promisc:1; /* Device in promiscuous mode. */
- unsigned int allmulti:1; /* Device receives all multicast packets. */
- unsigned int hw_qpg:1; /* QP groups are supported. */
- unsigned int hw_tss:1; /* TSS is supported. */
- unsigned int hw_rss:1; /* RSS is supported. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int rss:1; /* RSS is enabled. */
- unsigned int vf:1; /* This is a VF device. */
- unsigned int pending_alarm:1; /* An alarm is pending. */
- unsigned int isolated:1; /* Toggle isolated mode. */
-#ifdef INLINE_RECV
- unsigned int inl_recv_size; /* Inline recv size */
-#endif
- unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
- /* RX/TX queues. */
- unsigned int rxqs_n; /* RX queues array size. */
- unsigned int txqs_n; /* TX queues array size. */
- struct rxq *(*rxqs)[]; /* RX queues. */
- struct txq *(*txqs)[]; /* TX queues. */
- struct rte_intr_handle intr_handle_dev; /* Device interrupt handler. */
- struct rte_intr_handle intr_handle; /* Interrupt handler. */
- struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
- LIST_HEAD(mlx4_flows, rte_flow) flows;
- struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
- LIST_HEAD(mlx4_parents, rxq) parents;
- rte_spinlock_t lock; /* Lock for control functions. */
+ uint16_t mtu; /**< Configured MTU. */
+ uint8_t port; /**< Physical port number. */
+ uint32_t started:1; /**< Device started, flows enabled. */
+ uint32_t vf:1; /**< This is a VF device. */
+ uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
+ uint32_t isolated:1; /**< Toggle isolated mode. */
+ uint32_t hw_csum:1; /* Checksum offload is supported. */
+ uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
+ struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
+ struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
+ LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
+ LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+ LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */
+ rte_spinlock_t mr_lock; /**< Lock for @p mr access. */
+ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+ /**< Configured MAC addresses. Unused entries are zeroed. */
};
-void priv_lock(struct priv *priv);
-void priv_unlock(struct priv *priv);
-
-int
-rxq_create_qp(struct rxq *rxq,
- uint16_t desc,
- int inactive,
- int children_n,
- struct rxq *rxq_parent);
-
-void
-rxq_parent_cleanup(struct rxq *parent);
-
-struct rxq *
-priv_parent_create(struct priv *priv,
- uint16_t queues[],
- uint16_t children_n);
+/* mlx4_ethdev.c */
+
+int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
+int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
+int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
+int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
+void mlx4_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx4_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
+void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq);
+void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx4_stats_reset(struct rte_eth_dev *dev);
+void mlx4_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *info);
+int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+/* mlx4_intr.c */
+
+int mlx4_intr_uninstall(struct priv *priv);
+int mlx4_intr_install(struct priv *priv);
+int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+/* mlx4_mr.c */
+
+struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp);
+void mlx4_mr_put(struct mlx4_mr *mr);
+uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
+ uint32_t i);
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
new file mode 100644
index 00000000..c2ea4db1
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -0,0 +1,1047 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Miscellaneous control operations for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <dirent.h>
+#include <errno.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <netinet/ip.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_pci.h>
+
+#include "mlx4.h"
+#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Get interface name from private structure.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] ifname
+ * Interface name output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+{
+ DIR *dir;
+ struct dirent *dent;
+ unsigned int dev_type = 0;
+ unsigned int dev_port_prev = ~0u;
+ char match[IF_NAMESIZE] = "";
+
+ {
+ MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
+
+ dir = opendir(path);
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ }
+ while ((dent = readdir(dir)) != NULL) {
+ char *name = dent->d_name;
+ FILE *file;
+ unsigned int dev_port;
+ int r;
+
+ if ((name[0] == '.') &&
+ ((name[1] == '\0') ||
+ ((name[1] == '.') && (name[2] == '\0'))))
+ continue;
+
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ctx->device->ibdev_path, name,
+ (dev_type ? "dev_id" : "dev_port"));
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ if (errno != ENOENT)
+ continue;
+ /*
+ * Switch to dev_id when dev_port does not exist as
+ * is the case with Linux kernel versions < 3.15.
+ */
+try_dev_id:
+ match[0] = '\0';
+ if (dev_type)
+ break;
+ dev_type = 1;
+ dev_port_prev = ~0u;
+ rewinddir(dir);
+ continue;
+ }
+ r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
+ fclose(file);
+ if (r != 1)
+ continue;
+ /*
+ * Switch to dev_id when dev_port returns the same value for
+ * all ports. May happen when using a MOFED release older than
+ * 3.0 with a Linux kernel >= 3.15.
+ */
+ if (dev_port == dev_port_prev)
+ goto try_dev_id;
+ dev_port_prev = dev_port;
+ if (dev_port == (priv->port - 1u))
+ snprintf(match, sizeof(match), "%s", name);
+ }
+ closedir(dir);
+ if (match[0] == '\0') {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ strncpy(*ifname, match, sizeof(*ifname));
+ return 0;
+}
+
+/**
+ * Read from sysfs entry.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[in] entry
+ * Entry name relative to sysfs path.
+ * @param[out] buf
+ * Data output buffer.
+ * @param size
+ * Buffer size.
+ *
+ * @return
+ * Number of bytes read on success, negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx4_sysfs_read(const struct priv *priv, const char *entry,
+ char *buf, size_t size)
+{
+ char ifname[IF_NAMESIZE];
+ FILE *file;
+ int ret;
+
+ ret = mlx4_get_ifname(priv, &ifname);
+ if (ret)
+ return ret;
+
+ MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
+ ifname, entry);
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = fread(buf, 1, size, file);
+ if ((size_t)ret < size && ferror(file)) {
+ rte_errno = EIO;
+ ret = -rte_errno;
+ } else {
+ ret = size;
+ }
+ fclose(file);
+ return ret;
+}
+
+/**
+ * Write to sysfs entry.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[in] entry
+ * Entry name relative to sysfs path.
+ * @param[in] buf
+ * Data buffer.
+ * @param size
+ * Buffer size.
+ *
+ * @return
+ * Number of bytes written on success, negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx4_sysfs_write(const struct priv *priv, const char *entry,
+ char *buf, size_t size)
+{
+ char ifname[IF_NAMESIZE];
+ FILE *file;
+ int ret;
+
+ ret = mlx4_get_ifname(priv, &ifname);
+ if (ret)
+ return ret;
+
+ MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
+ ifname, entry);
+
+ file = fopen(path, "wb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = fwrite(buf, 1, size, file);
+ if ((size_t)ret < size || ferror(file)) {
+ rte_errno = EIO;
+ ret = -rte_errno;
+ } else {
+ ret = size;
+ }
+ fclose(file);
+ return ret;
+}
+
+/**
+ * Get unsigned long sysfs property.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] name
+ * Entry name relative to sysfs path.
+ * @param[out] value
+ * Value output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
+{
+ int ret;
+ unsigned long value_ret;
+ char value_str[32];
+
+ ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
+ if (ret < 0) {
+ DEBUG("cannot read %s value from sysfs: %s",
+ name, strerror(rte_errno));
+ return ret;
+ }
+ value_str[ret] = '\0';
+ errno = 0;
+ value_ret = strtoul(value_str, NULL, 0);
+ if (errno) {
+ rte_errno = errno;
+ DEBUG("invalid %s value `%s': %s", name, value_str,
+ strerror(rte_errno));
+ return -rte_errno;
+ }
+ *value = value_ret;
+ return 0;
+}
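The errno protocol around strtoul() relied on above deserves a note: errno must be cleared beforehand because strtoul() sets it on overflow but never clears it on success. A minimal standalone sketch, not part of the patch:

/* Sketch of the errno-clearing protocol around strtoul(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned long v;

	errno = 0; /* mandatory: strtoul() never clears errno */
	v = strtoul("99999999999999999999999999", NULL, 0);
	if (errno == ERANGE)
		printf("overflow, clamped to %lu\n", v);
	return 0;
}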
+
+/**
+ * Set unsigned long sysfs property.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] name
+ * Entry name relative to sysfs path.
+ * @param value
+ * Value to set.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
+{
+ int ret;
+ MKSTR(value_str, "%lu", value);
+
+ ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
+ if (ret < 0) {
+ DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
+ name, value_str, value, strerror(rte_errno));
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * Perform ifreq ioctl() on associated Ethernet device.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param req
+ * Request number to pass to ioctl().
+ * @param[out] ifr
+ * Interface request structure output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+{
+ int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ int ret;
+
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = mlx4_get_ifname(priv, &ifr->ifr_name);
+ if (!ret && ioctl(sock, req, ifr) == -1) {
+ rte_errno = errno;
+ ret = -rte_errno;
+ }
+ close(sock);
+ return ret;
+}
+
+/**
+ * Get MAC address by querying netdevice.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] mac
+ * MAC address output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+{
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFHWADDR, &request);
+
+ if (ret)
+ return ret;
+ memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return 0;
+}
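The SIOCGIFHWADDR pattern wrapped by mlx4_ifreq() and mlx4_get_mac() above can be exercised standalone. A minimal sketch; "lo" is an assumed interface name, not part of the patch:

/* Sketch of querying a MAC address via SIOCGIFHWADDR ("lo" assumed). */
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	unsigned char mac[6];
	int sock = socket(PF_INET, SOCK_DGRAM, 0);

	if (sock == -1)
		return 1;
	strncpy(ifr.ifr_name, "lo", sizeof(ifr.ifr_name));
	if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0) {
		memcpy(mac, ifr.ifr_hwaddr.sa_data, sizeof(mac));
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	close(sock);
	return 0;
}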
+
+/**
+ * Get device MTU.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] mtu
+ * MTU value output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
+{
+ unsigned long ulong_mtu = 0;
+ int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu);
+
+ if (ret)
+ return ret;
+ *mtu = ulong_mtu;
+ return 0;
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * @param priv
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * MTU value to set.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t new_mtu;
+ int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu);
+
+ if (ret)
+ return ret;
+ ret = mlx4_mtu_get(priv, &new_mtu);
+ if (ret)
+ return ret;
+ if (new_mtu == mtu) {
+ priv->mtu = mtu;
+ return 0;
+ }
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Set device flags.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param keep
+ * Bitmask for flags that must remain untouched.
+ * @param flags
+ * Bitmask for flags to modify.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+{
+ unsigned long tmp = 0;
+ int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp);
+
+ if (ret)
+ return ret;
+ tmp &= keep;
+ tmp |= (flags & (~keep));
+ return mlx4_set_sysfs_ulong(priv, "flags", tmp);
+}
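A standalone check of the keep/flags masking in mlx4_set_flags() above: bits covered by keep are preserved while the rest are taken from flags. The starting flags below are illustrative, not part of the patch:

/* Sketch of the keep/flags masking (illustrative values). */
#include <assert.h>
#include <net/if.h>

int
main(void)
{
	unsigned long tmp = IFF_BROADCAST; /* current interface flags */
	unsigned int keep = ~IFF_UP;       /* preserve everything but IFF_UP */
	unsigned int flags = IFF_UP;       /* request link up */

	tmp &= keep;             /* drop the modifiable bits */
	tmp |= (flags & (~keep)); /* set them from the request */
	assert(tmp == (IFF_BROADCAST | IFF_UP));
	return 0;
}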
+
+/**
+ * Change the link state (UP / DOWN).
+ *
+ * @param priv
+ * Pointer to Ethernet device private data.
+ * @param up
+ * Nonzero for link up, otherwise link down.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_dev_set_link(struct priv *priv, int up)
+{
+ int err;
+
+ if (up) {
+ err = mlx4_set_flags(priv, ~IFF_UP, IFF_UP);
+ if (err)
+ return err;
+ } else {
+ err = mlx4_set_flags(priv, ~IFF_UP, ~IFF_UP);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * DPDK callback to bring the link DOWN.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ return mlx4_dev_set_link(priv, 0);
+}
+
+/**
+ * DPDK callback to bring the link UP.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ return mlx4_dev_set_link(priv, 1);
+}
+
+/**
+ * Supported Rx mode toggles.
+ *
+ * Even and odd values respectively stand for off and on.
+ */
+enum rxmode_toggle {
+ RXMODE_TOGGLE_PROMISC_OFF,
+ RXMODE_TOGGLE_PROMISC_ON,
+ RXMODE_TOGGLE_ALLMULTI_OFF,
+ RXMODE_TOGGLE_ALLMULTI_ON,
+};
+
+/**
+ * Helper function to toggle promiscuous and all multicast modes.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param toggle
+ * Toggle to set.
+ */
+static void
+mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle)
+{
+ struct priv *priv = dev->data->dev_private;
+ const char *mode;
+ struct rte_flow_error error;
+
+ switch (toggle) {
+ case RXMODE_TOGGLE_PROMISC_OFF:
+ case RXMODE_TOGGLE_PROMISC_ON:
+ mode = "promiscuous";
+ dev->data->promiscuous = toggle & 1;
+ break;
+ case RXMODE_TOGGLE_ALLMULTI_OFF:
+ case RXMODE_TOGGLE_ALLMULTI_ON:
+ mode = "all multicast";
+ dev->data->all_multicast = toggle & 1;
+ break;
+ }
+ if (!mlx4_flow_sync(priv, &error))
+ return;
+ ERROR("cannot toggle %s mode (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ mode, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+}
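The even/odd encoding noted in the enum comment above can be checked standalone; the enum values are copied from the code, the main() wrapper is hypothetical:

/* Sketch: the low bit of each rxmode_toggle value encodes on/off. */
#include <assert.h>

enum rxmode_toggle {
	RXMODE_TOGGLE_PROMISC_OFF,  /* 0: off */
	RXMODE_TOGGLE_PROMISC_ON,   /* 1: on */
	RXMODE_TOGGLE_ALLMULTI_OFF, /* 2: off */
	RXMODE_TOGGLE_ALLMULTI_ON,  /* 3: on */
};

int
main(void)
{
	assert((RXMODE_TOGGLE_PROMISC_ON & 1) == 1);
	assert((RXMODE_TOGGLE_ALLMULTI_OFF & 1) == 0);
	return 0;
}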
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_ON);
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_PROMISC_OFF);
+}
+
+/**
+ * DPDK callback to enable all multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_ON);
+}
+
+/**
+ * DPDK callback to disable all multicast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ mlx4_rxmode_toggle(dev, RXMODE_TOGGLE_ALLMULTI_OFF);
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return;
+ }
+ memset(&priv->mac[index], 0, sizeof(priv->mac[index]));
+ if (!mlx4_flow_sync(priv, &error))
+ return;
+ ERROR("failed to synchronize flow rules after removing MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ int ret;
+
+ (void)vmdq;
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memcpy(&priv->mac[index], mac_addr, sizeof(priv->mac[index]));
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("failed to synchronize flow rules after adding MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ return ret;
+}
+
+/**
+ * DPDK callback to configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ *   Nonzero to enable the filter, zero to disable it.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ unsigned int vidx = vlan_id / 64;
+ unsigned int vbit = vlan_id % 64;
+ uint64_t *v;
+ int ret;
+
+ if (vidx >= RTE_DIM(dev->data->vlan_filter_conf.ids)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ v = &dev->data->vlan_filter_conf.ids[vidx];
+ *v &= ~(UINT64_C(1) << vbit);
+ *v |= (uint64_t)!!on << vbit;
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("failed to synchronize flow rules after %s VLAN filter on ID %u"
+ " (code %d, \"%s\"), "
+ " flow error type %d, cause %p, message: %s",
+ on ? "enabling" : "disabling", vlan_id,
+ rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ return ret;
+}
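
The VLAN filter bookkeeping above packs the 4096 possible VLAN IDs into an array of 64-bit words: vlan_id / 64 selects the word, vlan_id % 64 the bit, and clearing before conditionally setting makes the update idempotent. A self-contained sketch of the same bitmap arithmetic (hypothetical helper, 4096-entry table assumed):

    #include <assert.h>
    #include <stdint.h>

    #define WORDS 64 /* 64 words * 64 bits = 4096 VLAN IDs */

    static void
    vlan_set(uint64_t ids[WORDS], unsigned int vlan_id, int on)
    {
            uint64_t *v = &ids[vlan_id / 64];
            unsigned int vbit = vlan_id % 64;

            *v &= ~(UINT64_C(1) << vbit);  /* always clear first */
            *v |= (uint64_t)!!on << vbit;  /* set only when on != 0 */
    }

    int
    main(void)
    {
            uint64_t ids[WORDS] = { 0 };

            vlan_set(ids, 100, 1); /* word 1, bit 36 */
            assert(ids[1] == UINT64_C(1) << 36);
            vlan_set(ids, 100, 0);
            assert(ids[1] == 0);
            return 0;
    }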
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ */
+void
+mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] info
+ * Info structure output buffer.
+ */
+void
+mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int max;
+ char ifname[IF_NAMESIZE];
+
+ info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ /* FIXME: we should ask the device for these values. */
+ info->min_rx_bufsize = 32;
+ info->max_rx_pktlen = 65536;
+ /*
+ * Since one CQ is needed per QP, the limit is the lower of the
+ * two values.
+ */
+ max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
+ priv->device_attr.max_qp : priv->device_attr.max_cq);
+ /* max_rx_queues is uint16_t; cap the value to avoid overflow. */
+ if (max >= 65535)
+ max = 65535;
+ info->max_rx_queues = max;
+ info->max_tx_queues = max;
+ info->max_mac_addrs = RTE_DIM(priv->mac);
+ info->rx_offload_capa = 0;
+ info->tx_offload_capa = 0;
+ if (priv->hw_csum) {
+ info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (mlx4_get_ifname(priv, &ifname) == 0)
+ info->if_index = if_nametoindex(ifname);
+ info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
+ info->speed_capa =
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_56G;
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ *   Stats structure output buffer.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct rte_eth_stats tmp;
+ unsigned int i;
+ unsigned int idx;
+
+ memset(&tmp, 0, sizeof(tmp));
+ /* Add software counters. */
+ for (i = 0; i != dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = dev->data->rx_queues[i];
+
+ if (rxq == NULL)
+ continue;
+ idx = rxq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ tmp.q_ipackets[idx] += rxq->stats.ipackets;
+ tmp.q_ibytes[idx] += rxq->stats.ibytes;
+ tmp.q_errors[idx] += (rxq->stats.idropped +
+ rxq->stats.rx_nombuf);
+ }
+ tmp.ipackets += rxq->stats.ipackets;
+ tmp.ibytes += rxq->stats.ibytes;
+ tmp.ierrors += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ for (i = 0; i != dev->data->nb_tx_queues; ++i) {
+ struct txq *txq = dev->data->tx_queues[i];
+
+ if (txq == NULL)
+ continue;
+ idx = txq->stats.idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ tmp.q_opackets[idx] += txq->stats.opackets;
+ tmp.q_obytes[idx] += txq->stats.obytes;
+ tmp.q_errors[idx] += txq->stats.odropped;
+ }
+ tmp.opackets += txq->stats.opackets;
+ tmp.obytes += txq->stats.obytes;
+ tmp.oerrors += txq->stats.odropped;
+ }
+ *stats = tmp;
+ return 0;
+}
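
Note how aggregation treats the two kinds of counters differently: grand totals accumulate over every queue, while the per-queue arrays only cover stats indices below RTE_ETHDEV_QUEUE_STAT_CNTRS, so queues sharing an index are summed together. A reduced sketch of that clamping behavior (hypothetical types, STAT_CNTRS standing in for the DPDK constant):

    #include <assert.h>
    #include <stdint.h>

    #define STAT_CNTRS 4 /* stand-in for RTE_ETHDEV_QUEUE_STAT_CNTRS */

    struct queue { unsigned int idx; uint64_t pkts; };

    int
    main(void)
    {
            struct queue q[] = { { 0, 10 }, { 1, 20 }, { 9, 5 } };
            uint64_t q_pkts[STAT_CNTRS] = { 0 };
            uint64_t total = 0;
            unsigned int i;

            for (i = 0; i != sizeof(q) / sizeof(q[0]); ++i) {
                    if (q[i].idx < STAT_CNTRS)
                            q_pkts[q[i].idx] += q[i].pkts;
                    total += q[i].pkts; /* totals include every queue */
            }
            assert(total == 35);
            assert(q_pkts[0] == 10 && q_pkts[1] == 20);
            return 0;
    }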
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx4_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i != dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = dev->data->rx_queues[i];
+
+ if (rxq)
+ rxq->stats = (struct mlx4_rxq_stats){
+ .idx = rxq->stats.idx,
+ };
+ }
+ for (i = 0; i != dev->data->nb_tx_queues; ++i) {
+ struct txq *txq = dev->data->tx_queues[i];
+
+ if (txq)
+ txq->stats = (struct mlx4_txq_stats){
+ .idx = txq->stats.idx,
+ };
+ }
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ const struct priv *priv = dev->data->dev_private;
+ struct ethtool_cmd edata = {
+ .cmd = ETHTOOL_GSET,
+ };
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ int link_speed = 0;
+
+ if (priv == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ (void)wait_to_complete;
+ if (mlx4_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ return -rte_errno;
+ }
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&edata;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ strerror(rte_errno));
+ return -rte_errno;
+ }
+ link_speed = ethtool_cmd_speed(&edata);
+ if (link_speed == -1)
+ dev_link.link_speed = 0;
+ else
+ dev_link.link_speed = link_speed;
+ dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ dev->data->dev_link = dev_link;
+ return 0;
+}
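
Link state is obtained through the standard Linux ethtool ioctl rather than Verbs: a struct ethtool_cmd is embedded in ifr_data and SIOCETHTOOL is issued against the interface (mlx4_ifreq wraps exactly this). A generic standalone sketch of that ioctl pattern; the interface name is a placeholder:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct ethtool_cmd edata = { .cmd = ETHTOOL_GSET };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            /* "eth0" is a placeholder interface name. */
            strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
            ifr.ifr_data = (void *)&edata;
            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("speed: %u Mb/s\n", ethtool_cmd_speed(&edata));
            close(fd);
            return 0;
    }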
+
+/**
+ * DPDK callback to get flow control status.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] fc_conf
+ * Flow control output buffer.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_GPAUSEPARAM,
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = rte_errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
+ " failed: %s",
+ strerror(rte_errno));
+ goto out;
+ }
+ fc_conf->autoneg = ethpause.autoneg;
+ if (ethpause.rx_pause && ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (ethpause.rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (ethpause.tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ ret = 0;
+out:
+ assert(ret >= 0);
+ return -ret;
+}
+
+/**
+ * DPDK callback to modify flow control parameters.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] fc_conf
+ * Flow control parameters.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ifreq ifr;
+ struct ethtool_pauseparam ethpause = {
+ .cmd = ETHTOOL_SPAUSEPARAM,
+ };
+ int ret;
+
+ ifr.ifr_data = (void *)&ethpause;
+ ethpause.autoneg = fc_conf->autoneg;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ ethpause.rx_pause = 1;
+ else
+ ethpause.rx_pause = 0;
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ ethpause.tx_pause = 1;
+ else
+ ethpause.tx_pause = 0;
+ if (mlx4_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ ret = rte_errno;
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ strerror(rte_errno));
+ goto out;
+ }
+ ret = 0;
+out:
+ assert(ret >= 0);
+ return -ret;
+}
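
Together, the get/set callbacks translate between the four-valued RTE_FC_* mode and two independent Rx/Tx pause booleans, and the mapping round-trips exactly. A small sketch checking that symmetry, using a hypothetical enum that mirrors the RTE_FC_* ordering:

    #include <assert.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Split a mode into independent Rx/Tx pause flags. */
    static void
    mode_to_pause(enum fc_mode m, int *rx, int *tx)
    {
            *rx = (m == FC_FULL) || (m == FC_RX_PAUSE);
            *tx = (m == FC_FULL) || (m == FC_TX_PAUSE);
    }

    /* Combine flags back into a mode, as the "get" callback does. */
    static enum fc_mode
    pause_to_mode(int rx, int tx)
    {
            if (rx && tx)
                    return FC_FULL;
            if (rx)
                    return FC_RX_PAUSE;
            if (tx)
                    return FC_TX_PAUSE;
            return FC_NONE;
    }

    int
    main(void)
    {
            int rx, tx;
            enum fc_mode m;

            for (m = FC_NONE; m <= FC_FULL; ++m) {
                    mode_to_pause(m, &rx, &tx);
                    assert(pause_to_mode(rx, tx) == m);
            }
            return 0;
    }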
+
+/**
+ * DPDK callback to retrieve the received packet types that are recognized
+ * by the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ *   Pointer to an array of recognized packet types when mlx4_rx_burst()
+ *   is the active Rx function, NULL otherwise.
+ */
+const uint32_t *
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx4_rx_burst)
+ return ptypes;
+ return NULL;
+}
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 925c89c5..8b87b298 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,197 +31,328 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/**
+ * @file
+ * Flow API operations for mlx4 driver.
+ */
+
+#include <arpa/inet.h>
#include <assert.h>
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
-/* Generated configuration header. */
-#include "mlx4_autoconf.h"
-
/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
-/** Static initializer for items. */
-#define ITEMS(...) \
+/** Static initializer for a list of subsequent item types. */
+#define NEXT_ITEM(...) \
(const enum rte_flow_item_type []){ \
__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
}
-/** Structure to generate a simple graph of layers supported by the NIC. */
-struct mlx4_flow_items {
- /** List of possible actions for these items. */
- const enum rte_flow_action_type *const actions;
- /** Bit-masks corresponding to the possibilities for the item. */
- const void *mask;
- /**
- * Default bit-masks to use when item->mask is not provided. When
- * \default_mask is also NULL, the full supported bit-mask (\mask) is
- * used instead.
- */
- const void *default_mask;
- /** Bit-masks size in bytes. */
+/** Processor structure associated with a flow item. */
+struct mlx4_flow_proc_item {
+ /** Bit-mask for fields supported by this PMD. */
+ const void *mask_support;
+ /** Bit-mask to use when @p item->mask is not provided. */
+ const void *mask_default;
+ /** Size in bytes for @p mask_support and @p mask_default. */
const unsigned int mask_sz;
- /**
- * Check support for a given item.
- *
- * @param item[in]
- * Item specification.
- * @param mask[in]
- * Bit-masks covering supported fields to compare with spec,
- * last and mask in
- * \item.
- * @param size
- * Bit-Mask size in bytes.
- *
- * @return
- * 0 on success, negative value otherwise.
- */
- int (*validate)(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size);
- /**
- * Conversion function from rte_flow to NIC specific flow.
- *
- * @param item
- * rte_flow item to convert.
- * @param default_mask
- * Default bit-masks to use when item->mask is not provided.
- * @param data
- * Internal structure to store the conversion.
- *
- * @return
- * 0 on success, negative value otherwise.
- */
- int (*convert)(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
+ /** Merge a pattern item into a flow rule handle. */
+ int (*merge)(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error);
/** Size in bytes of the destination structure. */
const unsigned int dst_sz;
- /** List of possible following items. */
- const enum rte_flow_item_type *const items;
+ /** List of possible subsequent items. */
+ const enum rte_flow_item_type *const next_item;
};
-struct rte_flow_drop {
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
+/** Shared resources for drop flow rules. */
+struct mlx4_drop {
+ struct ibv_qp *qp; /**< QP target. */
+ struct ibv_cq *cq; /**< CQ associated with above QP. */
+ struct priv *priv; /**< Back pointer to private data. */
+ uint32_t refcnt; /**< Reference count. */
};
-/** Valid action for this PMD. */
-static const enum rte_flow_action_type valid_actions[] = {
- RTE_FLOW_ACTION_TYPE_DROP,
- RTE_FLOW_ACTION_TYPE_QUEUE,
- RTE_FLOW_ACTION_TYPE_RSS,
- RTE_FLOW_ACTION_TYPE_END,
-};
+/**
+ * Convert DPDK RSS hash fields to their Verbs equivalent.
+ *
+ * @param rss_hf
+ * Hash fields in DPDK format (see struct rte_eth_rss_conf).
+ *
+ * @return
+ * A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
+ * otherwise and rte_errno is set.
+ */
+static uint64_t
+mlx4_conv_rss_hf(uint64_t rss_hf)
+{
+ enum { IPV4, IPV6, TCP, UDP, };
+ const uint64_t in[] = {
+ [IPV4] = (ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ [IPV6] = (ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_NONFRAG_IPV6_OTHER |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_IPV6_TCP_EX |
+ ETH_RSS_IPV6_UDP_EX),
+ [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX),
+ /*
+ * UDP support is temporarily disabled due to an
+ * implementation issue in the kernel.
+ */
+ [UDP] = 0,
+ };
+ const uint64_t out[RTE_DIM(in)] = {
+ [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
+ [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
+ [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
+ [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
+ };
+ uint64_t seen = 0;
+ uint64_t conv = 0;
+ unsigned int i;
+
+ for (i = 0; i != RTE_DIM(in); ++i)
+ if (rss_hf & in[i]) {
+ seen |= rss_hf & in[i];
+ conv |= out[i];
+ }
+ if (!(rss_hf & ~seen))
+ return conv;
+ rte_errno = ENOTSUP;
+ return (uint64_t)-1;
+}
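
The conversion above is a table-driven translation with leftover detection: parallel in[]/out[] arrays map groups of source bits to target bits, and the seen accumulator rejects any requested bit not covered by a group. A standalone sketch of the technique with made-up flag spaces:

    #include <assert.h>
    #include <stdint.h>

    #define DIM(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical source and target flag spaces. */
    #define SRC_A UINT64_C(0x1)
    #define SRC_B UINT64_C(0x2)
    #define SRC_C UINT64_C(0x4)
    #define DST_X UINT64_C(0x10)
    #define DST_Y UINT64_C(0x20)

    static uint64_t
    conv(uint64_t flags)
    {
            static const uint64_t in[] = { SRC_A | SRC_B, SRC_C };
            static const uint64_t out[] = { DST_X, DST_Y };
            uint64_t seen = 0;
            uint64_t ret = 0;
            unsigned int i;

            for (i = 0; i != DIM(in); ++i)
                    if (flags & in[i]) {
                            seen |= flags & in[i];
                            ret |= out[i];
                    }
            if (!(flags & ~seen))
                    return ret;
            return (uint64_t)-1; /* leftover bits: unsupported input */
    }

    int
    main(void)
    {
            assert(conv(SRC_A | SRC_C) == (DST_X | DST_Y));
            assert(conv(UINT64_C(0x100)) == (uint64_t)-1);
            return 0;
    }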
/**
- * Convert Ethernet item to Verbs specification.
+ * Merge Ethernet pattern item into flow rule handle.
*
- * @param item[in]
- * Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks, except in the specific case of matching
+ * all multicast traffic (@p spec->dst and @p mask->dst equal to
+ * 01:00:00:00:00:00).
+ * - Not providing @p item->spec or providing an empty @p mask->dst is
+ * *only* supported if the rule doesn't specify additional matching
+ * criteria (i.e. rule is promiscuous-like).
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_create_eth(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx4_flow_merge_eth(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_eth *spec = item->spec;
- const struct rte_flow_item_eth *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ const struct rte_flow_item_eth *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
struct ibv_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ const char *msg;
unsigned int i;
+ if (!mask) {
+ flow->promisc = 1;
+ } else {
+ uint32_t sum_dst = 0;
+ uint32_t sum_src = 0;
+
+ for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
+ sum_dst += mask->dst.addr_bytes[i];
+ sum_src += mask->src.addr_bytes[i];
+ }
+ if (sum_src) {
+ msg = "mlx4 does not support source MAC matching";
+ goto error;
+ } else if (!sum_dst) {
+ flow->promisc = 1;
+ } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
+ if (!(spec->dst.addr_bytes[0] & 1)) {
+ msg = "mlx4 does not support the explicit"
+ " exclusion of all multicast traffic";
+ goto error;
+ }
+ flow->allmulti = 1;
+ } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
+ msg = "mlx4 does not support matching partial"
+ " Ethernet fields";
+ goto error;
+ }
+ }
+ if (!flow->ibv_attr)
+ return 0;
+ if (flow->promisc) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
+ return 0;
+ }
+ if (flow->allmulti) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
+ return 0;
+ }
++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 2;
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*eth = (struct ibv_flow_spec_eth) {
.type = IBV_FLOW_SPEC_ETH,
- .size = eth_size,
+ .size = sizeof(*eth),
};
- if (!spec) {
- flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
- return 0;
- }
- if (!mask)
- mask = default_mask;
memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
/* Remove unwanted bits from values. */
for (i = 0; i < ETHER_ADDR_LEN; ++i) {
eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
- eth->val.src_mac[i] &= eth->mask.src_mac[i];
}
return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
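
Summing the destination mask bytes is what distinguishes the three supported cases above: an empty mask (promiscuous-like), 01:00:00:00:00:00 (all multicast, sum == 1 with only the group bit set), and a full ff:ff:ff:ff:ff:ff mask (exact match). A simplified classifier sketch; it omits the spec-side multicast-bit check performed by the real function:

    #include <assert.h>
    #include <stdint.h>

    #define ADDR_LEN 6

    enum match { PROMISC, ALLMULTI, EXACT, UNSUPPORTED };

    static enum match
    classify(const uint8_t mask[ADDR_LEN])
    {
            unsigned int sum = 0;
            unsigned int i;

            for (i = 0; i != ADDR_LEN; ++i)
                    sum += mask[i];
            if (!sum)
                    return PROMISC; /* empty mask matches everything */
            if (sum == 1 && mask[0] == 1)
                    return ALLMULTI; /* only the group bit is matched */
            if (sum == 0xff * ADDR_LEN)
                    return EXACT; /* full mask: exact destination match */
            return UNSUPPORTED; /* partial masks are rejected */
    }

    int
    main(void)
    {
            const uint8_t none[ADDR_LEN] = { 0 };
            const uint8_t mc[ADDR_LEN] = { 0x01 };
            const uint8_t full[ADDR_LEN] =
                    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            const uint8_t part[ADDR_LEN] = { 0xff, 0xff };

            assert(classify(none) == PROMISC);
            assert(classify(mc) == ALLMULTI);
            assert(classify(full) == EXACT);
            assert(classify(part) == UNSUPPORTED);
            return 0;
    }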
/**
- * Convert VLAN item to Verbs specification.
+ * Merge VLAN pattern item into flow rule handle.
*
- * @param item[in]
- * Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
+ * empty @p item->mask would also include non-VLAN traffic. Doing so is
+ * therefore unsupported.
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_create_vlan(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx4_flow_merge_vlan(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = item->spec;
- const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ const struct rte_flow_item_vlan *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
struct ibv_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ const char *msg;
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
- if (!spec)
+ if (!mask || !mask->tci) {
+ msg = "mlx4 cannot match all VLAN traffic while excluding"
+ " non-VLAN traffic, TCI VID must be specified";
+ goto error;
+ }
+ if (mask->tci != RTE_BE16(0x0fff)) {
+ msg = "mlx4 does not support partial TCI VID matching";
+ goto error;
+ }
+ if (!flow->ibv_attr)
return 0;
- if (!mask)
- mask = default_mask;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
+ sizeof(*eth));
eth->val.vlan_tag = spec->tci;
eth->mask.vlan_tag = mask->tci;
eth->val.vlan_tag &= eth->mask.vlan_tag;
return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
/**
- * Convert IPv4 item to Verbs specification.
+ * Merge IPv4 pattern item into flow rule handle.
*
- * @param item[in]
- * Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_create_ipv4(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx4_flow_merge_ipv4(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *spec = item->spec;
- const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ const struct rte_flow_item_ipv4 *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
struct ibv_flow_spec_ipv4 *ipv4;
- unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
+ const char *msg;
+ /*
+ * Only empty or full address masks are supported: x + 1 <= 1
+ * holds only for x == 0 (no match) and x == 0xffffffff (exact
+ * match).
+ */
+ if (mask &&
+ ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
+ (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
+ msg = "mlx4 does not support matching partial IPv4 fields";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 1;
- ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*ipv4 = (struct ibv_flow_spec_ipv4) {
.type = IBV_FLOW_SPEC_IPV4,
- .size = ipv4_size,
+ .size = sizeof(*ipv4),
};
if (!spec)
return 0;
@@ -229,8 +360,6 @@ mlx4_flow_create_ipv4(const struct rte_flow_item *item,
.src_ip = spec->hdr.src_addr,
.dst_ip = spec->hdr.dst_addr,
};
- if (!mask)
- mask = default_mask;
ipv4->mask = (struct ibv_flow_ipv4_filter) {
.src_ip = mask->hdr.src_addr,
.dst_ip = mask->hdr.dst_addr,
@@ -239,528 +368,504 @@ mlx4_flow_create_ipv4(const struct rte_flow_item *item,
ipv4->val.src_ip &= ipv4->mask.src_ip;
ipv4->val.dst_ip &= ipv4->mask.dst_ip;
return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
/**
- * Convert UDP item to Verbs specification.
+ * Merge UDP pattern item into flow rule handle.
*
- * @param item[in]
- * Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_create_udp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx4_flow_merge_udp(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = item->spec;
- const struct rte_flow_item_udp *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ const struct rte_flow_item_udp *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
struct ibv_flow_spec_tcp_udp *udp;
- unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const char *msg;
+ if (mask &&
+ ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
+ (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
+ msg = "mlx4 does not support matching partial UDP fields";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*udp = (struct ibv_flow_spec_tcp_udp) {
.type = IBV_FLOW_SPEC_UDP,
- .size = udp_size,
+ .size = sizeof(*udp),
};
if (!spec)
return 0;
udp->val.dst_port = spec->hdr.dst_port;
udp->val.src_port = spec->hdr.src_port;
- if (!mask)
- mask = default_mask;
udp->mask.dst_port = mask->hdr.dst_port;
udp->mask.src_port = mask->hdr.src_port;
/* Remove unwanted bits from values. */
udp->val.src_port &= udp->mask.src_port;
udp->val.dst_port &= udp->mask.dst_port;
return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
/**
- * Convert TCP item to Verbs specification.
+ * Merge TCP pattern item into flow rule handle.
*
- * @param item[in]
- * Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * Additional mlx4-specific constraints on supported fields:
+ *
+ * - No support for partial masks.
+ *
+ * @param[in, out] flow
+ * Flow rule handle to update.
+ * @param[in] item
+ * Pattern item to merge.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_create_tcp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx4_flow_merge_tcp(struct rte_flow *flow,
+ const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
const struct rte_flow_item_tcp *spec = item->spec;
- const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ const struct rte_flow_item_tcp *mask =
+ spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
struct ibv_flow_spec_tcp_udp *tcp;
- unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const char *msg;
+ if (mask &&
+ ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
+ (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
+ msg = "mlx4 does not support matching partial TCP fields";
+ goto error;
+ }
+ if (!flow->ibv_attr)
+ return 0;
++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
*tcp = (struct ibv_flow_spec_tcp_udp) {
.type = IBV_FLOW_SPEC_TCP,
- .size = tcp_size,
+ .size = sizeof(*tcp),
};
if (!spec)
return 0;
tcp->val.dst_port = spec->hdr.dst_port;
tcp->val.src_port = spec->hdr.src_port;
- if (!mask)
- mask = default_mask;
tcp->mask.dst_port = mask->hdr.dst_port;
tcp->mask.src_port = mask->hdr.src_port;
/* Remove unwanted bits from values. */
tcp->val.src_port &= tcp->mask.src_port;
tcp->val.dst_port &= tcp->mask.dst_port;
return 0;
+error:
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg);
}
/**
- * Check support for a given item.
+ * Perform basic sanity checks on a pattern item.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param mask[in]
- * Bit-masks covering supported fields to compare with spec, last and mask in
- * \item.
- * @param size
- * Bit-Mask size in bytes.
+ * @param[in] proc
+ * Associated item-processing object.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, negative value otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx4_flow_item_validate(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
+mlx4_flow_item_check(const struct rte_flow_item *item,
+ const struct mlx4_flow_proc_item *proc,
+ struct rte_flow_error *error)
{
- int ret = 0;
+ const uint8_t *mask;
+ unsigned int i;
+ /* item->last and item->mask cannot exist without item->spec. */
if (!item->spec && (item->mask || item->last))
- return -1;
- if (item->spec && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->spec;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->last && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->last;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->spec && item->last) {
- uint8_t spec[size];
- uint8_t last[size];
- const uint8_t *apply = mask;
- unsigned int i;
-
- if (item->mask)
- apply = item->mask;
- for (i = 0; i < size; ++i) {
- spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
- last[i] = ((const uint8_t *)item->last)[i] & apply[i];
- }
- ret = memcmp(spec, last, size);
- }
- return ret;
-}
-
-static int
-mlx4_flow_validate_eth(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- if (item->mask) {
- const struct rte_flow_item_eth *mask = item->mask;
-
- if (mask->dst.addr_bytes[0] != 0xff ||
- mask->dst.addr_bytes[1] != 0xff ||
- mask->dst.addr_bytes[2] != 0xff ||
- mask->dst.addr_bytes[3] != 0xff ||
- mask->dst.addr_bytes[4] != 0xff ||
- mask->dst.addr_bytes[5] != 0xff)
- return -1;
- }
- return mlx4_flow_item_validate(item, mask, size);
-}
-
-static int
-mlx4_flow_validate_vlan(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- if (item->mask) {
- const struct rte_flow_item_vlan *mask = item->mask;
-
- if (mask->tci != 0 &&
- ntohs(mask->tci) != 0x0fff)
- return -1;
- }
- return mlx4_flow_item_validate(item, mask, size);
-}
-
-static int
-mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- if (item->mask) {
- const struct rte_flow_item_ipv4 *mask = item->mask;
-
- if (mask->hdr.src_addr != 0 &&
- mask->hdr.src_addr != 0xffffffff)
- return -1;
- if (mask->hdr.dst_addr != 0 &&
- mask->hdr.dst_addr != 0xffffffff)
- return -1;
- }
- return mlx4_flow_item_validate(item, mask, size);
-}
-
-static int
-mlx4_flow_validate_udp(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- if (item->mask) {
- const struct rte_flow_item_udp *mask = item->mask;
-
- if (mask->hdr.src_port != 0 &&
- mask->hdr.src_port != 0xffff)
- return -1;
- if (mask->hdr.dst_port != 0 &&
- mask->hdr.dst_port != 0xffff)
- return -1;
- }
- return mlx4_flow_item_validate(item, mask, size);
-}
-
-static int
-mlx4_flow_validate_tcp(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- if (item->mask) {
- const struct rte_flow_item_tcp *mask = item->mask;
-
- if (mask->hdr.src_port != 0 &&
- mask->hdr.src_port != 0xffff)
- return -1;
- if (mask->hdr.dst_port != 0 &&
- mask->hdr.dst_port != 0xffff)
- return -1;
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without a"
+ " corresponding \"spec\"");
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return 0;
+ mask = item->mask ?
+ (const uint8_t *)item->mask :
+ (const uint8_t *)proc->mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside proc->mask_support.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != proc->mask_sz; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
+ ((const uint8_t *)proc->mask_support)[i])
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "unsupported field found in \"mask\"");
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i]))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range between \"spec\" and \"last\""
+ " is larger than \"mask\"");
}
- return mlx4_flow_item_validate(item, mask, size);
+ return 0;
}
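
The single-pass loop above folds two checks into one byte-wise walk: the mask may not exceed the supported mask, and spec and last must agree under the mask, which is how ranges wider than the mask get rejected. A standalone sketch of the same walk (hypothetical check() helper):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* 0 when mask is within support and spec/last agree under mask. */
    static int
    check(const uint8_t *spec, const uint8_t *last, const uint8_t *mask,
          const uint8_t *support, unsigned int size)
    {
            unsigned int i;

            for (i = 0; i != size; ++i) {
                    if (!mask[i])
                            continue;
                    if ((mask[i] | support[i]) != support[i])
                            return -1; /* bit set outside support */
                    if (last && (spec[i] & mask[i]) != (last[i] & mask[i]))
                            return -1; /* spec..last wider than mask */
            }
            return 0;
    }

    int
    main(void)
    {
            const uint8_t support[2] = { 0xff, 0x0f };
            const uint8_t mask[2] = { 0xff, 0x0f };
            const uint8_t spec[2] = { 0x12, 0x03 };
            const uint8_t same[2] = { 0x12, 0x03 };
            const uint8_t wider[2] = { 0x13, 0x03 };
            const uint8_t bad_mask[2] = { 0xff, 0xf0 };

            assert(check(spec, same, mask, support, 2) == 0);
            assert(check(spec, wider, mask, support, 2) == -1);
            assert(check(spec, NULL, bad_mask, support, 2) == -1);
            return 0;
    }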
/** Graph of supported items and associated actions. */
-static const struct mlx4_flow_items mlx4_flow_items[] = {
+static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
[RTE_FLOW_ITEM_TYPE_END] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
},
[RTE_FLOW_ITEM_TYPE_ETH] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
- RTE_FLOW_ITEM_TYPE_IPV4),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_eth){
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4),
+ .mask_support = &(const struct rte_flow_item_eth){
+ /* Only destination MAC can be matched. */
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
- .default_mask = &rte_flow_item_eth_mask,
+ .mask_default = &rte_flow_item_eth_mask,
.mask_sz = sizeof(struct rte_flow_item_eth),
- .validate = mlx4_flow_validate_eth,
- .convert = mlx4_flow_create_eth,
+ .merge = mlx4_flow_merge_eth,
.dst_sz = sizeof(struct ibv_flow_spec_eth),
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vlan){
- /* rte_flow_item_vlan_mask is invalid for mlx4. */
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- .tci = 0x0fff,
-#else
- .tci = 0xff0f,
-#endif
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
+ .mask_support = &(const struct rte_flow_item_vlan){
+ /* Only TCI VID matching is supported. */
+ .tci = RTE_BE16(0x0fff),
},
+ .mask_default = &rte_flow_item_vlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vlan),
- .validate = mlx4_flow_validate_vlan,
- .convert = mlx4_flow_create_vlan,
+ .merge = mlx4_flow_merge_vlan,
.dst_sz = 0,
},
[RTE_FLOW_ITEM_TYPE_IPV4] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_ipv4){
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask_support = &(const struct rte_flow_item_ipv4){
.hdr = {
- .src_addr = -1,
- .dst_addr = -1,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
},
},
- .default_mask = &rte_flow_item_ipv4_mask,
+ .mask_default = &rte_flow_item_ipv4_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv4),
- .validate = mlx4_flow_validate_ipv4,
- .convert = mlx4_flow_create_ipv4,
+ .merge = mlx4_flow_merge_ipv4,
.dst_sz = sizeof(struct ibv_flow_spec_ipv4),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_udp){
+ .mask_support = &(const struct rte_flow_item_udp){
.hdr = {
- .src_port = -1,
- .dst_port = -1,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
},
- .default_mask = &rte_flow_item_udp_mask,
+ .mask_default = &rte_flow_item_udp_mask,
.mask_sz = sizeof(struct rte_flow_item_udp),
- .validate = mlx4_flow_validate_udp,
- .convert = mlx4_flow_create_udp,
+ .merge = mlx4_flow_merge_udp,
.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_tcp){
+ .mask_support = &(const struct rte_flow_item_tcp){
.hdr = {
- .src_port = -1,
- .dst_port = -1,
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
},
},
- .default_mask = &rte_flow_item_tcp_mask,
+ .mask_default = &rte_flow_item_tcp_mask,
.mask_sz = sizeof(struct rte_flow_item_tcp),
- .validate = mlx4_flow_validate_tcp,
- .convert = mlx4_flow_create_tcp,
+ .merge = mlx4_flow_merge_tcp,
.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
};
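
next_item turns this table into a small directed graph: parsing starts at the END node and each pattern item must appear in the current node's adjacency list. A toy sketch of that traversal, with a hypothetical item enum and adjacency lists mirroring the table above:

    #include <assert.h>
    #include <stddef.h>

    enum item { END, ETH, VLAN, IPV4, UDP, TCP, NITEMS };

    /* Adjacency lists; END doubles as list terminator. */
    static const enum item *next_item[NITEMS] = {
            [END] = (const enum item []){ ETH, END },
            [ETH] = (const enum item []){ VLAN, IPV4, END },
            [VLAN] = (const enum item []){ IPV4, END },
            [IPV4] = (const enum item []){ UDP, TCP, END },
    };

    /* Check that a pattern is one path through the graph. */
    static int
    valid(const enum item *pattern)
    {
            enum item cur = END;

            for (; *pattern != END; ++pattern) {
                    const enum item *next = next_item[cur];
                    int ok = 0;

                    for (; next && *next != END; ++next)
                            if (*next == *pattern)
                                    ok = 1;
                    if (!ok)
                            return 0;
                    cur = *pattern;
            }
            return 1;
    }

    int
    main(void)
    {
            assert(valid((const enum item []){ ETH, VLAN, IPV4, TCP, END }));
            assert(!valid((const enum item []){ ETH, UDP, END }));
            return 0;
    }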
/**
- * Validate a flow supported by the NIC.
+ * Make sure a flow rule is supported and initialize associated structure.
*
* @param priv
* Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
- * @param[in] items
+ * @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] flow
- * Flow structure to update.
+ * @param[in, out] addr
+ * Buffer where the resulting flow rule handle pointer must be stored.
+ * If NULL, stop processing after validation stage.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_validate(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx4_flow *flow)
+mlx4_flow_prepare(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow **addr)
{
- const struct mlx4_flow_items *cur_item = mlx4_flow_items;
- struct mlx4_flow_action action = {
- .queue = 0,
- .drop = 0,
- };
-
- (void)priv;
- if (attr->group) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups are not supported");
- return -rte_errno;
- }
- if (attr->priority) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priorities are not supported");
- return -rte_errno;
- }
- if (attr->egress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- return -rte_errno;
- }
- if (!attr->ingress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "only ingress is supported");
- return -rte_errno;
- }
- /* Go over items list. */
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx4_flow_items *token = NULL;
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ const struct mlx4_flow_proc_item *proc;
+ struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
+ struct rte_flow *flow = &temp;
+ const char *msg = NULL;
+
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "groups are not supported");
+ if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "maximum priority level is "
+ MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "egress is not supported");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL, "only ingress is supported");
+fill:
+ proc = mlx4_flow_proc_item_list;
+ /* Go over pattern. */
+ for (item = pattern; item->type; ++item) {
+ const struct mlx4_flow_proc_item *next = NULL;
unsigned int i;
int err;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
+ flow->internal = 1;
continue;
- /*
- * The nic can support patterns with NULL eth spec only
- * if eth is a single item in a rule.
- */
- if (!items->spec &&
- items->type == RTE_FLOW_ITEM_TYPE_ETH) {
- const struct rte_flow_item *next = items + 1;
-
- if (next->type != RTE_FLOW_ITEM_TYPE_END) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "the rule requires"
- " an Ethernet spec");
- return -rte_errno;
- }
}
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx4_flow_items[items->type];
+ if (flow->promisc || flow->allmulti) {
+ msg = "mlx4 does not support additional matching"
+ " criteria combined with indiscriminate"
+ " matching on Ethernet headers";
+ goto exit_item_not_supported;
+ }
+ for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
+ if (proc->next_item[i] == item->type) {
+ next = &mlx4_flow_proc_item_list[item->type];
break;
}
}
- if (!token)
- goto exit_item_not_supported;
- cur_item = token;
- err = cur_item->validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
+ if (!next)
goto exit_item_not_supported;
- if (flow->ibv_attr && cur_item->convert) {
- err = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- flow);
+ proc = next;
+ /*
+ * Perform basic sanity checks only once, while handle is
+ * not allocated.
+ */
+ if (flow == &temp) {
+ err = mlx4_flow_item_check(item, proc, error);
if (err)
- goto exit_item_not_supported;
+ return err;
}
- flow->offset += cur_item->dst_sz;
+ if (proc->merge) {
+ err = proc->merge(flow, item, proc, error);
+ if (err)
+ return err;
+ }
+ flow->ibv_attr_size += proc->dst_sz;
}
- /* Go over actions list */
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- const struct rte_flow_action_queue *queue =
- (const struct rte_flow_action_queue *)
- actions->conf;
+ /* Go over actions list. */
+ for (action = actions; action->type; ++action) {
+ switch (action->type) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_eth_rss_conf *rss_conf;
+ unsigned int i;
- if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ flow->drop = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ if (flow->rss)
+ break;
+ queue = action->conf;
+ if (queue->index >= priv->dev->data->nb_rx_queues) {
+ msg = "queue target index beyond number of"
+ " configured Rx queues";
goto exit_action_not_supported;
- action.queue = 1;
- action.queues_n = 1;
- action.queues[0] = queue->index;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
- int i;
- int ierr;
- const struct rte_flow_action_rss *rss =
- (const struct rte_flow_action_rss *)
- actions->conf;
-
- if (!priv->hw_rss) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "RSS cannot be used with "
- "the current configuration");
- return -rte_errno;
}
- if (!priv->isolated) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "RSS cannot be used without "
- "isolated mode");
- return -rte_errno;
+ flow->rss = mlx4_rss_get
+ (priv, 0, mlx4_rss_hash_key_default, 1,
+ &queue->index);
+ if (!flow->rss) {
+ msg = "not enough resources for additional"
+ " single-queue RSS context";
+ goto exit_action_not_supported;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (flow->rss)
+ break;
+ rss = action->conf;
+ /* Default RSS configuration if none is provided. */
+ rss_conf =
+ rss->rss_conf ?
+ rss->rss_conf :
+ &(struct rte_eth_rss_conf){
+ .rss_key = mlx4_rss_hash_key_default,
+ .rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rss_hf = (ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP),
+ };
+ /* Sanity checks. */
+ for (i = 0; i < rss->num; ++i)
+ if (rss->queue[i] >=
+ priv->dev->data->nb_rx_queues)
+ break;
+ if (i != rss->num) {
+ msg = "queue index target beyond number of"
+ " configured Rx queues";
+ goto exit_action_not_supported;
}
if (!rte_is_power_of_2(rss->num)) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "the number of queues "
- "should be power of two");
- return -rte_errno;
+ msg = "for RSS, mlx4 requires the number of"
+ " queues to be a power of two";
+ goto exit_action_not_supported;
}
- if (priv->max_rss_tbl_sz < rss->num) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "the number of queues "
- "is too large");
- return -rte_errno;
+ if (rss_conf->rss_key_len !=
+ sizeof(flow->rss->key)) {
+ msg = "mlx4 supports exactly one RSS hash key"
+ " length: "
+ MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
+ goto exit_action_not_supported;
}
- /* checking indexes array */
- ierr = 0;
- for (i = 0; i < rss->num; ++i) {
- int j;
- if (rss->queue[i] >= priv->rxqs_n)
- ierr = 1;
- /*
- * Prevent the user from specifying
- * the same queue twice in the RSS array.
- */
- for (j = i + 1; j < rss->num && !ierr; ++j)
- if (rss->queue[j] == rss->queue[i])
- ierr = 1;
- if (ierr) {
- rte_flow_error_set(
- error,
- ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL,
- "RSS action only supports "
- "unique queue indices "
- "in a list");
- return -rte_errno;
- }
+ for (i = 1; i < rss->num; ++i)
+ if (rss->queue[i] - rss->queue[i - 1] != 1)
+ break;
+ if (i != rss->num) {
+ msg = "mlx4 requires RSS contexts to use"
+ " consecutive queue indices only";
+ goto exit_action_not_supported;
}
- action.queue = 1;
- action.queues_n = rss->num;
- for (i = 0; i < rss->num; ++i)
- action.queues[i] = rss->queue[i];
- } else {
+ if (rss->queue[0] % rss->num) {
+ msg = "mlx4 requires the first queue of a RSS"
+ " context to be aligned on a multiple"
+ " of the context size";
+ goto exit_action_not_supported;
+ }
+ flow->rss = mlx4_rss_get
+ (priv, mlx4_conv_rss_hf(rss_conf->rss_hf),
+ rss_conf->rss_key, rss->num, rss->queue);
+ if (!flow->rss) {
+ msg = "either invalid parameters or not enough"
+ " resources for additional multi-queue"
+ " RSS context";
+ goto exit_action_not_supported;
+ }
+ break;
+ default:
goto exit_action_not_supported;
}
}
- if (!action.queue && !action.drop) {
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "no valid action");
- return -rte_errno;
+ if (!flow->rss && !flow->drop)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "no valid action");
+ /* Validation ends here. */
+ if (!addr) {
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
+ return 0;
}
+ if (flow == &temp) {
+ /* Allocate proper handle based on collected data. */
+ const struct mlx4_malloc_vec vec[] = {
+ {
+ .align = alignof(struct rte_flow),
+ .size = sizeof(*flow),
+ .addr = (void **)&flow,
+ },
+ {
+ .align = alignof(struct ibv_flow_attr),
+ .size = temp.ibv_attr_size,
+ .addr = (void **)&temp.ibv_attr,
+ },
+ };
+
+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
+ return rte_flow_error_set
+ (error, -rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule handle allocation failure");
+ /* Most fields will be updated by second pass. */
+ *flow = (struct rte_flow){
+ .ibv_attr = temp.ibv_attr,
+ .ibv_attr_size = sizeof(*flow->ibv_attr),
+ .rss = temp.rss,
+ };
+ *flow->ibv_attr = (struct ibv_flow_attr){
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(*flow->ibv_attr),
+ .priority = attr->priority,
+ .port = priv->port,
+ };
+ goto fill;
+ }
+ *addr = flow;
return 0;
exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, msg ? msg : "item not supported");
exit_action_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "action not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, msg ? msg : "action not supported");
}
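
mlx4_flow_prepare uses a measure-then-fill idiom: the first pass runs against a stack temporary to validate the rule and accumulate ibv_attr_size, then a properly sized handle is allocated and the goto fill label replays the same loop to populate it. A minimal sketch of the idiom applied to a string buffer (hypothetical join() helper):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Measure-then-fill: pass 1 computes size, pass 2 writes. */
    static char *
    join(const char *const *words)
    {
            char *buf = NULL;
            size_t size;
    fill:
            size = 1; /* room for the terminating '\0' */
            for (const char *const *w = words; *w; ++w) {
                    if (buf)
                            memcpy(buf + size - 1, *w, strlen(*w));
                    size += strlen(*w);
            }
            if (!buf) {
                    buf = calloc(1, size);
                    if (!buf)
                            return NULL;
                    goto fill; /* second pass fills the buffer */
            }
            return buf;
    }

    int
    main(void)
    {
            char *s = join((const char *const []){ "a", "bc", "d", NULL });

            assert(s && !strcmp(s, "abcd"));
            free(s);
            return 0;
    }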
/**
@@ -769,552 +874,691 @@ exit_action_not_supported:
* @see rte_flow_validate()
* @see rte_flow_ops
*/
-int
+static int
mlx4_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
+ const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- int ret;
- struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
-
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
- priv_unlock(priv);
- return ret;
-}
-
-/**
- * Destroy a drop queue.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-mlx4_flow_destroy_drop_queue(struct priv *priv)
-{
- if (priv->flow_drop_queue) {
- struct rte_flow_drop *fdq = priv->flow_drop_queue;
- priv->flow_drop_queue = NULL;
- claim_zero(ibv_destroy_qp(fdq->qp));
- claim_zero(ibv_destroy_cq(fdq->cq));
- rte_free(fdq);
- }
+ return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}
/**
- * Create a single drop queue for all drop flows.
+ * Get a drop flow rule resources instance.
*
* @param priv
* Pointer to private structure.
*
* @return
- * 0 on success, negative value otherwise.
+ * Pointer to drop flow resources on success, NULL otherwise and rte_errno
+ * is set.
*/
-static int
-mlx4_flow_create_drop_queue(struct priv *priv)
+static struct mlx4_drop *
+mlx4_drop_get(struct priv *priv)
{
- struct ibv_qp *qp;
- struct ibv_cq *cq;
- struct rte_flow_drop *fdq;
+ struct mlx4_drop *drop = priv->drop;
- fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
- if (!fdq) {
- ERROR("Cannot allocate memory for drop struct");
- goto err;
- }
- cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
- if (!cq) {
- ERROR("Cannot create drop CQ");
- goto err_create_cq;
- }
- qp = ibv_exp_create_qp(priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .send_cq = cq,
- .recv_cq = cq,
- .cap = {
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT,
- .pd = priv->pd,
- .port_num = priv->port,
- });
- if (!qp) {
- ERROR("Cannot create drop QP");
- goto err_create_qp;
+ if (drop) {
+ assert(drop->refcnt);
+ assert(drop->priv == priv);
+ ++drop->refcnt;
+ return drop;
}
- *fdq = (struct rte_flow_drop){
- .qp = qp,
- .cq = cq,
+ drop = rte_malloc(__func__, sizeof(*drop), 0);
+ if (!drop)
+ goto error;
+ *drop = (struct mlx4_drop){
+ .priv = priv,
+ .refcnt = 1,
};
- priv->flow_drop_queue = fdq;
- return 0;
-err_create_qp:
- claim_zero(ibv_destroy_cq(cq));
-err_create_cq:
- rte_free(fdq);
-err:
- return -1;
+ drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!drop->cq)
+ goto error;
+ drop->qp = ibv_create_qp(priv->pd,
+ &(struct ibv_qp_init_attr){
+ .send_cq = drop->cq,
+ .recv_cq = drop->cq,
+ .qp_type = IBV_QPT_RAW_PACKET,
+ });
+ if (!drop->qp)
+ goto error;
+ priv->drop = drop;
+ return drop;
+error:
+ if (drop) {
+ if (drop->qp)
+ claim_zero(ibv_destroy_qp(drop->qp));
+ if (drop->cq)
+ claim_zero(ibv_destroy_cq(drop->cq));
+ rte_free(drop);
+ }
+ rte_errno = ENOMEM;
+ return NULL;
}
/**
- * Get RSS parent rxq structure for given queues.
+ * Give back a drop flow rule resources instance.
*
- * Creates a new or returns an existed one.
- *
- * @param priv
- * Pointer to private structure.
- * @param queues
- * queues indices array, NULL in default RSS case.
- * @param children_n
- * the size of queues array.
- *
- * @return
- * Pointer to a parent rxq structure, NULL on failure.
+ * @param drop
+ * Pointer to drop flow rule resources.
*/
-static struct rxq *
-priv_parent_get(struct priv *priv,
- uint16_t queues[],
- uint16_t children_n,
- struct rte_flow_error *error)
+static void
+mlx4_drop_put(struct mlx4_drop *drop)
{
- unsigned int i;
- struct rxq *parent;
-
- for (parent = LIST_FIRST(&priv->parents);
- parent;
- parent = LIST_NEXT(parent, next)) {
- unsigned int same = 0;
- unsigned int overlap = 0;
-
- /*
- * Find out whether an appropriate parent queue already exists
- * and can be reused, otherwise make sure there are no overlaps.
- */
- for (i = 0; i < children_n; ++i) {
- unsigned int j;
-
- for (j = 0; j < parent->rss.queues_n; ++j) {
- if (parent->rss.queues[j] != queues[i])
- continue;
- ++overlap;
- if (i == j)
- ++same;
- }
- }
- if (same == children_n &&
- children_n == parent->rss.queues_n)
- return parent;
- else if (overlap)
- goto error;
- }
- /* Exclude the cases when some QPs were created without RSS */
- for (i = 0; i < children_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[queues[i]];
- if (rxq->qp)
- goto error;
- }
- parent = priv_parent_create(priv, queues, children_n);
- if (!parent) {
- rte_flow_error_set(error,
- ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "flow rule creation failure");
- return NULL;
- }
- return parent;
-
-error:
- rte_flow_error_set(error,
- EEXIST,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "sharing a queue between several"
- " RSS groups is not supported");
- return NULL;
+ assert(drop->refcnt);
+ if (--drop->refcnt)
+ return;
+ drop->priv->drop = NULL;
+ claim_zero(ibv_destroy_qp(drop->qp));
+ claim_zero(ibv_destroy_cq(drop->cq));
+ rte_free(drop);
}
/**
- * Complete flow rule creation.
+ * Toggle a configured flow rule.
*
* @param priv
* Pointer to private structure.
- * @param ibv_attr
- * Verbs flow attributes.
- * @param action
- * Target action structure.
+ * @param flow
+ * Flow rule handle to toggle.
+ * @param enable
+ * Whether associated Verbs flow must be created or removed.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow if the rule could be created.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct rte_flow *
-priv_flow_create_action_queue(struct priv *priv,
- struct ibv_flow_attr *ibv_attr,
- struct mlx4_flow_action *action,
- struct rte_flow_error *error)
+static int
+mlx4_flow_toggle(struct priv *priv,
+ struct rte_flow *flow,
+ int enable,
+ struct rte_flow_error *error)
{
- struct ibv_qp *qp;
- struct rte_flow *rte_flow;
- struct rxq *rxq_parent = NULL;
+ struct ibv_qp *qp = NULL;
+ const char *msg;
+ int err;
- assert(priv->pd);
- assert(priv->ctx);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
+ if (!enable) {
+ if (!flow->ibv_flow)
+ return 0;
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ return 0;
}
- if (action->drop) {
- qp = priv->flow_drop_queue ? priv->flow_drop_queue->qp : NULL;
- } else {
- int ret;
+ assert(flow->ibv_attr);
+ if (!flow->internal &&
+ !priv->isolated &&
+ flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
+ if (flow->ibv_flow) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ }
+ err = EACCES;
+ msg = ("priority level "
+ MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
+ " is reserved when not in isolated mode");
+ goto error;
+ }
+ if (flow->rss) {
+ struct mlx4_rss *rss = flow->rss;
+ int missing = 0;
unsigned int i;
- struct rxq *rxq = NULL;
- if (action->queues_n > 1) {
- rxq_parent = priv_parent_get(priv, action->queues,
- action->queues_n, error);
- if (!rxq_parent)
- goto error;
+ /* Stop at the first nonexistent target queue. */
+ for (i = 0; i != rss->queues; ++i)
+ if (rss->queue_id[i] >=
+ priv->dev->data->nb_rx_queues ||
+ !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+ missing = 1;
+ break;
+ }
+ if (flow->ibv_flow) {
+ /*
+ * Nothing to do if the rule is already in the requested
+ * state: dropping while a target queue is missing, or
+ * attached to RSS while all target queues exist.
+ */
+ if (missing ^ !flow->drop)
+ return 0;
+ /* Verbs flow needs updating. */
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else
+ mlx4_rss_detach(rss);
}
- for (i = 0; i < action->queues_n; ++i) {
- rxq = (*priv->rxqs)[action->queues[i]];
- /*
- * In case of isolated mode we postpone
- * ibv receive queue creation till the first
- * rte_flow rule will be applied on that queue.
- */
- if (!rxq->qp) {
- assert(priv->isolated);
- ret = rxq_create_qp(rxq, rxq->elts_n,
- 0, 0, rxq_parent);
- if (ret) {
- rte_flow_error_set(
- error,
- ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL,
- "flow rule creation failure");
- goto error;
- }
+ if (!missing) {
+ err = mlx4_rss_attach(rss);
+ if (err) {
+ err = -err;
+ msg = "cannot create indirection table or hash"
+ " QP to associate flow rule with";
+ goto error;
}
+ qp = rss->qp;
}
- qp = action->queues_n > 1 ? rxq_parent->qp : rxq->qp;
- rte_flow->qp = qp;
+ /* A missing target queue drops traffic implicitly. */
+ flow->drop = missing;
}
- rte_flow->ibv_attr = ibv_attr;
- if (!priv->started)
- return rte_flow;
- rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- goto error;
+ if (flow->drop) {
+ mlx4_drop_get(priv);
+ if (!priv->drop) {
+ err = rte_errno;
+ msg = "resources for drop flow rule cannot be created";
+ goto error;
+ }
+ qp = priv->drop->qp;
}
- return rte_flow;
-
+ assert(qp);
+ if (flow->ibv_flow)
+ return 0;
+ flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ if (flow->ibv_flow)
+ return 0;
+ if (flow->drop)
+ mlx4_drop_put(priv->drop);
+ else if (flow->rss)
+ mlx4_rss_detach(flow->rss);
+ err = errno;
+ msg = "flow rule rejected by device";
error:
- if (rxq_parent)
- rxq_parent_cleanup(rxq_parent);
- rte_free(rte_flow);
- return NULL;
+ return rte_flow_error_set
+ (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
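
The pattern above is worth spelling out: disabling a rule destroys only the Verbs object, while the rte_flow handle and its attributes are retained so the same rule can be re-enabled later (for instance across a device stop/start cycle). A minimal sketch of that life cycle, with made-up names (struct rule and rule_toggle() are illustrative, not driver symbols):

    #include <errno.h>
    #include <stdlib.h>

    /* Illustrative only: the software handle keeps its configuration so
     * the hardware object can be destroyed and re-created at will. */
    struct rule {
        int cfg;    /* configuration retained while disabled */
        void *hw;   /* hardware object, NULL while disabled */
    };

    static int
    rule_toggle(struct rule *r, int enable)
    {
        if (!enable) {
            free(r->hw);   /* tear down hardware state only */
            r->hw = NULL;
            return 0;      /* handle and configuration remain valid */
        }
        if (r->hw)         /* already enabled, nothing to do */
            return 0;
        r->hw = malloc(sizeof(r->cfg)); /* re-create from kept config */
        return r->hw != NULL ? 0 : -ENOMEM;
    }
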
/**
- * Convert a flow.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] items
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Create a flow.
*
- * @return
- * A flow on success, NULL otherwise.
+ * @see rte_flow_create()
+ * @see rte_flow_ops
*/
static struct rte_flow *
-priv_flow_create(struct priv *priv,
+mlx4_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
+ const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
- struct mlx4_flow_action action;
- struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
int err;
- err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
if (err)
return NULL;
- flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- if (!flow.ibv_attr) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate ibv_attr memory");
- return NULL;
- }
- flow.offset = sizeof(struct ibv_flow_attr);
- *flow.ibv_attr = (struct ibv_flow_attr){
- .comp_mask = 0,
- .type = IBV_FLOW_ATTR_NORMAL,
- .size = sizeof(struct ibv_flow_attr),
- .priority = attr->priority,
- .num_of_specs = 0,
- .port = priv->port,
- .flags = 0,
- };
- claim_zero(priv_flow_validate(priv, attr, items, actions,
- error, &flow));
- action = (struct mlx4_flow_action){
- .queue = 0,
- .drop = 0,
- };
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- action.queue = 1;
- action.queues_n = 1;
- action.queues[0] =
- ((const struct rte_flow_action_queue *)
- actions->conf)->index;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action.drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
- unsigned int i;
- const struct rte_flow_action_rss *rss =
- (const struct rte_flow_action_rss *)
- actions->conf;
+ err = mlx4_flow_toggle(priv, flow, priv->started, error);
+ if (!err) {
+ struct rte_flow *curr = LIST_FIRST(&priv->flows);
- action.queue = 1;
- action.queues_n = rss->num;
- for (i = 0; i < rss->num; ++i)
- action.queues[i] = rss->queue[i];
+ /* New rules are inserted after internal ones. */
+ if (!curr || !curr->internal) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
} else {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "unsupported action");
- goto exit;
+ while (LIST_NEXT(curr, next) &&
+ LIST_NEXT(curr, next)->internal)
+ curr = LIST_NEXT(curr, next);
+ LIST_INSERT_AFTER(curr, flow, next);
}
+ return flow;
}
- rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
- &action, error);
- if (rte_flow)
- return rte_flow;
-exit:
- rte_free(flow.ibv_attr);
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
+ rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ error->message);
+ rte_free(flow);
return NULL;
}
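
The insertion logic keeps &priv->flows partitioned with internal rules first and user rules after them; since <sys/queue.h> lists only expose forward traversal, the code walks to the last internal entry before inserting. The same idiom in isolation (struct node and insert_user_node() are made-up names):

    #include <stddef.h>
    #include <sys/queue.h>

    struct node {
        LIST_ENTRY(node) next;
        int internal;          /* internal entries stay at the head */
    };
    LIST_HEAD(node_list, node);

    /* Insert a user (non-internal) entry after the last internal one. */
    static void
    insert_user_node(struct node_list *head, struct node *n)
    {
        struct node *curr = LIST_FIRST(head);

        n->internal = 0;
        if (curr == NULL || !curr->internal) {
            LIST_INSERT_HEAD(head, n, next);
            return;
        }
        while (LIST_NEXT(curr, next) && LIST_NEXT(curr, next)->internal)
            curr = LIST_NEXT(curr, next);
        LIST_INSERT_AFTER(curr, n, next);
    }
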
/**
- * Create a flow.
+ * Configure isolated mode.
*
- * @see rte_flow_create()
- * @see rte_flow_ops
- */
-struct rte_flow *
-mlx4_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
- struct rte_flow *flow;
-
- priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- LIST_INSERT_HEAD(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
- priv_unlock(priv);
- return flow;
-}
-
-/**
* @see rte_flow_isolate()
- *
- * Must be done before calling dev_configure().
- *
- * @param dev
- * Pointer to the ethernet device structure.
- * @param enable
- * Nonzero to enter isolated mode, attempt to leave it otherwise.
- * @param[out] error
- * Perform verbose error reporting if not NULL. PMDs initialize this
- * structure in case of error only.
- *
- * @return
- * 0 on success, a negative value on error.
+ * @see rte_flow_ops
*/
-int
+static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
int enable,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
- if (priv->rxqs) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "isolated mode must be set"
- " before configuring the device");
- priv_unlock(priv);
+ if (!!enable == !!priv->isolated)
+ return 0;
+ priv->isolated = !!enable;
+ if (mlx4_flow_sync(priv, error)) {
+ priv->isolated = !enable;
return -rte_errno;
}
- priv->isolated = !!enable;
- priv_unlock(priv);
return 0;
}
/**
- * Destroy a flow.
+ * Destroy a flow rule.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] flow
- * Flow to destroy.
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
*/
-static void
-priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
+static int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- (void)priv;
+ struct priv *priv = dev->data->dev_private;
+ int err = mlx4_flow_toggle(priv, flow, 0, error);
+
+ if (err)
+ return err;
LIST_REMOVE(flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
- rte_free(flow->ibv_attr);
- DEBUG("Flow destroyed %p", (void *)flow);
+ if (flow->rss)
+ mlx4_rss_put(flow->rss);
rte_free(flow);
+ return 0;
}
/**
- * Destroy a flow.
+ * Destroy user-configured flow rules.
*
- * @see rte_flow_destroy()
+ * This function skips internal flow rules.
+ *
+ * @see rte_flow_flush()
* @see rte_flow_ops
*/
-int
-mlx4_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+static int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = LIST_FIRST(&priv->flows);
+
+ while (flow) {
+ struct rte_flow *next = LIST_NEXT(flow, next);
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, flow);
- priv_unlock(priv);
+ if (!flow->internal)
+ mlx4_flow_destroy(dev, flow, error);
+ flow = next;
+ }
return 0;
}
/**
- * Destroy all flows.
+ * Helper function to determine the next configured VLAN filter.
*
* @param priv
* Pointer to private structure.
+ * @param vlan
+ * VLAN ID to use as a starting point.
+ *
+ * @return
+ * Next configured VLAN ID or a high value (>= 4096) if there is none.
*/
-static void
-priv_flow_flush(struct priv *priv)
+static uint16_t
+mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
+{
+ while (vlan < 4096) {
+ if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+ (UINT64_C(1) << (vlan % 64)))
+ return vlan;
+ ++vlan;
+ }
+ return vlan;
+}
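
This helper relies on the ethdev VLAN filter layout: 4096 possible IDs stored as a bitmap of 64 uint64_t words, where ID n maps to bit (n % 64) of word (n / 64). A self-contained sketch of the underlying bit test (vlan_filter_test() is illustrative):

    #include <stdint.h>

    /* Nonzero when VLAN ID "vlan" (< 4096) is set in the 64-word bitmap. */
    static int
    vlan_filter_test(const uint64_t ids[64], uint16_t vlan)
    {
        return (ids[vlan / 64] >> (vlan % 64)) & 1;
    }

With only VLAN 100 configured, scanning from 0 returns 100 and scanning from 101 returns 4096 (none).
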
+
+/**
+ * Generate internal flow rules.
+ *
+ * Various flow rules are created depending on the mode the device is in:
+ *
+ * 1. Promiscuous: port MAC + catch-all (VLAN filtering is ignored).
+ * 2. All multicast: port MAC/VLAN + catch-all multicast.
+ * 3. Otherwise: port MAC/VLAN + broadcast MAC/VLAN.
+ *
+ * About MAC flow rules:
+ *
+ * - MAC flow rules are generated from @p dev->data->mac_addrs
+ * (@p priv->mac array).
+ * - An additional flow rule for Ethernet broadcasts is also generated.
+ * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * is enabled and VLAN filters are configured.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
{
- while (!LIST_EMPTY(&priv->flows)) {
- struct rte_flow *flow;
+ struct rte_flow_attr attr = {
+ .priority = MLX4_FLOW_PRIORITY_LAST,
+ .ingress = 1,
+ };
+ struct rte_flow_item_eth eth_spec;
+ const struct rte_flow_item_eth eth_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ const struct rte_flow_item_eth eth_allmulti = {
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_vlan vlan_spec;
+ const struct rte_flow_item_vlan vlan_mask = {
+ .tci = RTE_BE16(0x0fff),
+ };
+ struct rte_flow_item pattern[] = {
+ {
+ .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &eth_spec,
+ .mask = &eth_mask,
+ },
+ {
+ /* Replaced with VLAN if filtering is enabled. */
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+	/*
+	 * Round the number of queues down to the largest power of two
+	 * that does not exceed it, to comply with RSS context
+	 * limitations. Extra queues beyond that count silently do not
+	 * get RSS by default.
+	 */
+ uint32_t queues =
+ rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
+ [offsetof(struct rte_flow_action_rss, queue) +
+ sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
+ struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = rss_conf,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct ether_addr *rule_mac = &eth_spec.dst;
+ rte_be16_t *rule_vlan =
+ priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ !priv->dev->data->promiscuous ?
+ &vlan_spec.tci :
+ NULL;
+ int broadcast =
+ !priv->dev->data->promiscuous &&
+ !priv->dev->data->all_multicast;
+ uint16_t vlan = 0;
+ struct rte_flow *flow;
+ unsigned int i;
+ int err = 0;
- flow = LIST_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
+ /* Nothing to be done if there are no Rx queues. */
+ if (!queues)
+ goto error;
+ /* Prepare default RSS configuration. */
+ *rss_conf = (struct rte_flow_action_rss){
+ .rss_conf = NULL, /* Rely on default fallback settings. */
+ .num = queues,
+ };
+ for (i = 0; i != queues; ++i)
+ rss_conf->queue[i] = i;
+ /*
+ * Set up VLAN item if filtering is enabled and at least one VLAN
+ * filter is configured.
+ */
+ if (rule_vlan) {
+ vlan = mlx4_flow_internal_next_vlan(priv, 0);
+ if (vlan < 4096) {
+ pattern[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &vlan_spec,
+ .mask = &vlan_mask,
+ };
+next_vlan:
+ *rule_vlan = rte_cpu_to_be_16(vlan);
+ } else {
+ rule_vlan = NULL;
+ }
}
+ for (i = 0; i != RTE_DIM(priv->mac) + broadcast; ++i) {
+ const struct ether_addr *mac;
+
+ /* Broadcasts are handled by an extra iteration. */
+ if (i < RTE_DIM(priv->mac))
+ mac = &priv->mac[i];
+ else
+ mac = &eth_mask.dst;
+ if (is_zero_ether_addr(mac))
+ continue;
+ /* Check if MAC flow rule is already present. */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_NEXT(flow, next)) {
+ const struct ibv_flow_spec_eth *eth =
+ (const void *)((uintptr_t)flow->ibv_attr +
+ sizeof(*flow->ibv_attr));
+ unsigned int j;
+
+ if (!flow->mac)
+ continue;
+ assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
+ assert(flow->ibv_attr->num_of_specs == 1);
+ assert(eth->type == IBV_FLOW_SPEC_ETH);
+ assert(flow->rss);
+ if (rule_vlan &&
+ (eth->val.vlan_tag != *rule_vlan ||
+ eth->mask.vlan_tag != RTE_BE16(0x0fff)))
+ continue;
+ if (!rule_vlan && eth->mask.vlan_tag)
+ continue;
+ for (j = 0; j != sizeof(mac->addr_bytes); ++j)
+ if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
+ eth->mask.dst_mac[j] != UINT8_C(0xff) ||
+ eth->val.src_mac[j] != UINT8_C(0x00) ||
+ eth->mask.src_mac[j] != UINT8_C(0x00))
+ break;
+ if (j != sizeof(mac->addr_bytes))
+ continue;
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, rss_conf->queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ continue;
+ break;
+ }
+ if (!flow || !flow->internal) {
+ /* Not found, create a new flow rule. */
+ memcpy(rule_mac, mac, sizeof(*mac));
+ flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ actions, error);
+ if (!flow) {
+ err = -rte_errno;
+ goto error;
+ }
+ }
+ flow->select = 1;
+ flow->mac = 1;
+ }
+ if (rule_vlan) {
+ vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
+ if (vlan < 4096)
+ goto next_vlan;
+ }
+ /* Take care of promiscuous and all multicast flow rules. */
+ if (!broadcast) {
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_NEXT(flow, next)) {
+ if (priv->dev->data->promiscuous) {
+ if (flow->promisc)
+ break;
+ } else {
+ assert(priv->dev->data->all_multicast);
+ if (flow->allmulti)
+ break;
+ }
+ }
+ if (flow && flow->internal) {
+ assert(flow->rss);
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, rss_conf->queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ flow = NULL;
+ }
+ if (!flow || !flow->internal) {
+ /* Not found, create a new flow rule. */
+ if (priv->dev->data->promiscuous) {
+ pattern[1].spec = NULL;
+ pattern[1].mask = NULL;
+ } else {
+ assert(priv->dev->data->all_multicast);
+ pattern[1].spec = &eth_allmulti;
+ pattern[1].mask = &eth_allmulti;
+ }
+ pattern[2] = pattern[3];
+ flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ actions, error);
+ if (!flow) {
+ err = -rte_errno;
+ goto error;
+ }
+ }
+ assert(flow->promisc || flow->allmulti);
+ flow->select = 1;
+ }
+error:
+ /* Clear selection and clean up stale internal flow rules. */
+ flow = LIST_FIRST(&priv->flows);
+ while (flow && flow->internal) {
+ struct rte_flow *next = LIST_NEXT(flow, next);
+
+ if (!flow->select)
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ else
+ flow->select = 0;
+ flow = next;
+ }
+ return err;
}
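
One C idiom above deserves a note: rss_conf is a variable-length structure carved out of a stack buffer sized with offsetof() and aligned with alignas(), which avoids a heap allocation on this path. The same idiom reduced to its essentials (struct rss_action and fill_queues() are made-up names):

    #include <stdalign.h>
    #include <stddef.h>
    #include <stdint.h>

    struct rss_action {
        uint32_t num;
        uint16_t queue[];      /* flexible array member */
    };

    static uint32_t
    fill_queues(uint32_t queues)
    {
        /* Stack buffer large and aligned enough for the whole object. */
        alignas(struct rss_action) uint8_t buf
            [offsetof(struct rss_action, queue) +
             sizeof(uint16_t) * queues];
        struct rss_action *conf = (void *)buf;
        uint32_t i, sum = 0;

        conf->num = queues;
        for (i = 0; i != queues; ++i)
            conf->queue[i] = i;
        for (i = 0; i != conf->num; ++i)
            sum += conf->queue[i];
        return sum;
    }
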
/**
- * Destroy all flows.
+ * Synchronize flow rules.
*
- * @see rte_flow_flush()
- * @see rte_flow_ops
+ * This function synchronizes flow rules with the state of the device by
+ * taking into account isolated mode and whether target queues are
+ * configured.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+ int ret;
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv);
- priv_unlock(priv);
+ /* Internal flow rules are guaranteed to come first in the list. */
+ if (priv->isolated) {
+ /*
+		 * Get rid of them in isolated mode; stop at the first
+ * non-internal rule found.
+ */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_FIRST(&priv->flows))
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ } else {
+ /* Refresh internal rules. */
+ ret = mlx4_flow_internal(priv, error);
+ if (ret)
+ return ret;
+ }
+	/* Toggle the remaining flow rules. */
+ LIST_FOREACH(flow, &priv->flows, next) {
+ ret = mlx4_flow_toggle(priv, flow, priv->started, error);
+ if (ret)
+ return ret;
+ }
+ if (!priv->started)
+ assert(!priv->drop);
return 0;
}
/**
- * Remove all flows.
+ * Clean up all flow rules.
*
- * Called by dev_stop() to remove all flows.
+ * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
+ * rules regardless of whether they are internal or user-configured.
*
* @param priv
* Pointer to private structure.
*/
void
-mlx4_priv_flow_stop(struct priv *priv)
+mlx4_flow_clean(struct priv *priv)
{
struct rte_flow *flow;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
- flow->ibv_flow = NULL;
- DEBUG("Flow %p removed", (void *)flow);
- }
- mlx4_flow_destroy_drop_queue(priv);
+ while ((flow = LIST_FIRST(&priv->flows)))
+ mlx4_flow_destroy(priv->dev, flow, NULL);
+ assert(LIST_EMPTY(&priv->rss));
}
+static const struct rte_flow_ops mlx4_flow_ops = {
+ .validate = mlx4_flow_validate,
+ .create = mlx4_flow_create,
+ .destroy = mlx4_flow_destroy,
+ .flush = mlx4_flow_flush,
+ .isolate = mlx4_flow_isolate,
+};
+
/**
- * Add all flows.
+ * Manage filter operations.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_priv_flow_start(struct priv *priv)
+mlx4_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
{
- int ret;
- struct ibv_qp *qp;
- struct rte_flow *flow;
-
- ret = mlx4_flow_create_drop_queue(priv);
- if (ret)
- return -1;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
- flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
- if (!flow->ibv_flow) {
- DEBUG("Flow %p cannot be applied", (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
- }
- DEBUG("Flow %p applied", (void *)flow);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ break;
+ *(const void **)arg = &mlx4_flow_ops;
+ return 0;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
}
- return 0;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
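
For context, RTE_ETH_FILTER_GENERIC with RTE_ETH_FILTER_GET is how the ethdev rte_flow layer of this era retrieves a PMD's flow ops before dispatching rte_flow_create() and friends; roughly (a simplified sketch of what librte_ether does internally, error handling elided):

    #include <rte_ethdev.h>
    #include <rte_flow_driver.h>

    /* Simplified view of the dispatch path; compare rte_flow_ops_get(). */
    static const struct rte_flow_ops *
    flow_ops_get(struct rte_eth_dev *dev)
    {
        const struct rte_flow_ops *ops = NULL;

        if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                      RTE_ETH_FILTER_GET, &ops))
            return NULL;
        return ops;
    }
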
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index beabcf2d..651fd37b 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,12 +34,10 @@
#ifndef RTE_PMD_MLX4_FLOW_H_
#define RTE_PMD_MLX4_FLOW_H_
-#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
@@ -48,61 +46,40 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_byteorder.h>
-#include "mlx4.h"
+/** Last and lowest priority level for a flow rule. */
+#define MLX4_FLOW_PRIORITY_LAST UINT32_C(0xfff)
+/** Meta pattern item used to distinguish internal rules. */
+#define MLX4_FLOW_ITEM_TYPE_INTERNAL ((enum rte_flow_item_type)-1)
+
+/** PMD-specific (mlx4) definition of a flow rule handle. */
struct rte_flow {
LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
struct ibv_flow *ibv_flow; /**< Verbs flow. */
struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
+ uint32_t ibv_attr_size; /**< Size of Verbs attributes. */
+ uint32_t select:1; /**< Used by operations on the linked list. */
+ uint32_t internal:1; /**< Internal flow rule outside isolated mode. */
+ uint32_t mac:1; /**< Rule associated with a configured MAC address. */
+ uint32_t promisc:1; /**< This rule matches everything. */
+ uint32_t allmulti:1; /**< This rule matches all multicast traffic. */
+ uint32_t drop:1; /**< This rule drops packets. */
+ struct mlx4_rss *rss; /**< Rx target. */
};
-int
-mlx4_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-
-struct rte_flow *
-mlx4_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-
-int
-mlx4_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error);
-
-int
-mlx4_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error);
-
-/** Structure to pass to the conversion function. */
-struct mlx4_flow {
- struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
- unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
-};
-
-int
-mlx4_flow_isolate(struct rte_eth_dev *dev,
- int enable,
- struct rte_flow_error *error);
-
-struct mlx4_flow_action {
- uint32_t drop:1; /**< Target is a drop queue. */
- uint32_t queue:1; /**< Target is a receive queue. */
- uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indices to use. */
- uint16_t queues_n; /**< Number of entries in queue[] */
-};
+/* mlx4_flow.c */
-int mlx4_priv_flow_start(struct priv *priv);
-void mlx4_priv_flow_stop(struct priv *priv);
+int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
+void mlx4_flow_clean(struct priv *priv);
+int mlx4_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
#endif /* RTE_PMD_MLX4_FLOW_H_ */
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
new file mode 100644
index 00000000..b17d109a
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -0,0 +1,397 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Interrupts handling for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_alarm.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_io.h>
+#include <rte_interrupts.h>
+
+#include "mlx4.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+static int mlx4_link_status_check(struct priv *priv);
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_rx_intr_vec_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->nb_efd = 0;
+ intr_handle->intr_vec = NULL;
+}
+
+/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx4_rx_intr_vec_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int count = 0;
+ struct rte_intr_handle *intr_handle = &priv->intr_handle;
+
+ mlx4_rx_intr_vec_disable(priv);
+	intr_handle->intr_vec = malloc(rxqs_n *
+				       sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || !rxq->channel) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_errno = E2BIG;
+ ERROR("too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx4_rx_intr_vec_disable(priv);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->channel->fd;
+ count++;
+ }
+ if (!count)
+ mlx4_rx_intr_vec_disable(priv);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
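
The resulting mapping: intr_vec[i] holds RTE_INTR_VEC_RXTX_OFFSET + k where efds[k] is the event fd for Rx queue i, while queues without a completion channel get an out-of-range vector. A sketch of the reverse lookup under that assumption (rxq_event_fd() is illustrative):

    /* Illustrative only: recover the event fd registered for a queue,
     * or -1 when interrupts are unavailable on it. */
    static int
    rxq_event_fd(const struct rte_intr_handle *ih, unsigned int rxq_idx)
    {
        int vec = ih->intr_vec[rxq_idx] - RTE_INTR_VEC_RXTX_OFFSET;

        if (vec < 0 || (uint32_t)vec >= ih->nb_efd)
            return -1;
        return ih->efds[vec];
    }
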
+
+/**
+ * Process scheduled link status check.
+ *
+ * If LSC interrupts are requested, process related callback.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_link_status_alarm(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ assert(priv->intr_alarm == 1);
+ priv->intr_alarm = 0;
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ _rte_eth_dev_callback_process(priv->dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
+}
+
+/**
+ * Check link status.
+ *
+ * In case of inconsistency, another check is scheduled.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success (link status is consistent), negative errno value
+ * otherwise and rte_errno is set.
+ */
+static int
+mlx4_link_status_check(struct priv *priv)
+{
+ struct rte_eth_link *link = &priv->dev->data->dev_link;
+ int ret = mlx4_link_update(priv->dev, 0);
+
+ if (ret)
+ return ret;
+ if ((!link->link_speed && link->link_status) ||
+ (link->link_speed && !link->link_status)) {
+ if (!priv->intr_alarm) {
+ /* Inconsistent status, check again later. */
+ ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
+ (void (*)(void *))
+ mlx4_link_status_alarm,
+ priv);
+ if (ret)
+ return ret;
+ priv->intr_alarm = 1;
+ }
+ rte_errno = EINPROGRESS;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_interrupt_handler(struct priv *priv)
+{
+ enum { LSC, RMV, };
+ static const enum rte_eth_event_type type[] = {
+ [LSC] = RTE_ETH_EVENT_INTR_LSC,
+ [RMV] = RTE_ETH_EVENT_INTR_RMV,
+ };
+ uint32_t caught[RTE_DIM(type)] = { 0 };
+ struct ibv_async_event event;
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+ unsigned int i;
+
+	/* Read all messages and acknowledge them. */
+ while (!ibv_get_async_event(priv->ctx, &event)) {
+ switch (event.event_type) {
+ case IBV_EVENT_PORT_ACTIVE:
+ case IBV_EVENT_PORT_ERR:
+ if (intr_conf->lsc && !mlx4_link_status_check(priv))
+ ++caught[LSC];
+ break;
+ case IBV_EVENT_DEVICE_FATAL:
+ if (intr_conf->rmv)
+ ++caught[RMV];
+ break;
+ default:
+ DEBUG("event type %d on physical port %d not handled",
+ event.event_type, event.element.port_num);
+ }
+ ibv_ack_async_event(&event);
+ }
+ for (i = 0; i != RTE_DIM(caught); ++i)
+ if (caught[i])
+ _rte_eth_dev_callback_process(priv->dev, type[i],
+ NULL, NULL);
+}
+
+/**
+ * MLX4 CQ notification.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ * @param solicited
+ * Is request solicited or not.
+ */
+static void
+mlx4_arm_cq(struct rxq *rxq, int solicited)
+{
+ struct mlx4_cq *cq = &rxq->mcq;
+ uint64_t doorbell;
+ uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
+ uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
+ uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;
+
+ *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
+ /*
+ * Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ rte_wmb();
+ doorbell = sn << 28 | cmd | cq->cqn;
+ doorbell <<= 32;
+ doorbell |= ci;
+ rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
+}
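
The 64-bit doorbell therefore packs (sn << 28 | cmd | cqn) in the high word and the masked consumer index in the low word. The same encoding in isolation (arm_doorbell() is illustrative; sn is assumed already masked to two bits as above):

    #include <stdint.h>

    static uint64_t
    arm_doorbell(uint32_t sn, uint32_t cmd, uint32_t cqn, uint32_t ci)
    {
        /* High word: sequence number (bits 28-29), command, CQ number;
         * low word: 24-bit consumer index. */
        return ((uint64_t)((sn << 28) | cmd | cqn) << 32) | ci;
    }
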
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_uninstall(struct priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ if (priv->intr_handle.fd != -1) {
+ rte_intr_callback_unregister(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ priv->intr_handle.fd = -1;
+ }
+ rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
+ priv->intr_alarm = 0;
+ mlx4_rx_intr_vec_disable(priv);
+ rte_errno = err;
+ return 0;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_intr_install(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+ int rc;
+
+ mlx4_intr_uninstall(priv);
+ if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
+ goto error;
+ if (intr_conf->lsc | intr_conf->rmv) {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ rc = rte_intr_callback_register(&priv->intr_handle,
+ (void (*)(void *))
+ mlx4_interrupt_handler,
+ priv);
+ if (rc < 0) {
+ rte_errno = -rc;
+ goto error;
+ }
+ }
+ return 0;
+error:
+ mlx4_intr_uninstall(priv);
+ return -rte_errno;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ } else {
+ ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq->cq)
+ ret = EINVAL;
+ }
+ if (ret) {
+ rte_errno = ret;
+ WARN("unable to disable interrupt on rx queue %d",
+ idx);
+ } else {
+ rxq->mcq.arm_sn++;
+ ibv_ack_cq_events(rxq->cq, 1);
+ }
+ return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq = dev->data->rx_queues[idx];
+ int ret = 0;
+
+ if (!rxq || !rxq->channel) {
+ ret = EINVAL;
+ rte_errno = ret;
+ WARN("unable to arm interrupt on rx queue %d", idx);
+ } else {
+ mlx4_arm_cq(rxq, 0);
+ }
+ return -ret;
+}
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
new file mode 100644
index 00000000..2a3e2695
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -0,0 +1,293 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Memory management functions for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
+
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+struct mlx4_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/**
+ * Called by mlx4_check_mempool() when iterating the memory chunks.
+ *
+ * @param[in] mp
+ * Pointer to memory pool (unused).
+ * @param[in, out] opaque
+ *   Pointer to mlx4_check_mempool_data shared with mlx4_check_mempool().
+ * @param[in] memhdr
+ * Pointer to mempool chunk header.
+ * @param mem_idx
+ * Mempool element index (unused).
+ */
+static void
+mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx)
+{
+ struct mlx4_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+ /* It already failed, skip the next chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int
+mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
+{
+ struct mlx4_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+ return data.ret;
+}
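
A usage sketch under this file's assumptions: only mempools whose chunks merge into one virtually contiguous [start, end) range can be registered as a single memory region (mempool_usable() is illustrative):

    /* Illustrative only: nonzero when all chunks of "mp" merge into a
     * single virtual range and can back one MR registration. */
    static int
    mempool_usable(struct rte_mempool *mp)
    {
        uintptr_t start;
        uintptr_t end;

        return mlx4_check_mempool(mp, &start, &end) == 0 && end > start;
    }
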
+
+/**
+ * Obtain a memory region from a memory pool.
+ *
+ * If a matching memory region already exists, it is returned with its
+ * reference count incremented; otherwise a new one is registered.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param mp
+ * Pointer to memory pool.
+ *
+ * @return
+ * Memory region pointer, NULL in case of error and rte_errno is set.
+ */
+struct mlx4_mr *
+mlx4_mr_get(struct priv *priv, struct rte_mempool *mp)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ uintptr_t start;
+ uintptr_t end;
+ unsigned int i;
+ struct mlx4_mr *mr;
+
+ if (mlx4_check_mempool(mp, &start, &end) != 0) {
+ rte_errno = EINVAL;
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+ DEBUG("mempool %p area start=%p end=%p size=%zu",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ /* Round start and end to page boundary if found in memory segments. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+ uintptr_t addr = (uintptr_t)ms[i].addr;
+ size_t len = ms[i].len;
+ unsigned int align = ms[i].hugepage_sz;
+
+ if ((start > addr) && (start < addr + len))
+ start = RTE_ALIGN_FLOOR(start, align);
+ if ((end > addr) && (end < addr + len))
+ end = RTE_ALIGN_CEIL(end, align);
+ }
+ DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ rte_spinlock_lock(&priv->mr_lock);
+ LIST_FOREACH(mr, &priv->mr, next)
+ if (mp == mr->mp && start >= mr->start && end <= mr->end)
+ break;
+ if (mr) {
+ ++mr->refcnt;
+ goto release;
+ }
+ mr = rte_malloc(__func__, sizeof(*mr), 0);
+ if (!mr) {
+ rte_errno = ENOMEM;
+ goto release;
+ }
+ *mr = (struct mlx4_mr){
+ .start = start,
+ .end = end,
+ .refcnt = 1,
+ .priv = priv,
+ .mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
+ IBV_ACCESS_LOCAL_WRITE),
+ .mp = mp,
+ };
+ if (mr->mr) {
+ mr->lkey = mr->mr->lkey;
+ LIST_INSERT_HEAD(&priv->mr, mr, next);
+ } else {
+ rte_free(mr);
+ mr = NULL;
+ rte_errno = errno ? errno : EINVAL;
+ }
+release:
+ rte_spinlock_unlock(&priv->mr_lock);
+ return mr;
+}
+
+/**
+ * Release a memory region.
+ *
+ * This function decrements its reference count and destroys it after
+ * reaching 0.
+ *
+ * Note: to avoid race conditions, given that this function may be used
+ * from the data plane, it is extremely important that each user holds
+ * its own reference.
+ *
+ * @param mr
+ * Memory region to release.
+ */
+void
+mlx4_mr_put(struct mlx4_mr *mr)
+{
+ struct priv *priv = mr->priv;
+
+ rte_spinlock_lock(&priv->mr_lock);
+ assert(mr->refcnt);
+ if (--mr->refcnt)
+ goto release;
+ LIST_REMOVE(mr, next);
+ claim_zero(ibv_dereg_mr(mr->mr));
+ rte_free(mr);
+release:
+ rte_spinlock_unlock(&priv->mr_lock);
+}
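
Putting both halves together, the intended usage is strict get/put pairing per user (mr_usage_sketch() is illustrative):

    /* Illustrative only: each user takes and releases its own reference;
     * the MR is deregistered only when the last user puts it. */
    static void
    mr_usage_sketch(struct priv *priv, struct rte_mempool *mp)
    {
        struct mlx4_mr *mr = mlx4_mr_get(priv, mp);

        if (mr == NULL)
            return;
        /* ... use mr->lkey in send/receive descriptors ... */
        mlx4_mr_put(mr);
    }
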
+
+/**
+ * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[].
+ * If mp2mr[] is full, remove an entry first.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param[in] mp
+ * Memory pool for which a memory region lkey must be added.
+ * @param[in] i
+ * Index in memory pool (MP) where to add memory region (MR).
+ *
+ * @return
+ * Added mr->lkey on success, (uint32_t)-1 on failure.
+ */
+uint32_t
+mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
+{
+ struct mlx4_mr *mr;
+
+ /* Add a new entry, register MR first. */
+ DEBUG("%p: discovered new memory pool \"%s\" (%p)",
+ (void *)txq, mp->name, (void *)mp);
+ mr = mlx4_mr_get(txq->priv, mp);
+ if (unlikely(mr == NULL)) {
+ DEBUG("%p: unable to configure MR, mlx4_mr_get() failed",
+ (void *)txq);
+ return (uint32_t)-1;
+ }
+ if (unlikely(i == RTE_DIM(txq->mp2mr))) {
+ /* Table is full, remove oldest entry. */
+ DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
+ (void *)txq);
+ --i;
+ mlx4_mr_put(txq->mp2mr[0].mr);
+ memmove(&txq->mp2mr[0], &txq->mp2mr[1],
+ (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
+ }
+ /* Store the new entry. */
+ txq->mp2mr[i].mp = mp;
+ txq->mp2mr[i].mr = mr;
+ txq->mp2mr[i].lkey = mr->lkey;
+	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
+ return txq->mp2mr[i].lkey;
+}
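
The fast-path counterpart presumably scans the small mp2mr[] cache before falling back to this registration routine; a simplified sketch of such a lookup (txq_mp2mr_sketch() is illustrative, the driver's actual inline lives elsewhere):

    /* Illustrative only: a cache hit returns the stored lkey, a miss
     * defers to mlx4_txq_add_mr(), which also handles a full table. */
    static uint32_t
    txq_mp2mr_sketch(struct txq *txq, struct rte_mempool *mp)
    {
        uint32_t i;

        for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
            if (txq->mp2mr[i].mp == NULL)
                break;                   /* end of used entries */
            if (txq->mp2mr[i].mp == mp)
                return txq->mp2mr[i].lkey;
        }
        return mlx4_txq_add_mr(txq, mp, i);
    }
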
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
new file mode 100644
index 00000000..fcc7c129
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -0,0 +1,177 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MLX4_PRM_H_
+#define MLX4_PRM_H_
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/* ConnectX-3 Tx queue basic block. */
+#define MLX4_TXBB_SHIFT 6
+#define MLX4_TXBB_SIZE (1 << MLX4_TXBB_SHIFT)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes. */
+#define MLX4_MAX_WQE_SIZE 512
+#define MLX4_MAX_WQE_TXBBS (MLX4_MAX_WQE_SIZE / MLX4_TXBB_SIZE)
+
+/* Send queue stamping/invalidating information. */
+#define MLX4_SQ_STAMP_STRIDE 64
+#define MLX4_SQ_STAMP_DWORDS (MLX4_SQ_STAMP_STRIDE / 4)
+#define MLX4_SQ_STAMP_SHIFT 31
+#define MLX4_SQ_STAMP_VAL 0x7fffffff
+
+/* Work queue element (WQE) flags. */
+#define MLX4_BIT_WQE_OWN 0x80000000
+#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
+#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
+
+#define MLX4_SIZE_TO_TXBBS(size) \
+ (RTE_ALIGN((size), (MLX4_TXBB_SIZE)) >> (MLX4_TXBB_SHIFT))
+
+/* CQE checksum flags. */
+enum {
+ MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25),
+ MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26),
+ MLX4_CQE_L2_TUNNEL = (int)(1u << 27),
+ MLX4_CQE_L2_VLAN_MASK = (int)(3u << 29),
+ MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31),
+};
+
+/* CQE status flags. */
+#define MLX4_CQE_STATUS_IPV4 (1 << 22)
+#define MLX4_CQE_STATUS_IPV4F (1 << 23)
+#define MLX4_CQE_STATUS_IPV6 (1 << 24)
+#define MLX4_CQE_STATUS_IPV4OPT (1 << 25)
+#define MLX4_CQE_STATUS_TCP (1 << 26)
+#define MLX4_CQE_STATUS_UDP (1 << 27)
+#define MLX4_CQE_STATUS_PTYPE_MASK \
+ (MLX4_CQE_STATUS_IPV4 | \
+ MLX4_CQE_STATUS_IPV4F | \
+ MLX4_CQE_STATUS_IPV6 | \
+ MLX4_CQE_STATUS_IPV4OPT | \
+ MLX4_CQE_STATUS_TCP | \
+ MLX4_CQE_STATUS_UDP)
+
+/* Send queue information. */
+struct mlx4_sq {
+ volatile uint8_t *buf; /**< SQ buffer. */
+	volatile uint8_t *eob; /**< End of SQ buffer. */
+ uint32_t head; /**< SQ head counter in units of TXBBS. */
+ uint32_t tail; /**< SQ tail counter in units of TXBBS. */
+	uint32_t txbb_cnt; /**< Number of TXBBs in the queue (power of two). */
+	uint32_t txbb_cnt_mask; /**< txbb_cnt mask (txbb_cnt is a power of two). */
+	uint32_t headroom_txbbs; /**< Number of TXBBs that must be kept free. */
+	volatile uint32_t *db; /**< Pointer to the doorbell. */
+	uint32_t doorbell_qpn; /**< QP number to write to the doorbell. */
+};
+
+#define mlx4_get_send_wqe(sq, n) ((sq)->buf + ((n) * (MLX4_TXBB_SIZE)))
+
+/* Completion queue events, numbers and masks. */
+#define MLX4_CQ_DB_GEQ_N_MASK 0x3
+#define MLX4_CQ_DOORBELL 0x20
+#define MLX4_CQ_DB_CI_MASK 0xffffff
+
+/* Completion queue information. */
+struct mlx4_cq {
+ volatile void *cq_uar; /**< CQ user access region. */
+ volatile void *cq_db_reg; /**< CQ doorbell register. */
+ volatile uint32_t *set_ci_db; /**< Pointer to the CQ doorbell. */
+ volatile uint32_t *arm_db; /**< Arming Rx events doorbell. */
+ volatile uint8_t *buf; /**< Pointer to the completion queue buffer. */
+ uint32_t cqe_cnt; /**< Number of entries in the queue. */
+ uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. */
+ uint32_t cons_index; /**< Last queue entry that was handled. */
+ uint32_t cqn; /**< CQ number. */
+ int arm_sn; /**< Rx event counter. */
+};
+
+/**
+ * Retrieve a CQE entry from a CQ.
+ *
+ * cqe = cq->buf + cons_index * cqe_size + cqe_offset
+ *
+ * Where cqe_size is 32 or 64 bytes and cqe_offset is 0 or 32 (depending on
+ * cqe_size).
+ *
+ * @param cq
+ * CQ to retrieve entry from.
+ * @param index
+ * Entry index.
+ *
+ * @return
+ * Pointer to CQE entry.
+ */
+static inline volatile struct mlx4_cqe *
+mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)
+{
+ return (volatile struct mlx4_cqe *)(cq->buf +
+ ((index & (cq->cqe_cnt - 1)) <<
+ (5 + cq->cqe_64)) +
+ (cq->cqe_64 << 5));
+}
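
For example, with cqe_cnt = 256 and 64-byte entries (cqe_64 = 1), index 300 wraps to 300 & 255 = 44 and resolves to buf + (44 << 6) + 32, where the extra 32-byte offset skips the unused first half of each 64-byte CQE; with 32-byte entries (cqe_64 = 0) the same index resolves to buf + (44 << 5).
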
+
+/**
+ * Transpose a flag in a value.
+ *
+ * @param val
+ * Input value.
+ * @param from
+ * Flag to retrieve from input value.
+ * @param to
+ * Flag to set in output value.
+ *
+ * @return
+ * Output value with transposed flag enabled if present on input.
+ */
+static inline uint64_t
+mlx4_transpose(uint64_t val, uint64_t from, uint64_t to)
+{
+ return (from >= to ?
+ (val & from) / (from / to) :
+ (val & from) * (to / from));
+}
+
+#endif /* MLX4_PRM_H_ */
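
A usage sketch for mlx4_transpose(): because the two flags differ by a power-of-two ratio, a single divide or multiply relocates the bit, e.g. mapping a CQE status flag onto an arbitrary output bit (cqe_status_to_bit2() is illustrative):

    #include <stdint.h>

    /* Illustrative only: yields (1 << 2) when MLX4_CQE_STATUS_IPV4
     * (1 << 22) is set in "status", 0 otherwise. */
    static inline uint64_t
    cqe_status_to_bit2(uint64_t status)
    {
        return mlx4_transpose(status, MLX4_CQE_STATUS_IPV4,
                              UINT64_C(1) << 2);
    }
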
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
new file mode 100644
index 00000000..8b97a894
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -0,0 +1,873 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Rx queues configuration for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_flow.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Historical RSS hash key.
+ *
+ * This used to be the default for mlx4 in Linux before v3.19 switched to
+ * generating random hash keys through netdev_rss_key_fill().
+ *
+ * It is used in this PMD for consistency with past DPDK releases but can
+ * now be overridden through user configuration.
+ *
+ * Note: this is not const to work around API quirks.
+ */
+uint8_t
+mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
+ 0x2c, 0xc6, 0x81, 0xd1,
+ 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19,
+ 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9,
+ 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7,
+ 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3,
+ 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+/**
+ * Obtain a RSS context with specified properties.
+ *
+ * Used when creating a flow rule targeting one or several Rx queues.
+ *
+ * If a matching RSS context already exists, it is returned with its
+ * reference count incremented.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param fields
+ * Fields for RSS processing (Verbs format).
+ * @param[in] key
+ * Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
+ * @param queues
+ * Number of target queues.
+ * @param[in] queue_id
+ * Target queues.
+ *
+ * @return
+ * Pointer to RSS context on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx4_rss *
+mlx4_rss_get(struct priv *priv, uint64_t fields,
+ uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ uint16_t queues, const uint16_t queue_id[])
+{
+ struct mlx4_rss *rss;
+ size_t queue_id_size = sizeof(queue_id[0]) * queues;
+
+ LIST_FOREACH(rss, &priv->rss, next)
+ if (fields == rss->fields &&
+ queues == rss->queues &&
+ !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
+ !memcmp(queue_id, rss->queue_id, queue_id_size)) {
+ ++rss->refcnt;
+ return rss;
+ }
+ rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
+ queue_id_size, 0);
+ if (!rss)
+ goto error;
+ *rss = (struct mlx4_rss){
+ .priv = priv,
+ .refcnt = 1,
+ .usecnt = 0,
+ .qp = NULL,
+ .ind = NULL,
+ .fields = fields,
+ .queues = queues,
+ };
+ memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
+ memcpy(rss->queue_id, queue_id, queue_id_size);
+ LIST_INSERT_HEAD(&priv->rss, rss, next);
+ return rss;
+error:
+ rte_errno = ENOMEM;
+ return NULL;
+}
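
Two identical requests therefore share a single context; a sketch of the resulting reference counting (rss_sharing_sketch() is illustrative):

    /* Illustrative only: the second get returns the same context with
     * its reference count bumped; it is freed on the last put. */
    static void
    rss_sharing_sketch(struct priv *priv, uint64_t fields,
                       uint8_t key[MLX4_RSS_HASH_KEY_SIZE])
    {
        uint16_t queue_id[4] = { 0, 1, 2, 3 };
        struct mlx4_rss *a = mlx4_rss_get(priv, fields, key, 4, queue_id);
        struct mlx4_rss *b = mlx4_rss_get(priv, fields, key, 4, queue_id);

        if (a != NULL && b != NULL) /* a == b on success */
            mlx4_rss_put(b);
        if (a != NULL)
            mlx4_rss_put(a);
    }
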
+
+/**
+ * Release a RSS context instance.
+ *
+ * Used when destroying a flow rule targeting one or several Rx queues.
+ *
+ * This function decrements the reference count of the context and destroys
+ * it after reaching 0. The context must have no users at this point; all
+ * prior calls to mlx4_rss_attach() must have been followed by matching
+ * calls to mlx4_rss_detach().
+ *
+ * @param rss
+ * RSS context to release.
+ */
+void
+mlx4_rss_put(struct mlx4_rss *rss)
+{
+ assert(rss->refcnt);
+ if (--rss->refcnt)
+ return;
+ assert(!rss->usecnt);
+ assert(!rss->qp);
+ assert(!rss->ind);
+ LIST_REMOVE(rss, next);
+ rte_free(rss);
+}
+
+/**
+ * Attach a user to a RSS context instance.
+ *
+ * Used when the RSS QP and indirection table objects must be instantiated,
+ * that is, when a flow rule must be enabled.
+ *
+ * This function increments the usage count of the context.
+ *
+ * @param rss
+ * RSS context to attach to.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rss_attach(struct mlx4_rss *rss)
+{
+ assert(rss->refcnt);
+ if (rss->usecnt++) {
+ assert(rss->qp);
+ assert(rss->ind);
+ return 0;
+ }
+
+ struct ibv_wq *ind_tbl[rss->queues];
+ struct priv *priv = rss->priv;
+ const char *msg;
+ unsigned int i = 0;
+ int ret;
+
+ if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
+ ret = EINVAL;
+ msg = "number of RSS queues must be a power of two";
+ goto error;
+ }
+ for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
+ uint16_t id = rss->queue_id[i];
+ struct rxq *rxq = NULL;
+
+ if (id < priv->dev->data->nb_rx_queues)
+ rxq = priv->dev->data->rx_queues[id];
+ if (!rxq) {
+ ret = EINVAL;
+ msg = "RSS target queue is not configured";
+ goto error;
+ }
+ ret = mlx4_rxq_attach(rxq);
+ if (ret) {
+ ret = -ret;
+ msg = "unable to attach RSS target queue";
+ goto error;
+ }
+ ind_tbl[i] = rxq->wq;
+ }
+ rss->ind = ibv_create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
+ .ind_tbl = ind_tbl,
+ .comp_mask = 0,
+ });
+ if (!rss->ind) {
+ ret = errno ? errno : EINVAL;
+ msg = "RSS indirection table creation failure";
+ goto error;
+ }
+ rss->qp = ibv_create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .comp_mask = (IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_RX_HASH |
+ IBV_QP_INIT_ATTR_IND_TABLE),
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .pd = priv->pd,
+ .rwq_ind_tbl = rss->ind,
+ .rx_hash_conf = {
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rx_hash_key = rss->key,
+ .rx_hash_fields_mask = rss->fields,
+ },
+ });
+ if (!rss->qp) {
+ ret = errno ? errno : EINVAL;
+ msg = "RSS hash QP creation failure";
+ goto error;
+ }
+ ret = ibv_modify_qp
+ (rss->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
+ if (ret) {
+ msg = "failed to switch RSS hash QP to INIT state";
+ goto error;
+ }
+ ret = ibv_modify_qp
+ (rss->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ msg = "failed to switch RSS hash QP to RTR state";
+ goto error;
+ }
+ return 0;
+error:
+ if (rss->qp) {
+ claim_zero(ibv_destroy_qp(rss->qp));
+ rss->qp = NULL;
+ }
+ if (rss->ind) {
+ claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+ rss->ind = NULL;
+ }
+ while (i--)
+ mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ ERROR("mlx4: %s", msg);
+ --rss->usecnt;
+ rte_errno = ret;
+ return -ret;
+}
+
+/**
+ * Detach a user from a RSS context instance.
+ *
+ * Used when disabling (not destroying) a flow rule.
+ *
+ * This function decrements the usage count of the context and destroys
+ * usage resources after reaching 0.
+ *
+ * @param rss
+ * RSS context to detach from.
+ */
+void
+mlx4_rss_detach(struct mlx4_rss *rss)
+{
+ struct priv *priv = rss->priv;
+ unsigned int i;
+
+ assert(rss->refcnt);
+ assert(rss->qp);
+ assert(rss->ind);
+ if (--rss->usecnt)
+ return;
+ claim_zero(ibv_destroy_qp(rss->qp));
+ rss->qp = NULL;
+ claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+ rss->ind = NULL;
+ for (i = 0; i != rss->queues; ++i)
+ mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+}
+
+/**
+ * Initialize common RSS context resources.
+ *
+ * Because ConnectX-3 hardware limitations require a fixed order in the
+ * indirection table, WQs must be allocated sequentially to be part of a
+ * common RSS context.
+ *
+ * Since a newly created WQ cannot be moved to a different context, this
+ * function allocates them all at once, one for each configured Rx queue,
+ * as well as all related resources (CQs and mbufs).
+ *
+ * This must therefore be done before creating any Rx flow rules relying on
+ * indirection tables.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rss_init(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+ uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
+ uint32_t wq_num_prev = 0;
+ const char *msg;
+ unsigned int i;
+ int ret;
+
+ /* Prepare range for RSS contexts before creating the first WQ. */
+ ret = mlx4dv_set_context_attr(priv->ctx,
+ MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
+ &log2_range);
+ if (ret) {
+ ERROR("cannot set up range size for RSS context to %u"
+ " (for %u Rx queues), error: %s",
+ 1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
+ rte_errno = ret;
+ return -ret;
+ }
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct ibv_cq *cq;
+ struct ibv_wq *wq;
+ uint32_t wq_num;
+
+ /* Attach the configured Rx queues. */
+ if (rxq) {
+ assert(!rxq->usecnt);
+ ret = mlx4_rxq_attach(rxq);
+ if (!ret) {
+ wq_num = rxq->wq->wq_num;
+ goto wq_num_check;
+ }
+ ret = -ret;
+ msg = "unable to create Rx queue resources";
+ goto error;
+ }
+ /*
+ * WQs are temporarily allocated for unconfigured Rx queues
+ * to maintain proper index alignment in indirection table
+ * by skipping unused WQ numbers.
+ *
+ * The reason this works at all even though these WQs are
+ * immediately destroyed is that WQNs are allocated
+ * sequentially and are guaranteed to never be reused in the
+ * same context by the underlying implementation.
+ */
+ cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!cq) {
+ ret = ENOMEM;
+ msg = "placeholder CQ creation failure";
+ goto error;
+ }
+ wq = ibv_create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (wq) {
+ wq_num = wq->wq_num;
+ claim_zero(ibv_destroy_wq(wq));
+ } else {
+ wq_num = 0; /* Shut up GCC 4.8 warnings. */
+ }
+ claim_zero(ibv_destroy_cq(cq));
+ if (!wq) {
+ ret = ENOMEM;
+ msg = "placeholder WQ creation failure";
+ goto error;
+ }
+wq_num_check:
+ /*
+ * While guaranteed by the implementation, make sure WQ
+ * numbers are really sequential (as the saying goes,
+ * trust, but verify).
+ */
+ if (i && wq_num - wq_num_prev != 1) {
+ if (rxq)
+ mlx4_rxq_detach(rxq);
+ ret = ERANGE;
+ msg = "WQ numbers are not sequential";
+ goto error;
+ }
+ wq_num_prev = wq_num;
+ }
+ return 0;
+error:
+ ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
+ i, msg, strerror(ret));
+ while (i--) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ if (rxq)
+ mlx4_rxq_detach(rxq);
+ }
+ rte_errno = ret;
+ return -ret;
+}
+
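mlx4_rss_init() depends on the provider allocating WQ numbers sequentially and never reusing them, which is why placeholder WQs are created and immediately destroyed for unconfigured queues. A standalone sketch of the resulting invariant (illustrative only; the allocator below is a stand-in):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the provider: WQ numbers are handed out sequentially
     * and never reused within the same context. */
    static uint32_t next_wqn = 100;

    static uint32_t alloc_wqn(void)
    {
        return next_wqn++;
    }

    int main(void)
    {
        const int configured[4] = { 1, 0, 1, 1 }; /* Queue 1 unconfigured. */
        uint32_t wqn[4];
        int i;

        for (i = 0; i != 4; ++i) {
            /* A placeholder allocation for queue 1 still consumes one
             * number, so wqn[i] stays equal to wqn[0] + i. */
            wqn[i] = alloc_wqn();
            (void)configured[i];
        }
        for (i = 0; i != 4; ++i)
            printf("queue %d -> WQN %u\n", i, wqn[i]);
        return 0;
    }
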
+/**
+ * Release common RSS context resources.
+ *
+ * As the reverse of mlx4_rss_init(), this must be done after removing all
+ * flow rules relying on indirection tables.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_rss_deinit(struct priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ if (rxq) {
+ assert(rxq->usecnt == 1);
+ mlx4_rxq_detach(rxq);
+ }
+ }
+}
+
+/**
+ * Attach a user to an Rx queue.
+ *
+ * Used when the resources of an Rx queue must be instantiated for it to
+ * reach a usable state.
+ *
+ * This function increments the usage count of the Rx queue.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rxq_attach(struct rxq *rxq)
+{
+ if (rxq->usecnt++) {
+ assert(rxq->cq);
+ assert(rxq->wq);
+ assert(rxq->wqes);
+ assert(rxq->rq_db);
+ return 0;
+ }
+
+ struct priv *priv = rxq->priv;
+ const uint32_t elts_n = 1 << rxq->elts_n;
+ const uint32_t sges_n = 1 << rxq->sges_n;
+ struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
+ struct mlx4dv_obj mlxdv;
+ struct mlx4dv_rwq dv_rwq;
+ struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
+ const char *msg;
+ struct ibv_cq *cq = NULL;
+ struct ibv_wq *wq = NULL;
+ volatile struct mlx4_wqe_data_seg (*wqes)[];
+ unsigned int i;
+ int ret;
+
+ assert(rte_is_power_of_2(elts_n));
+ cq = ibv_create_cq(priv->ctx, elts_n / sges_n, NULL, rxq->channel, 0);
+ if (!cq) {
+ ret = ENOMEM;
+ msg = "CQ creation failure";
+ goto error;
+ }
+ wq = ibv_create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = elts_n / sges_n,
+ .max_sge = sges_n,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (!wq) {
+ ret = errno ? errno : EINVAL;
+ msg = "WQ creation failure";
+ goto error;
+ }
+ ret = ibv_modify_wq
+ (wq,
+ &(struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ });
+ if (ret) {
+ msg = "WQ state change to IBV_WQS_RDY failed";
+ goto error;
+ }
+ /* Retrieve device queue information. */
+ mlxdv.cq.in = cq;
+ mlxdv.cq.out = &dv_cq;
+ mlxdv.rwq.in = wq;
+ mlxdv.rwq.out = &dv_rwq;
+ ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
+ if (ret) {
+ msg = "failed to obtain device information from WQ/CQ objects";
+ goto error;
+ }
+ wqes = (volatile struct mlx4_wqe_data_seg (*)[])
+ ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
+ for (i = 0; i != RTE_DIM(*elts); ++i) {
+ volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
+ struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
+
+ if (buf == NULL) {
+ while (i--) {
+ rte_pktmbuf_free_seg((*elts)[i]);
+ (*elts)[i] = NULL;
+ }
+ ret = ENOMEM;
+ msg = "cannot allocate mbuf";
+ goto error;
+ }
+ /* Headroom is reserved by rte_pktmbuf_alloc(). */
+ assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
+ /* Buffer is supposed to be empty. */
+ assert(rte_pktmbuf_data_len(buf) == 0);
+ assert(rte_pktmbuf_pkt_len(buf) == 0);
+ /* Only the first segment keeps headroom. */
+ if (i % sges_n)
+ buf->data_off = 0;
+ buf->port = rxq->port_id;
+ buf->data_len = rte_pktmbuf_tailroom(buf);
+ buf->pkt_len = rte_pktmbuf_tailroom(buf);
+ buf->nb_segs = 1;
+ *scat = (struct mlx4_wqe_data_seg){
+ .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t)),
+ .byte_count = rte_cpu_to_be_32(buf->data_len),
+ .lkey = rte_cpu_to_be_32(rxq->mr->lkey),
+ };
+ (*elts)[i] = buf;
+ }
+ DEBUG("%p: allocated and configured %u segments (max %u packets)",
+ (void *)rxq, elts_n, elts_n / sges_n);
+ rxq->cq = cq;
+ rxq->wq = wq;
+ rxq->wqes = wqes;
+ rxq->rq_db = dv_rwq.rdb;
+ rxq->mcq.buf = dv_cq.buf.buf;
+ rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
+ rxq->mcq.set_ci_db = dv_cq.set_ci_db;
+ rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
+ rxq->mcq.arm_db = dv_cq.arm_db;
+ rxq->mcq.arm_sn = dv_cq.arm_sn;
+ rxq->mcq.cqn = dv_cq.cqn;
+ rxq->mcq.cq_uar = dv_cq.cq_uar;
+ rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
+ /* Update doorbell counter. */
+ rxq->rq_ci = elts_n / sges_n;
+ rte_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ return 0;
+error:
+ if (wq)
+ claim_zero(ibv_destroy_wq(wq));
+ if (cq)
+ claim_zero(ibv_destroy_cq(cq));
+ rte_errno = ret;
+ ERROR("error while attaching Rx queue %p: %s: %s",
+ (void *)rxq, msg, strerror(ret));
+ return -ret;
+}
+
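In the loop above, only the first mbuf of every group of 2^sges_n elements keeps its headroom, so the remaining segments of a scattered packet each expose a full data room. A standalone sketch of that per-element rule (values are illustrative):

    #include <stdio.h>

    #define HEADROOM 128u /* Stands in for RTE_PKTMBUF_HEADROOM. */

    int main(void)
    {
        const unsigned int sges_n = 2; /* log2: 4 segments per packet. */
        const unsigned int elts_n = 8; /* Ring entries (two packets). */
        unsigned int i;

        for (i = 0; i != elts_n; ++i) {
            /* Only the first segment of each packet keeps headroom so
             * the following segments can hold a full data room each. */
            unsigned int data_off = (i % (1u << sges_n)) ? 0 : HEADROOM;

            printf("elt %u: data_off %u\n", i, data_off);
        }
        return 0;
    }
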
+/**
+ * Detach a user from an Rx queue.
+ *
+ * This function decrements the usage count of the Rx queue and destroys
+ * usage resources after reaching 0.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ */
+void
+mlx4_rxq_detach(struct rxq *rxq)
+{
+ unsigned int i;
+ struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;
+
+ if (--rxq->usecnt)
+ return;
+ rxq->rq_ci = 0;
+ memset(&rxq->mcq, 0, sizeof(rxq->mcq));
+ rxq->rq_db = NULL;
+ rxq->wqes = NULL;
+ claim_zero(ibv_destroy_wq(rxq->wq));
+ rxq->wq = NULL;
+ claim_zero(ibv_destroy_cq(rxq->cq));
+ rxq->cq = NULL;
+ DEBUG("%p: freeing Rx queue elements", (void *)rxq);
+ for (i = 0; (i != RTE_DIM(*elts)); ++i) {
+ if (!(*elts)[i])
+ continue;
+ rte_pktmbuf_free_seg((*elts)[i]);
+ (*elts)[i] = NULL;
+ }
+}
+
+/**
+ * DPDK callback to configure an Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Rx queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
+ struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
+ struct rxq *rxq;
+ struct mlx4_malloc_vec vec[] = {
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*rxq),
+ .addr = (void **)&rxq,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*elts),
+ .addr = (void **)&elts,
+ },
+ };
+ int ret;
+
+ (void)conf; /* Thresholds configuration (ignored). */
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq) {
+ rte_errno = EEXIST;
+ ERROR("%p: Rx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ if (!desc) {
+ rte_errno = EINVAL;
+ ERROR("%p: invalid number of Rx descriptors", (void *)dev);
+ return -rte_errno;
+ }
+ if (desc != RTE_DIM(*elts)) {
+ desc = RTE_DIM(*elts);
+ WARN("%p: increased number of descriptors in Rx queue %u"
+ " to the next power of two (%u)",
+ (void *)dev, idx, desc);
+ }
+ /* Allocate and initialize Rx queue. */
+ mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
+ if (!rxq) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ *rxq = (struct rxq){
+ .priv = priv,
+ .mp = mp,
+ .port_id = dev->data->port_id,
+ .sges_n = 0,
+ .elts_n = rte_log2_u32(desc),
+ .elts = elts,
+ /* Toggle Rx checksum offload if hardware supports it. */
+ .csum = (priv->hw_csum &&
+ dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum_l2tun = (priv->hw_csum_l2tun &&
+ dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .stats = {
+ .idx = idx,
+ },
+ .socket = socket,
+ };
+ /* Enable scattered packets support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ ;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ uint32_t size =
+ RTE_PKTMBUF_HEADROOM +
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t sges_n;
+
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it to the next power of two.
+ */
+ sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
+ rxq->sges_n = sges_n;
+ /* Make sure sges_n did not overflow. */
+ size = mb_len * (1 << rxq->sges_n);
+ size -= RTE_PKTMBUF_HEADROOM;
+ if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ (void *)dev,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ goto error;
+ }
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ DEBUG("%p: maximum number of segments per packet: %u",
+ (void *)dev, 1 << rxq->sges_n);
+ if (desc % (1 << rxq->sges_n)) {
+ rte_errno = EINVAL;
+ ERROR("%p: number of Rx queue descriptors (%u) is not a"
+ " multiple of maximum segments per packet (%u)",
+ (void *)dev,
+ desc,
+ 1 << rxq->sges_n);
+ goto error;
+ }
+ /* Use the entire Rx mempool as the memory region. */
+ rxq->mr = mlx4_mr_get(priv, mp);
+ if (!rxq->mr) {
+ ERROR("%p: MR creation failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ rxq->channel = ibv_create_comp_channel(priv->ctx);
+ if (rxq->channel == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("%p: Rx interrupt completion channel creation"
+ " failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
+ ERROR("%p: unable to make Rx interrupt completion"
+ " channel non-blocking: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ }
+ DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
+ dev->data->rx_queues[idx] = rxq;
+ return 0;
+error:
+ dev->data->rx_queues[idx] = NULL;
+ ret = rte_errno;
+ mlx4_rx_queue_release(rxq);
+ rte_errno = ret;
+ assert(rte_errno > 0);
+ return -rte_errno;
+}
+
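The scattered-Rx branch above sizes sges_n as the ceiling log2 of the number of mbufs a maximum-sized frame spans, then verifies the rounded-up total still fits. A standalone sketch with assumed numbers (2048-byte data room and 9000-byte frames are illustrative; log2_ceil() mimics rte_log2_u32() for these inputs):

    #include <stdint.h>
    #include <stdio.h>

    /* Ceiling log2; rte_log2_u32() behaves this way for these inputs. */
    static uint32_t log2_ceil(uint32_t v)
    {
        uint32_t n = 0;

        while ((1u << n) < v)
            ++n;
        return n;
    }

    int main(void)
    {
        const uint32_t headroom = 128;        /* RTE_PKTMBUF_HEADROOM */
        const uint32_t mb_len = 2048;         /* Assumed mbuf data room. */
        const uint32_t max_rx_pkt_len = 9000; /* Assumed max frame size. */
        uint32_t size = headroom + max_rx_pkt_len;
        /* Mbufs needed for one full packet, rounded up to a power of 2. */
        uint32_t sges_n = log2_ceil(size / mb_len + !!(size % mb_len));

        /* Prints: sges_n=3 -> 8 segments, max payload 16256. */
        printf("sges_n=%u -> %u segments, max payload %u\n",
               sges_n, 1u << sges_n, mb_len * (1u << sges_n) - headroom);
        return 0;
    }
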
+/**
+ * DPDK callback to release an Rx queue.
+ *
+ * @param dpdk_rxq
+ * Generic Rx queue pointer.
+ */
+void
+mlx4_rx_queue_release(void *dpdk_rxq)
+{
+ struct rxq *rxq = (struct rxq *)dpdk_rxq;
+ struct priv *priv;
+ unsigned int i;
+
+ if (rxq == NULL)
+ return;
+ priv = rxq->priv;
+ for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
+ if (priv->dev->data->rx_queues[i] == rxq) {
+ DEBUG("%p: removing Rx queue %p from list",
+ (void *)priv->dev, (void *)rxq);
+ priv->dev->data->rx_queues[i] = NULL;
+ break;
+ }
+ assert(!rxq->cq);
+ assert(!rxq->wq);
+ assert(!rxq->wqes);
+ assert(!rxq->rq_db);
+ if (rxq->channel)
+ claim_zero(ibv_destroy_comp_channel(rxq->channel));
+ if (rxq->mr)
+ mlx4_mr_put(rxq->mr);
+ rte_free(rxq);
+}
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
new file mode 100644
index 00000000..3985e06d
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -0,0 +1,1071 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Data plane functions for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_io.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx4.h"
+#include "mlx4_prm.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+#define WQE_ONE_DATA_SEG_SIZE \
+ (sizeof(struct mlx4_wqe_ctrl_seg) + sizeof(struct mlx4_wqe_data_seg))
+
+/**
+ * Pointer-value pair structure used in mlx4_tx_burst_segs() for saving the
+ * first DWORD (32 bits) of a TXBB.
+ */
+struct pv {
+ volatile struct mlx4_wqe_data_seg *dseg;
+ uint32_t val;
+};
+
+/** A table to translate Rx completion flags to packet type. */
+uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
+ /*
+ * The index to the array should have:
+ * bit[7] - MLX4_CQE_L2_TUNNEL
+ * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
+ * bit[5] - MLX4_CQE_STATUS_UDP
+ * bit[4] - MLX4_CQE_STATUS_TCP
+ * bit[3] - MLX4_CQE_STATUS_IPV4OPT
+ * bit[2] - MLX4_CQE_STATUS_IPV6
+ * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[0] - MLX4_CQE_STATUS_IPV4
+ * giving a total of up to 256 entries.
+ */
+ [0x00] = RTE_PTYPE_L2_ETHER,
+ [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
+ [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_FRAG,
+ [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_TCP,
+ [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_TCP,
+ [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_TCP,
+ [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_UDP,
+ [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_UDP,
+ [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_UDP,
+ /* Tunneled - L3 IPV6 */
+ [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ [0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
+ /* Tunneled - L3 IPV6, TCP */
+ [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ /* Tunneled - L3 IPV6, UDP */
+ [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* Tunneled - L3 IPV4 */
+ [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ /* Tunneled - L3 IPV4, TCP */
+ [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_TCP,
+ /* Tunneled - L3 IPV4, UDP */
+ [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L4_UDP,
+};
+
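An index into the table is simply the bitwise OR of the CQE flag bits listed in the comment at its top. A standalone sketch composing the index for a plain IPv4/TCP completion (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Index bits, matching the layout documented above the table. */
    #define IDX_IPV4 (1u << 0) /* MLX4_CQE_STATUS_IPV4 */
    #define IDX_TCP  (1u << 4) /* MLX4_CQE_STATUS_TCP */

    int main(void)
    {
        /* A plain (non-tunneled) IPv4/TCP completion sets bits 0 and 4,
         * i.e. index 0x11, which the table maps to RTE_PTYPE_L2_ETHER |
         * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP. */
        uint8_t idx = IDX_IPV4 | IDX_TCP;

        printf("index 0x%02x\n", idx);
        return 0;
    }
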
+/**
+ * Stamp a WQE so it won't be reused by the HW.
+ *
+ * This routine is used when freeing a WQE that was in use by the chip, or
+ * when building a WQ entry has failed, leaving partial information on the
+ * queue.
+ *
+ * @param sq
+ * Pointer to the SQ structure.
+ * @param index
+ * Index of the freed WQE.
+ * @param owner
+ * The value of the WQE owner bit to use in the stamp.
+ *
+ * @return
+ * The number of Tx basic blocks (TXBB) the WQE contained.
+ */
+static int
+mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
+{
+ uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
+ (!!owner << MLX4_SQ_STAMP_SHIFT));
+ volatile uint8_t *wqe = mlx4_get_send_wqe(sq,
+ (index & sq->txbb_cnt_mask));
+ volatile uint32_t *ptr = (volatile uint32_t *)wqe;
+ int i;
+ int txbbs_size;
+ int num_txbbs;
+
+ /* Extract the size from the control segment of the WQE. */
+ num_txbbs = MLX4_SIZE_TO_TXBBS((((volatile struct mlx4_wqe_ctrl_seg *)
+ wqe)->fence_size & 0x3f) << 4);
+ txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
+ /* Optimize the common case when there is no wrap-around. */
+ if (wqe + txbbs_size <= sq->eob) {
+ /* Stamp the freed descriptor. */
+ for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += MLX4_SQ_STAMP_DWORDS;
+ }
+ } else {
+ /* Stamp the freed descriptor. */
+ for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += MLX4_SQ_STAMP_DWORDS;
+ if ((volatile uint8_t *)ptr >= sq->eob) {
+ ptr = (volatile uint32_t *)sq->buf;
+ stamp ^= RTE_BE32(0x80000000);
+ }
+ }
+ }
+ return num_txbbs;
+}
+
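The function above writes one stamp word per TXBB and flips the stamp's ownership bit whenever the walk wraps past the end of the queue buffer. A simplified standalone sketch of that loop over plain memory (no hardware access; the constants mirror the MLX4_* values by assumption):

    #include <stdint.h>

    #define TXBB_SIZE    64u /* Bytes per Tx basic block (assumed). */
    #define STAMP_DWORDS (TXBB_SIZE / sizeof(uint32_t))

    /* Stamp one word per TXBB of a freed WQE; the ownership bit of the
     * stamp value is flipped every time the walk wraps past eob. */
    static void stamp_wqe(uint32_t *buf, uint32_t *eob, uint32_t *wqe,
                          unsigned int num_txbbs, uint32_t stamp)
    {
        uint32_t *ptr = wqe;
        unsigned int i;

        for (i = 0; i != num_txbbs; ++i) {
            *ptr = stamp;
            ptr += STAMP_DWORDS;
            if (ptr >= eob) {
                ptr = buf;
                stamp ^= 0x80000000u;
            }
        }
    }
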
+/**
+ * Manage Tx completions.
+ *
+ * When sending a burst, mlx4_tx_burst() posts several WRs.
+ * To improve performance, a completion event is only required once every
+ * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
+ * for other WRs, but this information would not be used anyway.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param elts_n
+ * Number of elements in the Tx queue ring.
+ * @param sq
+ * Pointer to the send queue structure.
+ *
+ * @return
+ * Always 0; completion errors are only reported in debug builds.
+ */
+static int
+mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
+ struct mlx4_sq *sq)
+{
+ unsigned int elts_comp = txq->elts_comp;
+ unsigned int elts_tail = txq->elts_tail;
+ struct mlx4_cq *cq = &txq->mcq;
+ volatile struct mlx4_cqe *cqe;
+ uint32_t cons_index = cq->cons_index;
+ uint16_t new_index;
+ uint16_t nr_txbbs = 0;
+ int pkts = 0;
+
+ /*
+ * Traverse over all CQ entries reported and handle each WQ entry
+ * reported by them.
+ */
+ do {
+ cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
+ if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ !!(cons_index & cq->cqe_cnt)))
+ break;
+ /*
+ * Make sure we read the CQE after we read the ownership bit.
+ */
+ rte_io_rmb();
+#ifndef NDEBUG
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ volatile struct mlx4_err_cqe *cqe_err =
+ (volatile struct mlx4_err_cqe *)cqe;
+ ERROR("%p CQE error - vendor syndrome: 0x%x"
+ " syndrome: 0x%x\n",
+ (void *)txq, cqe_err->vendor_err,
+ cqe_err->syndrome);
+ }
+#endif /* NDEBUG */
+ /* Get WQE index reported in the CQE. */
+ new_index =
+ rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
+ do {
+ /* Free next descriptor. */
+ nr_txbbs +=
+ mlx4_txq_stamp_freed_wqe(sq,
+ (sq->tail + nr_txbbs) & sq->txbb_cnt_mask,
+ !!((sq->tail + nr_txbbs) & sq->txbb_cnt));
+ pkts++;
+ } while (((sq->tail + nr_txbbs) & sq->txbb_cnt_mask) !=
+ new_index);
+ cons_index++;
+ } while (1);
+ if (unlikely(pkts == 0))
+ return 0;
+ /* Update CQ. */
+ cq->cons_index = cons_index;
+ *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
+ sq->tail = sq->tail + nr_txbbs;
+ /* Update the list of packets posted for transmission. */
+ elts_comp -= pkts;
+ assert(elts_comp <= txq->elts_comp);
+ /*
+ * Assume completion status is successful as nothing can be done about
+ * it anyway.
+ */
+ elts_tail += pkts;
+ if (elts_tail >= elts_n)
+ elts_tail -= elts_n;
+ txq->elts_tail = elts_tail;
+ txq->elts_comp = elts_comp;
+ return 0;
+}
+
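A completion event is requested only once every MLX4_PMD_TX_PER_COMP_REQ descriptors by counting elts_comp_cd down in mlx4_tx_burst() below. A standalone sketch of the cadence (the value 64 is an assumption, not the driver's actual constant):

    #include <stdio.h>

    #define TX_PER_COMP_REQ 64 /* Assumed cadence, for illustration. */

    int main(void)
    {
        int comp_cd = TX_PER_COMP_REQ; /* Countdown to next CQE request. */
        unsigned int reqs = 0;
        unsigned int pkt;

        for (pkt = 0; pkt != 1000; ++pkt) {
            int nr_txbbs = 1 + pkt % 3; /* TXBBs used by this packet. */

            comp_cd -= nr_txbbs;
            if (comp_cd <= 0) {
                comp_cd = TX_PER_COMP_REQ;
                ++reqs; /* This WQE asks for MLX4_WQE_CTRL_CQ_UPDATE. */
            }
        }
        printf("%u completion requests for 1000 packets\n", reqs);
        return 0;
    }
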
+/**
+ * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
+ * the cloned mbuf was allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx4_txq_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
+static int
+mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
+ volatile struct mlx4_wqe_ctrl_seg **pctrl)
+{
+ int wqe_real_size;
+ int nr_txbbs;
+ struct pv *pv = (struct pv *)txq->bounce_buf;
+ struct mlx4_sq *sq = &txq->msq;
+ uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl;
+ volatile struct mlx4_wqe_data_seg *dseg;
+ struct rte_mbuf *sbuf;
+ uint32_t lkey;
+ uintptr_t addr;
+ uint32_t byte_count;
+ int pv_counter = 0;
+
+ /* Calculate the needed work queue entry size for this packet. */
+ wqe_real_size = sizeof(volatile struct mlx4_wqe_ctrl_seg) +
+ buf->nb_segs * sizeof(volatile struct mlx4_wqe_data_seg);
+ nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
+ /*
+ * Check that there is room for this WQE in the send queue and that
+ * the WQE size is legal.
+ */
+ if (((sq->head - sq->tail) + nr_txbbs +
+ sq->headroom_txbbs) >= sq->txbb_cnt ||
+ nr_txbbs > MLX4_MAX_WQE_TXBBS) {
+ return -1;
+ }
+ /* Get the control and data entries of the WQE. */
+ ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
+ mlx4_get_send_wqe(sq, head_idx);
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)ctrl + sizeof(struct mlx4_wqe_ctrl_seg));
+ *pctrl = ctrl;
+ /* Fill the data segments with buffer information. */
+ for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
+ addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ rte_prefetch0((volatile void *)addr);
+ /* Handle WQE wraparound. */
+ if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
+ dseg->addr = rte_cpu_to_be_64(addr);
+ /* Memory region key (big endian) for this memory pool. */
+ lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ dseg->lkey = rte_cpu_to_be_32(lkey);
+#ifndef NDEBUG
+ /* Check that the MR lkey for this mempool is valid. */
+ if (unlikely(dseg->lkey == rte_cpu_to_be_32((uint32_t)-1))) {
+ /* MR does not exist. */
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ /*
+ * Restamp entry in case of failure.
+ * Make sure that size is written correctly
+ * Note that we give ownership to the SW, not the HW.
+ */
+ wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
+ buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
+ ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+ mlx4_txq_stamp_freed_wqe(sq, head_idx,
+ (sq->head & sq->txbb_cnt) ? 0 : 1);
+ return -1;
+ }
+#endif /* NDEBUG */
+ if (likely(sbuf->data_len)) {
+ byte_count = rte_cpu_to_be_32(sbuf->data_len);
+ } else {
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ byte_count = RTE_BE32(0x80000000);
+ }
+ /*
+ * If the data segment is not at the beginning of a
+ * Tx basic block (TXBB) then write the byte count,
+ * else postpone the writing to just before updating the
+ * control segment.
+ */
+ if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
+#if RTE_CACHE_LINE_SIZE < 64
+ /*
+ * Need a barrier here before writing the byte_count
+ * fields to make sure that all the data is visible
+ * before the byte_count field is set.
+ * Otherwise, if the segment begins a new cacheline,
+ * the HCA prefetcher could grab the 64-byte chunk and
+ * get a valid (!= 0xffffffff) byte count but stale
+ * data, and end up sending the wrong data.
+ */
+ rte_io_wmb();
+#endif /* RTE_CACHE_LINE_SIZE */
+ dseg->byte_count = byte_count;
+ } else {
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[pv_counter].dseg = dseg;
+ pv[pv_counter++].val = byte_count;
+ }
+ }
+ /* Write the first DWORD of each TXBB saved earlier. */
+ if (pv_counter) {
+ /* Need a barrier here before writing the byte_count. */
+ rte_io_wmb();
+ for (--pv_counter; pv_counter >= 0; pv_counter--)
+ pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
+ }
+ /* Fill the control parameters for this packet. */
+ ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+ return nr_txbbs;
+}
+
+/**
+ * DPDK callback for Tx.
+ *
+ * @param dpdk_txq
+ * Generic pointer to Tx queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ unsigned int elts_head = txq->elts_head;
+ const unsigned int elts_n = txq->elts_n;
+ unsigned int bytes_sent = 0;
+ unsigned int i;
+ unsigned int max;
+ struct mlx4_sq *sq = &txq->msq;
+ int nr_txbbs;
+
+ assert(txq->elts_comp_cd != 0);
+ if (likely(txq->elts_comp != 0))
+ mlx4_txq_complete(txq, elts_n, sq);
+ max = (elts_n - (elts_head - txq->elts_tail));
+ if (max > elts_n)
+ max -= elts_n;
+ assert(max >= 1);
+ assert(max <= elts_n);
+ /* Always leave one free entry in the ring. */
+ --max;
+ if (max > pkts_n)
+ max = pkts_n;
+ for (i = 0; (i != max); ++i) {
+ struct rte_mbuf *buf = pkts[i];
+ unsigned int elts_head_next =
+ (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
+ struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
+ struct txq_elt *elt = &(*txq->elts)[elts_head];
+ uint32_t owner_opcode = MLX4_OPCODE_SEND;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl;
+ volatile struct mlx4_wqe_data_seg *dseg;
+ union {
+ uint32_t flags;
+ uint16_t flags16[2];
+ } srcrb;
+ uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
+ uint32_t lkey;
+ uintptr_t addr;
+
+ /* Clean up old buffer. */
+ if (likely(elt->buf != NULL)) {
+ struct rte_mbuf *tmp = elt->buf;
+
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(elt, 0x66, sizeof(*elt));
+#endif
+ /* Faster than rte_pktmbuf_free(). */
+ do {
+ struct rte_mbuf *next = tmp->next;
+
+ rte_pktmbuf_free_seg(tmp);
+ tmp = next;
+ } while (tmp != NULL);
+ }
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ if (buf->nb_segs == 1) {
+ /*
+ * Check that there is room for this WQE in the send
+ * queue and that the WQE size is legal.
+ */
+ if (((sq->head - sq->tail) + 1 + sq->headroom_txbbs) >=
+ sq->txbb_cnt || 1 > MLX4_MAX_WQE_TXBBS) {
+ elt->buf = NULL;
+ break;
+ }
+ /* Get the control and data entries of the WQE. */
+ ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
+ mlx4_get_send_wqe(sq, head_idx);
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)ctrl +
+ sizeof(struct mlx4_wqe_ctrl_seg));
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ rte_prefetch0((volatile void *)addr);
+ /* Handle WQE wraparound. */
+ if (dseg >=
+ (volatile struct mlx4_wqe_data_seg *)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ sq->buf;
+ dseg->addr = rte_cpu_to_be_64(addr);
+ /* Memory region key (big endian). */
+ lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
+ dseg->lkey = rte_cpu_to_be_32(lkey);
+#ifndef NDEBUG
+ if (unlikely(dseg->lkey ==
+ rte_cpu_to_be_32((uint32_t)-1))) {
+ /* MR does not exist. */
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ /*
+ * Restamp entry in case of failure.
+ * Make sure that size is written correctly
+ * Note that we give ownership to the SW,
+ * not the HW.
+ */
+ ctrl->fence_size =
+ (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
+ mlx4_txq_stamp_freed_wqe(sq, head_idx,
+ (sq->head & sq->txbb_cnt) ? 0 : 1);
+ elt->buf = NULL;
+ break;
+ }
+#endif /* NDEBUG */
+ /* Never TXBB-aligned here, so no compiler barrier is needed. */
+ dseg->byte_count = rte_cpu_to_be_32(buf->data_len);
+ /* Fill the control parameters for this packet. */
+ ctrl->fence_size = (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
+ nr_txbbs = 1;
+ } else {
+ nr_txbbs = mlx4_tx_burst_segs(buf, txq, &ctrl);
+ if (nr_txbbs < 0) {
+ elt->buf = NULL;
+ break;
+ }
+ }
+ /*
+ * For raw Ethernet, the SOLICIT flag is used to indicate
+ * that no ICRC should be calculated.
+ */
+ txq->elts_comp_cd -= nr_txbbs;
+ if (unlikely(txq->elts_comp_cd <= 0)) {
+ txq->elts_comp_cd = txq->elts_comp_cd_init;
+ srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
+ MLX4_WQE_CTRL_CQ_UPDATE);
+ } else {
+ srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
+ }
+ /* Enable HW checksum offload if requested. */
+ if (txq->csum &&
+ (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
+ const uint64_t is_tunneled = (buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN));
+
+ if (is_tunneled && txq->csum_l2tun) {
+ owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
+ MLX4_WQE_CTRL_IL4_HDR_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ srcrb.flags |=
+ RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
+ } else {
+ srcrb.flags |=
+ RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ }
+ }
+ if (txq->lb) {
+ /*
+ * Copy destination MAC address to the WQE; this allows
+ * loopback in eSwitch, so that VFs and PF can
+ * communicate with each other.
+ */
+ srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
+ ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
+ sizeof(uint16_t)));
+ } else {
+ ctrl->imm = 0;
+ }
+ ctrl->srcrb_flags = srcrb.flags;
+ /*
+ * Make sure descriptor is fully written before
+ * setting ownership bit (because HW can start
+ * executing as soon as we do).
+ */
+ rte_io_wmb();
+ ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
+ ((sq->head & sq->txbb_cnt) ?
+ MLX4_BIT_WQE_OWN : 0));
+ sq->head += nr_txbbs;
+ elt->buf = buf;
+ bytes_sent += buf->pkt_len;
+ elts_head = elts_head_next;
+ }
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Increment send statistics counters. */
+ txq->stats.opackets += i;
+ txq->stats.obytes += bytes_sent;
+ /* Make sure that descriptors are written before doorbell record. */
+ rte_wmb();
+ /* Ring QP doorbell. */
+ rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
+ txq->elts_head = elts_head;
+ txq->elts_comp += i;
+ return i;
+}
+
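mlx4_tx_burst() derives the available ring room from the head and tail indices, relying on unsigned wraparound, and always keeps one slot unused to tell a full ring apart from an empty one. A standalone sketch of that computation:

    #include <stdio.h>

    /* Free ring slots given head/tail indices; one slot always stays
     * unused so that a full ring can be told apart from an empty one. */
    static unsigned int tx_ring_room(unsigned int elts_n,
                                     unsigned int head, unsigned int tail)
    {
        unsigned int max = elts_n - (head - tail);

        if (max > elts_n) /* head - tail wrapped around below zero. */
            max -= elts_n;
        return max - 1;
    }

    int main(void)
    {
        /* 66 slots in use, 190 free, 189 usable. */
        printf("%u\n", tx_ring_room(256, 10, 200));
        return 0;
    }
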
+/**
+ * Translate Rx completion flags to packet type.
+ *
+ * @param[in] cqe
+ * Pointer to CQE.
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
+{
+ uint8_t idx = 0;
+ uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
+ uint32_t status = rte_be_to_cpu_32(cqe->status);
+
+ /*
+ * The index to the array should have:
+ * bit[7] - MLX4_CQE_L2_TUNNEL
+ * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
+ */
+ if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
+ idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
+ ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
+ /*
+ * The index to the array should have:
+ * bit[5] - MLX4_CQE_STATUS_UDP
+ * bit[4] - MLX4_CQE_STATUS_TCP
+ * bit[3] - MLX4_CQE_STATUS_IPV4OPT
+ * bit[2] - MLX4_CQE_STATUS_IPV6
+ * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[0] - MLX4_CQE_STATUS_IPV4
+ * giving a total of up to 256 entries.
+ */
+ idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
+ return mlx4_ptype_table[idx];
+}
+
+/**
+ * Translate Rx completion flags to offload flags.
+ *
+ * @param flags
+ * Rx completion flags returned by mlx4_cqe_flags().
+ * @param csum
+ * Whether Rx checksums are enabled.
+ * @param csum_l2tun
+ * Whether Rx L2 tunnel checksums are enabled.
+ *
+ * @return
+ * Offload flags (ol_flags) in mbuf format.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
+{
+ uint32_t ol_flags = 0;
+
+ if (csum)
+ ol_flags |=
+ mlx4_transpose(flags,
+ MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
+ PKT_RX_IP_CKSUM_GOOD) |
+ mlx4_transpose(flags,
+ MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_GOOD);
+ if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
+ ol_flags |=
+ mlx4_transpose(flags,
+ MLX4_CQE_L2_TUNNEL_IPOK,
+ PKT_RX_IP_CKSUM_GOOD) |
+ mlx4_transpose(flags,
+ MLX4_CQE_L2_TUNNEL_L4_CSUM,
+ PKT_RX_L4_CKSUM_GOOD);
+ return ol_flags;
+}
+
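mlx4_transpose() used above moves a single flag bit from one position to another. A standalone sketch of one common way to implement such a transpose without variable shift counts (an assumed equivalent, not the driver's macro verbatim):

    #include <stdint.h>
    #include <stdio.h>

    /* Move a single flag bit from mask `from` to mask `to`; dividing or
     * multiplying by the ratio of the two masks shifts the bit without
     * needing a variable shift count. */
    static uint64_t transpose(uint64_t val, uint64_t from, uint64_t to)
    {
        return (from >= to) ? (val & from) / (from / to)
                            : (val & from) * (to / from);
    }

    int main(void)
    {
        /* Map a flag at bit 4 to a flag at bit 1: prints 2. */
        printf("%llu\n",
               (unsigned long long)transpose(1u << 4, 1u << 4, 1u << 1));
        return 0;
    }
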
+/**
+ * Extract checksum information from CQE flags.
+ *
+ * @param cqe
+ * Pointer to CQE structure.
+ * @param csum
+ * Whether Rx checksums are enabled.
+ * @param csum_l2tun
+ * Whether Rx L2 tunnel checksums are enabled.
+ *
+ * @return
+ * CQE checksum information.
+ */
+static inline uint32_t
+mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
+{
+ uint32_t flags = 0;
+
+ /*
+ * The relevant bits occupy different positions in their
+ * respective CQE fields, so they can be combined into one
+ * 32-bit variable without colliding.
+ */
+ if (csum)
+ flags = (rte_be_to_cpu_32(cqe->status) &
+ MLX4_CQE_STATUS_IPV4_CSUM_OK);
+ if (csum_l2tun)
+ flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
+ (MLX4_CQE_L2_TUNNEL |
+ MLX4_CQE_L2_TUNNEL_IPOK |
+ MLX4_CQE_L2_TUNNEL_L4_CSUM |
+ MLX4_CQE_L2_TUNNEL_IPV4));
+ return flags;
+}
+
+/**
+ * Poll one CQE from CQ.
+ *
+ * @param rxq
+ * Pointer to the receive queue structure.
+ * @param[out] out
+ * Just polled CQE.
+ *
+ * @return
+ * Number of bytes reported by the CQE, 0 if there is no completion.
+ */
+static unsigned int
+mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
+{
+ int ret = 0;
+ volatile struct mlx4_cqe *cqe = NULL;
+ struct mlx4_cq *cq = &rxq->mcq;
+
+ cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
+ if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ !!(cq->cons_index & cq->cqe_cnt))
+ goto out;
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rte_rmb();
+ assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
+ assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
+ MLX4_CQE_OPCODE_ERROR);
+ ret = rte_be_to_cpu_32(cqe->byte_cnt);
+ ++cq->cons_index;
+out:
+ *out = cqe;
+ return ret;
+}
+
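The ownership test above compares the CQE owner bit with the wrap parity of the consumer index: hardware flips the bit it writes on every pass over the ring, and cqe_cnt is the power-of-two ring size, so cons_index & cqe_cnt extracts that parity. A standalone sketch of the check (parameter names and the mask value are illustrative):

    #include <stdint.h>

    #define OWNER_MASK 0x80u /* Stand-in for MLX4_CQE_OWNER_MASK. */

    /* A CQE belongs to software when its ownership bit matches the wrap
     * parity of the consumer index; cqe_cnt must be a power of two. */
    static int cqe_is_valid(uint8_t owner_sr_opcode, uint32_t cons_index,
                            uint32_t cqe_cnt)
    {
        return !(!!(owner_sr_opcode & OWNER_MASK) ^
                 !!(cons_index & cqe_cnt));
    }
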
+/**
+ * DPDK callback for Rx with scattered packets support.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to Rx queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct rxq *rxq = dpdk_rxq;
+ const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
+ const uint16_t sges_n = rxq->sges_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ unsigned int i = 0;
+ uint32_t rq_ci = rxq->rq_ci << sges_n;
+ int len = 0;
+
+ while (pkts_n) {
+ volatile struct mlx4_cqe *cqe;
+ uint32_t idx = rq_ci & wr_cnt;
+ struct rte_mbuf *rep = (*rxq->elts)[idx];
+ volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];
+
+ /* Update the 'next' pointer of the previous segment. */
+ if (pkt)
+ seg->next = rep;
+ seg = rep;
+ rte_prefetch0(seg);
+ rte_prefetch0(scat);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(rep == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ if (!pkt) {
+ /*
+ * No buffers before we even started,
+ * bail out silently.
+ */
+ break;
+ }
+ while (pkt != seg) {
+ assert(pkt != (*rxq->elts)[idx]);
+ rep = pkt->next;
+ pkt->next = NULL;
+ pkt->nb_segs = 1;
+ rte_mbuf_raw_free(pkt);
+ pkt = rep;
+ }
+ break;
+ }
+ if (!pkt) {
+ /* Look for a new packet. */
+ len = mlx4_cq_poll_one(rxq, &cqe);
+ if (!len) {
+ rte_mbuf_raw_free(rep);
+ break;
+ }
+ if (unlikely(len < 0)) {
+ /* Rx error, packet is likely too large. */
+ rte_mbuf_raw_free(rep);
+ ++rxq->stats.idropped;
+ goto skip;
+ }
+ pkt = seg;
+ /* Update packet information. */
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ pkt->ol_flags = 0;
+ pkt->pkt_len = len;
+ if (rxq->csum | rxq->csum_l2tun) {
+ uint32_t flags =
+ mlx4_cqe_flags(cqe,
+ rxq->csum,
+ rxq->csum_l2tun);
+
+ pkt->ol_flags =
+ rxq_cq_to_ol_flags(flags,
+ rxq->csum,
+ rxq->csum_l2tun);
+ }
+ }
+ rep->nb_segs = 1;
+ rep->port = rxq->port_id;
+ rep->data_len = seg->data_len;
+ rep->data_off = seg->data_off;
+ (*rxq->elts)[idx] = rep;
+ /*
+ * Fill NIC descriptor with the new buffer. The lkey and size
+ * of the buffers are already known; only the buffer address
+ * changes.
+ */
+ scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ if (len > seg->data_len) {
+ len -= seg->data_len;
+ ++pkt->nb_segs;
+ ++rq_ci;
+ continue;
+ }
+ /* The last segment. */
+ seg->data_len = len;
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += pkt->pkt_len;
+ /* Return packet. */
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++i;
+skip:
+ /* Align consumer index to the next stride. */
+ rq_ci >>= sges_n;
+ ++rq_ci;
+ rq_ci <<= sges_n;
+ }
+ if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
+ return 0;
+ /* Update the consumer index. */
+ rxq->rq_ci = rq_ci >> sges_n;
+ rte_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ *rxq->mcq.set_ci_db =
+ rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+ return i;
+}
+
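After a completed or dropped packet, the consumer index is rounded up to the start of the next stride of 2^sges_n WQEs, as done at the skip label above. A standalone sketch of that alignment step:

    #include <stdio.h>

    /* Advance a consumer index to the start of the next stride of
     * 2^sges_n entries. */
    static unsigned int next_stride(unsigned int rq_ci, unsigned int sges_n)
    {
        rq_ci >>= sges_n;
        ++rq_ci;
        return rq_ci << sges_n;
    }

    int main(void)
    {
        /* With four entries per stride (sges_n = 2), index 6 -> 8. */
        printf("%u\n", next_stride(6, 2));
        return 0;
    }
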
+/**
+ * Dummy DPDK callback for Tx.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to Tx queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_txq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
+
+/**
+ * Dummy DPDK callback for Rx.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to Rx queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ (void)dpdk_rxq;
+ (void)pkts;
+ (void)pkts_n;
+ return 0;
+}
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
new file mode 100644
index 00000000..4acad801
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MLX4_RXTX_H_
+#define MLX4_RXTX_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_prm.h"
+
+/** Rx queue counters. */
+struct mlx4_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+ uint64_t idropped; /**< Total of packets dropped when Rx ring full. */
+ uint64_t rx_nombuf; /**< Total of Rx mbuf allocation failures. */
+};
+
+/** Rx queue descriptor. */
+struct rxq {
+ struct priv *priv; /**< Back pointer to private data. */
+ struct rte_mempool *mp; /**< Memory pool for allocations. */
+ struct mlx4_mr *mr; /**< Memory region. */
+ struct ibv_cq *cq; /**< Completion queue. */
+ struct ibv_wq *wq; /**< Work queue. */
+ struct ibv_comp_channel *channel; /**< Rx completion channel. */
+ uint16_t rq_ci; /**< Saved RQ consumer index. */
+ uint16_t port_id; /**< Port ID for incoming packets. */
+ uint16_t sges_n; /**< Number of segments per packet (log2 value). */
+ uint16_t elts_n; /**< Mbuf queue size (log2 value). */
+ struct rte_mbuf *(*elts)[]; /**< Rx elements. */
+ volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
+ volatile uint32_t *rq_db; /**< RQ doorbell record. */
+ uint32_t csum:1; /**< Enable checksum offloading. */
+ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
+ struct mlx4_rxq_stats stats; /**< Rx queue counters. */
+ unsigned int socket; /**< CPU socket ID for allocations. */
+ uint32_t usecnt; /**< Number of users relying on queue resources. */
+ uint8_t data[]; /**< Remaining queue resources. */
+};
+
+/** Shared flow target for Rx queues. */
+struct mlx4_rss {
+ LIST_ENTRY(mlx4_rss) next; /**< Next entry in list. */
+ struct priv *priv; /**< Back pointer to private data. */
+ uint32_t refcnt; /**< Reference count for this object. */
+ uint32_t usecnt; /**< Number of users relying on @p qp and @p ind. */
+ struct ibv_qp *qp; /**< Queue pair. */
+ struct ibv_rwq_ind_table *ind; /**< Indirection table. */
+ uint64_t fields; /**< Fields for RSS processing (Verbs format). */
+ uint8_t key[MLX4_RSS_HASH_KEY_SIZE]; /**< Hash key to use. */
+ uint16_t queues; /**< Number of target queues. */
+ uint16_t queue_id[]; /**< Target queues. */
+};
+
+/** Tx element. */
+struct txq_elt {
+ struct rte_mbuf *buf; /**< Buffer. */
+};
+
+/** Tx queue counters. */
+struct mlx4_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+ uint64_t odropped; /**< Total of packets not sent when Tx ring full. */
+};
+
+/** Tx queue descriptor. */
+struct txq {
+ struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */
+ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
+ unsigned int elts_head; /**< Current index in (*elts)[]. */
+ unsigned int elts_tail; /**< First element awaiting completion. */
+ unsigned int elts_comp; /**< Number of packets awaiting completion. */
+ int elts_comp_cd; /**< Countdown for next completion. */
+ unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
+ unsigned int elts_n; /**< (*elts)[] length. */
+ struct txq_elt (*elts)[]; /**< Tx elements. */
+ struct mlx4_txq_stats stats; /**< Tx queue counters. */
+ uint32_t max_inline; /**< Max inline send size. */
+ uint32_t csum:1; /**< Enable checksum offloading. */
+ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
+ uint8_t *bounce_buf;
+ /**< Memory used for storing the first DWORD of data TXBBs. */
+ struct {
+ const struct rte_mempool *mp; /**< Cached memory pool. */
+ struct mlx4_mr *mr; /**< Memory region (for mp). */
+ uint32_t lkey; /**< mr->lkey copy. */
+ } mp2mr[MLX4_PMD_TX_MP_CACHE]; /**< MP to MR translation table. */
+ struct priv *priv; /**< Back pointer to private data. */
+ unsigned int socket; /**< CPU socket ID for allocations. */
+ struct ibv_cq *cq; /**< Completion queue. */
+ struct ibv_qp *qp; /**< Queue pair. */
+ uint8_t data[]; /**< Remaining queue resources. */
+};
+
+/* mlx4_rxq.c */
+
+uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
+int mlx4_rss_init(struct priv *priv);
+void mlx4_rss_deinit(struct priv *priv);
+struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
+ uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ uint16_t queues, const uint16_t queue_id[]);
+void mlx4_rss_put(struct mlx4_rss *rss);
+int mlx4_rss_attach(struct mlx4_rss *rss);
+void mlx4_rss_detach(struct mlx4_rss *rss);
+int mlx4_rxq_attach(struct rxq *rxq);
+void mlx4_rxq_detach(struct rxq *rxq);
+int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx4_rx_queue_release(void *dpdk_rxq);
+
+/* mlx4_rxtx.c */
+
+uint16_t mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+
+/* mlx4_txq.c */
+
+int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+void mlx4_tx_queue_release(void *dpdk_txq);
+
+/**
+ * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
+ * Call mlx4_txq_add_mr() if MP is not registered yet.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param[in] mp
+ * Memory pool for which a memory region lkey must be returned.
+ *
+ * @return
+ * mr->lkey on success, (uint32_t)-1 on failure.
+ */
+static inline uint32_t
+mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+{
+ unsigned int i;
+
+ for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
+ if (unlikely(txq->mp2mr[i].mp == NULL)) {
+ /* Unknown MP, add a new MR for it. */
+ break;
+ }
+ if (txq->mp2mr[i].mp == mp) {
+ /* MP found. */
+ return txq->mp2mr[i].lkey;
+ }
+ }
+ return mlx4_txq_add_mr(txq, mp, i);
+}
+
+#endif /* MLX4_RXTX_H_ */
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
new file mode 100644
index 00000000..7882a4d0
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -0,0 +1,414 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Tx queues configuration for mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "mlx4.h"
+#include "mlx4_autoconf.h"
+#include "mlx4_prm.h"
+#include "mlx4_rxtx.h"
+#include "mlx4_utils.h"
+
+/**
+ * Free Tx queue elements.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ */
+static void
+mlx4_txq_free_elts(struct txq *txq)
+{
+ unsigned int elts_head = txq->elts_head;
+ unsigned int elts_tail = txq->elts_tail;
+ struct txq_elt (*elts)[txq->elts_n] = txq->elts;
+
+ DEBUG("%p: freeing WRs", (void *)txq);
+ while (elts_tail != elts_head) {
+ struct txq_elt *elt = &(*elts)[elts_tail];
+
+ assert(elt->buf != NULL);
+ rte_pktmbuf_free(elt->buf);
+ elt->buf = NULL;
+ if (++elts_tail == RTE_DIM(*elts))
+ elts_tail = 0;
+ }
+ txq->elts_tail = txq->elts_head;
+}
+
+struct txq_mp2mr_mbuf_check_data {
+ int ret;
+};
+
+/**
+ * Callback function for rte_mempool_obj_iter() to check whether a given
+ * mempool object looks like a mbuf.
+ *
+ * @param[in] mp
+ * The mempool pointer.
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data); contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
+ * @param index
+ * Object index, unused.
+ */
+static void
+mlx4_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index)
+{
+ struct txq_mp2mr_mbuf_check_data *data = arg;
+ struct rte_mbuf *buf = obj;
+
+ (void)index;
+ /*
+ * Check whether mbuf structure fits element size and whether mempool
+ * pointer is valid.
+ */
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
+ data->ret = -1;
+}
+
+/**
+ * Iterator function for rte_mempool_walk() to register existing mempools and
+ * fill the MP to MR cache of a Tx queue.
+ *
+ * @param[in] mp
+ * Memory Pool to register.
+ * @param *arg
+ * Pointer to Tx queue structure.
+ */
+static void
+mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+{
+ struct txq *txq = arg;
+ struct txq_mp2mr_mbuf_check_data data = {
+ .ret = 0,
+ };
+
+ /* Register mempool only if the first element looks like a mbuf. */
+ if (rte_mempool_obj_iter(mp, mlx4_txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
+ return;
+ mlx4_txq_mp2mr(txq, mp);
+}
+
+/**
+ * Retrieve information needed to directly access the Tx queue.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mlxdv
+ * Pointer to device information for this Tx queue.
+ */
+static void
+mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ struct mlx4_cq *cq = &txq->mcq;
+ struct mlx4dv_qp *dqp = mlxdv->qp.out;
+ struct mlx4dv_cq *dcq = mlxdv->cq.out;
+ uint32_t sq_size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
+
+ sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
+ /* Total length, including headroom and spare WQEs. */
+ sq->eob = sq->buf + sq_size;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->txbb_cnt =
+ (dqp->sq.wqe_cnt << dqp->sq.wqe_shift) >> MLX4_TXBB_SHIFT;
+ sq->txbb_cnt_mask = sq->txbb_cnt - 1;
+ sq->db = dqp->sdb;
+ sq->doorbell_qpn = dqp->doorbell_qpn;
+ sq->headroom_txbbs =
+ (2048 + (1 << dqp->sq.wqe_shift)) >> MLX4_TXBB_SHIFT;
+ cq->buf = dcq->buf.buf;
+ cq->cqe_cnt = dcq->cqe_cnt;
+ cq->set_ci_db = dcq->set_ci_db;
+ cq->cqe_64 = (dcq->cqe_size & 64) ? 1 : 0;
+}
+
+/**
+ * DPDK callback to configure a Tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4dv_obj mlxdv;
+ struct mlx4dv_qp dv_qp;
+ struct mlx4dv_cq dv_cq;
+ struct txq_elt (*elts)[desc];
+ struct ibv_qp_init_attr qp_init_attr;
+ struct txq *txq;
+ uint8_t *bounce_buf;
+ struct mlx4_malloc_vec vec[] = {
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*txq),
+ .addr = (void **)&txq,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = sizeof(*elts),
+ .addr = (void **)&elts,
+ },
+ {
+ .align = RTE_CACHE_LINE_SIZE,
+ .size = MLX4_MAX_WQE_SIZE,
+ .addr = (void **)&bounce_buf,
+ },
+ };
+ int ret;
+
+ (void)conf; /* Thresholds configuration (ignored). */
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+ txq = dev->data->tx_queues[idx];
+ if (txq) {
+ rte_errno = EEXIST;
+ DEBUG("%p: Tx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ if (!desc) {
+ rte_errno = EINVAL;
+ ERROR("%p: invalid number of Tx descriptors", (void *)dev);
+ return -rte_errno;
+ }
+ /* Allocate and initialize Tx queue. */
+ mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket);
+ if (!txq) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ *txq = (struct txq){
+ .priv = priv,
+ .stats = {
+ .idx = idx,
+ },
+ .socket = socket,
+ .elts_n = desc,
+ .elts = elts,
+ .elts_head = 0,
+ .elts_tail = 0,
+ .elts_comp = 0,
+ /*
+ * Request send completion every MLX4_PMD_TX_PER_COMP_REQ
+ * packets or at least 4 times per ring.
+ */
+ .elts_comp_cd =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .elts_comp_cd_init =
+ RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
+ .csum = priv->hw_csum,
+ .csum_l2tun = priv->hw_csum_l2tun,
+ /* Enable Tx loopback for VF devices. */
+ .lb = !!priv->vf,
+ .bounce_buf = bounce_buf,
+ };
+ txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+ if (!txq->cq) {
+ rte_errno = ENOMEM;
+ ERROR("%p: CQ creation failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ qp_init_attr = (struct ibv_qp_init_attr){
+ .send_cq = txq->cq,
+ .recv_cq = txq->cq,
+ .cap = {
+ .max_send_wr =
+ RTE_MIN(priv->device_attr.max_qp_wr, desc),
+ .max_send_sge = 1,
+ .max_inline_data = MLX4_PMD_MAX_INLINE,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ /* Do not request completion events by default. */
+ .sq_sig_all = 0,
+ };
+ txq->qp = ibv_create_qp(priv->pd, &qp_init_attr);
+ if (!txq->qp) {
+ rte_errno = errno ? errno : EINVAL;
+ ERROR("%p: QP creation failure: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ txq->max_inline = qp_init_attr.cap.max_inline_data;
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTS,
+ },
+ IBV_QP_STATE);
+ if (ret) {
+ rte_errno = ret;
+ ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
+ (void *)dev, strerror(rte_errno));
+ goto error;
+ }
+ /* Retrieve device queue information. */
+ mlxdv.cq.in = txq->cq;
+ mlxdv.cq.out = &dv_cq;
+ mlxdv.qp.in = txq->qp;
+ mlxdv.qp.out = &dv_qp;
+ ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
+ if (ret) {
+ rte_errno = EINVAL;
+ ERROR("%p: failed to obtain information needed for"
+ " accessing the device queues", (void *)dev);
+ goto error;
+ }
+ mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
+ /* Pre-register known mempools. */
+ rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
+ DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
+ dev->data->tx_queues[idx] = txq;
+ return 0;
+error:
+ dev->data->tx_queues[idx] = NULL;
+ ret = rte_errno;
+ mlx4_tx_queue_release(txq);
+ rte_errno = ret;
+ assert(rte_errno > 0);
+ return -rte_errno;
+}
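
This callback is reached through the generic ethdev API; a hedged
application-side sketch (port_id and the descriptor count are illustrative):

struct rte_eth_txconf txconf;

memset(&txconf, 0, sizeof(txconf)); /* thresholds are ignored by this PMD */
if (rte_eth_tx_queue_setup(port_id, 0, 512,
			   rte_eth_dev_socket_id(port_id), &txconf) < 0)
	rte_exit(EXIT_FAILURE, "cannot set up Tx queue\n");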
+
+/**
+ * DPDK callback to release a Tx queue.
+ *
+ * @param dpdk_txq
+ * Generic Tx queue pointer.
+ */
+void
+mlx4_tx_queue_release(void *dpdk_txq)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ struct priv *priv;
+ unsigned int i;
+
+ if (txq == NULL)
+ return;
+ priv = txq->priv;
+ for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
+ if (priv->dev->data->tx_queues[i] == txq) {
+ DEBUG("%p: removing Tx queue %p from list",
+ (void *)priv->dev, (void *)txq);
+ priv->dev->data->tx_queues[i] = NULL;
+ break;
+ }
+ mlx4_txq_free_elts(txq);
+ if (txq->qp)
+ claim_zero(ibv_destroy_qp(txq->qp));
+ if (txq->cq)
+ claim_zero(ibv_destroy_cq(txq->cq));
+ for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
+ if (!txq->mp2mr[i].mp)
+ break;
+ assert(txq->mp2mr[i].mr);
+ mlx4_mr_put(txq->mp2mr[i].mr);
+ }
+ rte_free(txq);
+}
diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c
new file mode 100644
index 00000000..f18c7145
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_utils.c
@@ -0,0 +1,217 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * Utility functions used by the mlx4 driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+
+#include "mlx4_utils.h"
+
+/**
+ * Make a file descriptor non-blocking.
+ *
+ * @param fd
+ * File descriptor to alter.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_fd_set_non_blocking(int fd)
+{
+ int ret = fcntl(fd, F_GETFL);
+
+ if (ret != -1 && !fcntl(fd, F_SETFL, ret | O_NONBLOCK))
+ return 0;
+ assert(errno);
+ rte_errno = errno;
+ return -rte_errno;
+}
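
A usage sketch (illustration only; assumes the asynchronous event file
descriptor exposed by struct ibv_context):

/* Make the Verbs async event fd non-blocking before polling it. */
if (mlx4_fd_set_non_blocking(priv->ctx->async_fd) < 0)
	ERROR("cannot make async fd non-blocking: %s",
	      strerror(rte_errno));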
+
+/**
+ * Internal helper to allocate memory once for several disparate objects.
+ *
+ * The most restrictive alignment constraint for standard objects is assumed
+ * to be sizeof(double) and is used as a default value.
+ *
+ * C11 code would include stdalign.h and use alignof(max_align_t); however,
+ * we'll stick with C99 for the time being.
+ */
+static inline size_t
+mlx4_mallocv_inline(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int zero, int socket)
+{
+ unsigned int i;
+ size_t size;
+ size_t least;
+ uint8_t *data = NULL;
+ int fill = !vec[0].addr;
+
+fill:
+ size = 0;
+ least = 0;
+ for (i = 0; i < cnt; ++i) {
+ size_t align = (uintptr_t)vec[i].align;
+
+ if (!align) {
+ align = sizeof(double);
+ } else if (!rte_is_power_of_2(align)) {
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (least < align)
+ least = align;
+ align = RTE_ALIGN_CEIL(size, align);
+ size = align + vec[i].size;
+ if (fill && vec[i].addr)
+ *vec[i].addr = data + align;
+ }
+ if (fill)
+ return size;
+ if (!zero)
+ data = rte_malloc_socket(type, size, least, socket);
+ else
+ data = rte_zmalloc_socket(type, size, least, socket);
+ if (data) {
+ fill = 1;
+ goto fill;
+ }
+ rte_errno = ENOMEM;
+error:
+ for (i = 0; i != cnt; ++i)
+ if (vec[i].addr)
+ *vec[i].addr = NULL;
+ return 0;
+}
+
+/**
+ * Allocate memory once for several disparate objects.
+ *
+ * This function adds iovec-like semantics (e.g. readv()) to rte_malloc().
+ * Memory is allocated once for several contiguous objects of nonuniform
+ * sizes and alignment constraints.
+ *
+ * Each entry of @p vec describes the size, alignment constraint and
+ * provides a buffer address where the resulting object pointer must be
+ * stored.
+ *
+ * The buffer of the first entry is guaranteed to point to the beginning of
+ * the allocated region and is safe to use with rte_free().
+ *
+ * NULL buffers are silently ignored.
+ *
+ * Providing a NULL buffer in the first entry prevents this function from
+ * allocating any memory but has otherwise no effect on its behavior. In
+ * this case, the contents of remaining non-NULL buffers are updated with
+ * addresses relative to zero (i.e. offsets that would have been used during
+ * the allocation).
+ *
+ * @param[in] type
+ * A string identifying the type of allocated objects (useful for debug
+ * purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param[in, out] vec
+ * Description of objects to allocate memory for.
+ * @param cnt
+ * Number of entries in @p vec.
+ *
+ * @return
+ * Size in bytes of the allocated region including any padding. In case of
+ * error, rte_errno is set, 0 is returned and NULL is stored in the
+ * non-NULL buffers pointed to by @p vec.
+ *
+ * @see struct mlx4_malloc_vec
+ * @see rte_malloc()
+ */
+size_t
+mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 0, SOCKET_ID_ANY);
+}
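
The NULL-buffer convention described above can be exercised as follows
(a sketch with assumed sizes, not code from the patch):

uint8_t *second;
struct mlx4_malloc_vec vec[] = {
	{ .align = 8, .size = 64, .addr = NULL },
	{ .align = RTE_CACHE_LINE_SIZE, .size = 1024,
	  .addr = (void **)&second },
};
/* First address is NULL: nothing is allocated, the total region size is
 * returned and "second" receives the offset the object would have had. */
size_t need = mlx4_mallocv(NULL, vec, RTE_DIM(vec));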
+
+/**
+ * Combines the semantics of mlx4_mallocv() with those of rte_zmalloc().
+ *
+ * @see mlx4_mallocv()
+ * @see rte_zmalloc()
+ */
+size_t
+mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 1, SOCKET_ID_ANY);
+}
+
+/**
+ * Socket-aware version of mlx4_mallocv().
+ *
+ * This function takes one additional parameter.
+ *
+ * @param socket
+ * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this
+ * function will behave the same as mlx4_mallocv().
+ *
+ * @see mlx4_mallocv()
+ * @see rte_malloc_socket()
+ */
+size_t
+mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 0, socket);
+}
+
+/**
+ * Combines the semantics of mlx4_mallocv_socket() with those of
+ * rte_zmalloc_socket().
+ *
+ * @see mlx4_mallocv_socket()
+ * @see rte_zmalloc_socket()
+ */
+size_t
+mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket)
+{
+ return mlx4_mallocv_inline(type, vec, cnt, 1, socket);
+}
diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h
new file mode 100644
index 00000000..dc529c9c
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_utils.h
@@ -0,0 +1,133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MLX4_UTILS_H_
+#define MLX4_UTILS_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+
+#include "mlx4.h"
+
+#ifndef NDEBUG
+
+/*
+ * When debugging is enabled (NDEBUG not defined), file, line and function
+ * information replace the driver name (MLX4_DRIVER_NAME) in log messages.
+ */
+
+/** Return the file name part of a path. */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+ const char *n = s;
+
+ while (*n)
+ if (*(n++) == '/')
+ s = n;
+ return s;
+}
+
+#define PMD_DRV_LOG(level, ...) \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s:%u: %s(): " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ pmd_drv_log_basename(__FILE__), \
+ __LINE__, \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
+#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
+#define claim_zero(...) \
+ (void)(((__VA_ARGS__) == 0) || \
+ DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
+#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
+
+#else /* NDEBUG */
+
+/*
+ * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
+ * any check when debugging is disabled.
+ */
+
+#define PMD_DRV_LOG(level, ...) \
+ RTE_LOG(level, PMD, \
+ RTE_FMT(MLX4_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+
+#endif /* NDEBUG */
+
+#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
+
+/** Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
+ \
+ snprintf(name, sizeof(name), __VA_ARGS__)
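
For example (path and variables are illustrative), the first snprintf()
pass sizes the buffer and the second fills it:

MKSTR(path, "%s/device/net", ibdev_path);
file = fopen(path, "rb"); /* "path" is stack-allocated, exactly sized */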
+
+/** Generate a string out of the provided arguments. */
+#define MLX4_STR(...) # __VA_ARGS__
+
+/** Similar to MLX4_STR() with enclosed macros expanded first. */
+#define MLX4_STR_EXPAND(...) MLX4_STR(__VA_ARGS__)
+
+/** Object description used with mlx4_mallocv() and similar functions. */
+struct mlx4_malloc_vec {
+ size_t align; /**< Alignment constraint (power of 2), 0 if unknown. */
+ size_t size; /**< Object size. */
+ void **addr; /**< Storage for allocation address. */
+};
+
+/* mlx4_utils.c */
+
+int mlx4_fd_set_non_blocking(int fd);
+size_t mlx4_mallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt);
+size_t mlx4_zmallocv(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt);
+size_t mlx4_mallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket);
+size_t mlx4_zmallocv_socket(const char *type, const struct mlx4_malloc_vec *vec,
+ unsigned int cnt, int socket);
+
+#endif /* MLX4_UTILS_H_ */
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 8736de5d..a3984eb9 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -39,8 +39,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
-ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec_sse.c
+ifneq ($(filter y,$(CONFIG_RTE_ARCH_X86_64) \
+ $(CONFIG_RTE_ARCH_ARM64)),)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx_vec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_trigger.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ethdev.c
@@ -49,9 +50,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxmode.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -63,7 +64,10 @@ CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
-LDLIBS += -libverbs
+LDLIBS += -libverbs -lmlx5
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
# A few warnings cannot be avoided in external headers.
CFLAGS += -Wno-error=cast-qual
@@ -104,24 +108,24 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
- HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP \
- infiniband/verbs_exp.h \
- enum IBV_EXP_FLOW_SPEC_ACTION_DROP \
+ HAVE_IBV_DEVICE_VXLAN_SUPPORT \
+ infiniband/verbs.h \
+ enum IBV_DEVICE_VXLAN_SUPPORT \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
- infiniband/verbs_exp.h \
- enum IBV_EXP_CQ_COMPRESSED_CQE \
+ HAVE_IBV_WQ_FLAG_RX_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAG_RX_END_PADDING \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
- infiniband/mlx5_hw.h \
- enum MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
+ HAVE_IBV_MLX5_MOD_MPW \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_VERBS_MLX5_OPCODE_TSO \
- infiniband/mlx5_hw.h \
- enum MLX5_OPCODE_TSO \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_ETHTOOL_LINK_MODE_25G \
@@ -139,9 +143,9 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_UPDATE_CQ_CI \
- infiniband/mlx5_hw.h \
- func ibv_mlx5_exp_update_cq_ci \
+ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
+ infiniband/verbs.h \
+ enum IBV_FLOW_SPEC_ACTION_COUNT \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
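
Each auto-config-h.sh probe above checks a system header for the named
symbol and records the result in mlx5_autoconf.h. The generated file would
contain lines along these lines (a sketch; the exact output depends on the
installed rdma-core/Verbs headers):

#define HAVE_IBV_DEVICE_VXLAN_SUPPORT 1
#define HAVE_IBV_MLX5_MOD_MPW 1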
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b7e50463..0548d17a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -50,19 +50,13 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -103,6 +97,15 @@
/* Default PMD specific parameter value. */
#define MLX5_ARG_UNSET (-1)
+#ifndef HAVE_IBV_MLX5_MOD_MPW
+#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
+#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
+#endif
+
+#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
+#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+#endif
+
struct mlx5_args {
int cqe_comp;
int txq_inline;
@@ -134,6 +137,52 @@ mlx5_getenv_int(const char *name)
}
/**
+ * Verbs callback to allocate memory. This function should allocate the space
+ * according to the size provided, residing inside a huge page.
+ * Note that all allocations must respect the alignment required by libmlx5
+ * (i.e. currently sysconf(_SC_PAGESIZE)).
+ *
+ * @param[in] size
+ * The size in bytes of the memory to allocate.
+ * @param[in] data
+ * A pointer to the callback data.
+ *
+ * @return
+ * A pointer to the allocated space.
+ */
+static void *
+mlx5_alloc_verbs_buf(size_t size, void *data)
+{
+ struct priv *priv = data;
+ void *ret;
+ size_t alignment = sysconf(_SC_PAGESIZE);
+
+ assert(data != NULL);
+ assert(!mlx5_is_secondary());
+ ret = rte_malloc_socket(__func__, size, alignment,
+ priv->dev->device->numa_node);
+ DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
+ return ret;
+}
+
+/**
+ * Verbs callback to free memory.
+ *
+ * @param[in] ptr
+ * A pointer to the memory to free.
+ * @param[in] data
+ * A pointer to the callback data.
+ */
+static void
+mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
+{
+ assert(data != NULL);
+ assert(!mlx5_is_secondary());
+ DEBUG("Extern free request: %p", ptr);
+ rte_free(ptr);
+}
+
+/**
* DPDK callback to close the device.
*
* Destroy all queues and objects, free memory.
@@ -146,6 +195,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
{
struct priv *priv = mlx5_get_priv(dev);
unsigned int i;
+ int ret;
priv_lock(priv);
DEBUG("%p: closing device \"%s\"",
@@ -153,48 +203,23 @@ mlx5_dev_close(struct rte_eth_dev *dev)
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
-
- /* Remove flow director elements. */
- priv_fdir_disable(priv);
- priv_fdir_delete_filters_list(priv);
-
+ priv_dev_traffic_disable(priv, dev);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
if (priv->rxqs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl;
-
- if (rxq == NULL)
- continue;
- rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- (*priv->rxqs)[i] = NULL;
- rxq_cleanup(rxq_ctrl);
- rte_free(rxq_ctrl);
- }
+ for (i = 0; (i != priv->rxqs_n); ++i)
+ mlx5_priv_rxq_release(priv, i);
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
- for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
- struct txq_ctrl *txq_ctrl;
-
- if (txq == NULL)
- continue;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- (*priv->txqs)[i] = NULL;
- txq_cleanup(txq_ctrl);
- rte_free(txq_ctrl);
- }
+ for (i = 0; (i != priv->txqs_n); ++i)
+ mlx5_priv_txq_release(priv, i);
priv->txqs_n = 0;
priv->txqs = NULL;
}
@@ -204,18 +229,40 @@ mlx5_dev_close(struct rte_eth_dev *dev)
claim_zero(ibv_close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
- if (priv->rss_conf != NULL) {
- for (i = 0; (i != hash_rxq_init_n); ++i)
- rte_free((*priv->rss_conf)[i]);
- rte_free(priv->rss_conf);
- }
+ if (priv->rss_conf.rss_key != NULL)
+ rte_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
+ priv_socket_uninit(priv);
+ ret = mlx5_priv_hrxq_ibv_verify(priv);
+ if (ret)
+ WARN("%p: some Hash Rx queue still remain", (void *)priv);
+ ret = mlx5_priv_ind_table_ibv_verify(priv);
+ if (ret)
+ WARN("%p: some Indirection table still remain", (void *)priv);
+ ret = mlx5_priv_rxq_ibv_verify(priv);
+ if (ret)
+ WARN("%p: some Verbs Rx queue still remain", (void *)priv);
+ ret = mlx5_priv_rxq_verify(priv);
+ if (ret)
+ WARN("%p: some Rx Queues still remain", (void *)priv);
+ ret = mlx5_priv_txq_ibv_verify(priv);
+ if (ret)
+ WARN("%p: some Verbs Tx queue still remain", (void *)priv);
+ ret = mlx5_priv_txq_verify(priv);
+ if (ret)
+ WARN("%p: some Tx Queues still remain", (void *)priv);
+ ret = priv_flow_verify(priv);
+ if (ret)
+ WARN("%p: some flows still remain", (void *)priv);
+ ret = priv_mr_verify(priv);
+ if (ret)
+ WARN("%p: some Memory Region still remain", (void *)priv);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
-static const struct eth_dev_ops mlx5_dev_ops = {
+const struct eth_dev_ops mlx5_dev_ops = {
.dev_configure = mlx5_dev_configure,
.dev_start = mlx5_dev_start,
.dev_stop = mlx5_dev_stop,
@@ -254,10 +301,55 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.filter_ctrl = mlx5_dev_filter_ctrl,
.rx_descriptor_status = mlx5_rx_descriptor_status,
.tx_descriptor_status = mlx5_tx_descriptor_status,
-#ifdef HAVE_UPDATE_CQ_CI
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
-#endif
+};
+
+static const struct eth_dev_ops mlx5_dev_sec_ops = {
+ .stats_get = mlx5_stats_get,
+ .stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
+ .dev_infos_get = mlx5_dev_infos_get,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+};
+
+/* Available operations in flow isolated mode. */
+const struct eth_dev_ops mlx5_dev_ops_isolate = {
+ .dev_configure = mlx5_dev_configure,
+ .dev_start = mlx5_dev_start,
+ .dev_stop = mlx5_dev_stop,
+ .dev_set_link_down = mlx5_set_link_down,
+ .dev_set_link_up = mlx5_set_link_up,
+ .dev_close = mlx5_dev_close,
+ .link_update = mlx5_link_update,
+ .stats_get = mlx5_stats_get,
+ .stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
+ .dev_infos_get = mlx5_dev_infos_get,
+ .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+ .vlan_filter_set = mlx5_vlan_filter_set,
+ .rx_queue_setup = mlx5_rx_queue_setup,
+ .tx_queue_setup = mlx5_tx_queue_setup,
+ .rx_queue_release = mlx5_rx_queue_release,
+ .tx_queue_release = mlx5_tx_queue_release,
+ .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+ .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+ .mac_addr_remove = mlx5_mac_addr_remove,
+ .mac_addr_add = mlx5_mac_addr_add,
+ .mac_addr_set = mlx5_mac_addr_set,
+ .mtu_set = mlx5_dev_set_mtu,
+ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+ .vlan_offload_set = mlx5_vlan_offload_set,
+ .filter_ctrl = mlx5_dev_filter_ctrl,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_intr_enable = mlx5_rx_intr_enable,
+ .rx_queue_intr_disable = mlx5_rx_intr_disable,
};
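
A plausible sketch of how this table is meant to be used (an assumption
based on the surrounding code, not shown in this hunk): toggling flow
isolation would swap the device operations, e.g.

	if (enable)
		dev->dev_ops = &mlx5_dev_ops_isolate;
	else
		dev->dev_ops = &mlx5_dev_ops;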
static struct {
@@ -449,12 +541,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_device *ibv_dev;
int err = 0;
struct ibv_context *attr_ctx = NULL;
- struct ibv_device_attr device_attr;
+ struct ibv_device_attr_ex device_attr;
unsigned int sriov;
unsigned int mps;
- unsigned int tunnel_en;
+ unsigned int cqe_comp;
+ unsigned int tunnel_en = 0;
int idx;
int i;
+ struct mlx5dv_context attrs_out;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ struct ibv_counter_set_description cs_desc;
+#endif
(void)pci_drv;
assert(pci_drv == &mlx5_driver);
@@ -500,34 +597,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
- /*
- * Multi-packet send is supported by ConnectX-4 Lx PF as well
- * as all ConnectX-5 devices.
- */
switch (pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
tunnel_en = 1;
- mps = MLX5_MPW_DISABLED;
break;
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
- mps = MLX5_MPW;
- break;
case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
tunnel_en = 1;
- mps = MLX5_MPW_ENHANCED;
break;
default:
- mps = MLX5_MPW_DISABLED;
+ break;
}
INFO("PCI information matches, using device \"%s\""
- " (SR-IOV: %s, %sMPS: %s)",
+ " (SR-IOV: %s)",
list[i]->name,
- sriov ? "true" : "false",
- mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- mps != MLX5_MPW_DISABLED ? "true" : "false");
+ sriov ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
break;
@@ -548,11 +635,33 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr))
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+ mlx5dv_query_device(attr_ctx, &attrs_out);
+ if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ DEBUG("Enhanced MPW is supported");
+ mps = MLX5_MPW_ENHANCED;
+ } else {
+ DEBUG("MPW is supported");
+ mps = MLX5_MPW;
+ }
+ } else {
+ DEBUG("MPW isn't supported");
+ mps = MLX5_MPW_DISABLED;
+ }
+ if (RTE_CACHE_LINE_SIZE == 128 &&
+ !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ cqe_comp = 0;
+ else
+ cqe_comp = 1;
+ if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
- INFO("%u port(s) detected", device_attr.phys_port_cnt);
+ INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
- for (i = 0; i < device_attr.phys_port_cnt; i++) {
+ for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
uint32_t test = (1 << i);
struct ibv_context *ctx = NULL;
@@ -560,9 +669,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
struct rte_eth_dev *eth_dev;
- struct ibv_exp_device_attr exp_device_attr;
+ struct ibv_device_attr_ex device_attr_ex;
struct ether_addr mac;
uint16_t num_vfs = 0;
+ struct ibv_device_attr_ex device_attr;
struct mlx5_args args = {
.cqe_comp = MLX5_ARG_UNSET,
.txq_inline = MLX5_ARG_UNSET,
@@ -575,20 +685,49 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.rx_vec_en = MLX5_ARG_UNSET,
};
- exp_device_attr.comp_mask =
- IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
- IBV_EXP_DEVICE_ATTR_RX_HASH |
- IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
- IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
- IBV_EXP_DEVICE_ATTR_TSO_CAPS |
- 0;
+ mlx5_dev[idx].ports |= test;
+
+ if (mlx5_is_secondary()) {
+ /* from rte_ethdev.c */
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "%s port %u",
+ ibv_get_device_name(ibv_dev), port);
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (eth_dev == NULL) {
+ ERROR("can not attach rte ethdev");
+ err = ENOMEM;
+ goto error;
+ }
+ eth_dev->device = &pci_dev->device;
+ eth_dev->dev_ops = &mlx5_dev_sec_ops;
+ priv = eth_dev->data->dev_private;
+ /* Receive command fd from primary process */
+ err = priv_socket_connect(priv);
+ if (err < 0) {
+ err = -err;
+ goto error;
+ }
+ /* Remap UAR for Tx queues. */
+ err = priv_tx_uar_remap(priv, err);
+ if (err < 0) {
+ err = -err;
+ goto error;
+ }
+ priv_dev_select_rx_function(priv, eth_dev);
+ priv_dev_select_tx_function(priv, eth_dev);
+ continue;
+ }
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
- if (ctx == NULL)
+ if (ctx == NULL) {
+ err = ENODEV;
goto port_error;
+ }
+ ibv_query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
@@ -599,6 +738,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
+ err = EINVAL;
goto port_error;
}
@@ -628,12 +768,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
priv->ctx = ctx;
+ strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
+ sizeof(priv->ibdev_path));
priv->device_attr = device_attr;
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = 1; /* Enable compression by default. */
+ priv->cqe_comp = cqe_comp;
priv->tunnel_en = tunnel_en;
/* Enable vector by default if supported. */
priv->tx_vec_en = 1;
@@ -645,25 +787,33 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
goto port_error;
}
mlx5_args_assign(priv, &args);
- if (ibv_exp_query_device(ctx, &exp_device_attr)) {
- ERROR("ibv_exp_query_device() failed");
+ if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
+ ERROR("ibv_query_device_ex() failed");
goto port_error;
}
priv->hw_csum =
- ((exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
- (exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
+ !!(device_attr_ex.device_cap_flags_ex &
+ IBV_DEVICE_RAW_IP_CSUM);
DEBUG("checksum offloading is %ssupported",
(priv->hw_csum ? "" : "not "));
+#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_VXLAN_SUPPORT);
+ IBV_DEVICE_VXLAN_SUPPORT);
+#endif
DEBUG("L2 tunnel checksum offloads are %ssupported",
(priv->hw_csum_l2tun ? "" : "not "));
- priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ priv->counter_set_supported = !!(device_attr.max_counter_sets);
+ ibv_describe_counter_set(ctx, 0, &cs_desc);
+ DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
+#endif
+ priv->ind_table_max_size =
+ device_attr_ex.rss_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
if (priv->ind_table_max_size >
@@ -671,29 +821,32 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
- priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
- IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
+ priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
DEBUG("VLAN stripping is %ssupported",
(priv->hw_vlan_strip ? "" : "not "));
- priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
- IBV_EXP_DEVICE_SCATTER_FCS);
+ priv->hw_fcs_strip =
+ !!(device_attr_ex.orig_attr.device_cap_flags &
+ IBV_WQ_FLAGS_SCATTER_FCS);
DEBUG("FCS stripping configuration is %ssupported",
(priv->hw_fcs_strip ? "" : "not "));
- priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
+ priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+#endif
DEBUG("hardware RX end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
priv->tso = ((priv->tso) &&
- (exp_device_attr.tso_caps.max_tso > 0) &&
- (exp_device_attr.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_ETH)));
+ (device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
if (priv->tso)
priv->max_tso_payload_sz =
- exp_device_attr.tso_caps.max_tso;
+ device_attr_ex.tso_caps.max_tso;
if (priv->mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
@@ -718,23 +871,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->txq_inline = MLX5_WQE_SIZE_MAX -
MLX5_WQE_SIZE;
}
- /* Allocate and register default RSS hash keys. */
- priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
- sizeof((*priv->rss_conf)[0]), 0);
- if (priv->rss_conf == NULL) {
- err = ENOMEM;
- goto port_error;
+ if (priv->cqe_comp && !cqe_comp) {
+ WARN("Rx CQE compression isn't supported");
+ priv->cqe_comp = 0;
}
- err = rss_hash_rss_conf_new_key(priv,
- rss_hash_default_key,
- rss_hash_default_key_len,
- ETH_RSS_PROTO_MASK);
- if (err)
- goto port_error;
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
" (errno: %s)", strerror(errno));
+ err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -742,14 +887,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
mac.addr_bytes[0], mac.addr_bytes[1],
mac.addr_bytes[2], mac.addr_bytes[3],
mac.addr_bytes[4], mac.addr_bytes[5]);
- /* Register MAC address. */
- claim_zero(priv_mac_addr_add(priv, 0,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac.addr_bytes));
- /* Initialize FD filters list. */
- err = fdir_init_filters_list(priv);
- if (err)
- goto port_error;
#ifndef NDEBUG
{
char ifname[IF_NAMESIZE];
@@ -778,44 +915,26 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
err = ENOMEM;
goto port_error;
}
-
- /* Secondary processes have to use local storage for their
- * private data as well as a copy of eth_dev->data, but this
- * pointer must not be modified before burst functions are
- * actually called. */
- if (mlx5_is_secondary()) {
- struct mlx5_secondary_data *sd =
- &mlx5_secondary_data[eth_dev->data->port_id];
- sd->primary_priv = eth_dev->data->dev_private;
- if (sd->primary_priv == NULL) {
- ERROR("no private data for port %u",
- eth_dev->data->port_id);
- err = EINVAL;
- goto port_error;
- }
- sd->shared_dev_data = eth_dev->data;
- rte_spinlock_init(&sd->lock);
- memcpy(sd->data.name, sd->shared_dev_data->name,
- sizeof(sd->data.name));
- sd->data.dev_private = priv;
- sd->data.rx_mbuf_alloc_failed = 0;
- sd->data.mtu = ETHER_MTU;
- sd->data.port_id = sd->shared_dev_data->port_id;
- sd->data.mac_addrs = priv->mac;
- eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup;
- eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
- } else {
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = priv->mac;
- }
-
+ eth_dev->data->dev_private = priv;
+ eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
+ /* Register MAC address. */
+ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
TAILQ_INIT(&priv->flows);
+ TAILQ_INIT(&priv->ctrl_flows);
+
+ /* Hint libmlx5 to use the PMD allocator for data plane resources. */
+ struct mlx5dv_ctx_allocators alctr = {
+ .alloc = &mlx5_alloc_verbs_buf,
+ .free = &mlx5_free_verbs_buf,
+ .data = priv,
+ };
+ mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
@@ -824,10 +943,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
continue;
port_error:
- if (priv) {
- rte_free(priv->rss_conf);
+ if (priv)
rte_free(priv);
- }
if (pd)
claim_zero(ibv_dealloc_pd(pd));
if (ctx)
@@ -901,7 +1018,7 @@ static struct rte_pci_driver mlx5_driver = {
},
.id_table = mlx5_pci_id_map,
.probe = mlx5_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};
/**
@@ -920,6 +1037,9 @@ rte_mlx5_pmd_init(void)
* using this PMD, which is not supported in forked processes.
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+ /* Match the size of Rx completion entry to the size of a cacheline. */
+ if (RTE_CACHE_LINE_SIZE == 128)
+ setenv("MLX5_CQE_SIZE", "128", 0);
ibv_fork_init();
rte_pci_register(&mlx5_driver);
}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 43c53841..e6a69b82 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -39,6 +39,7 @@
#include <limits.h>
#include <net/if.h>
#include <netinet/in.h>
+#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -50,10 +51,6 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -61,20 +58,12 @@
#include <rte_interrupts.h>
#include <rte_errno.h>
#include <rte_flow.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
-#if !defined(HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE) || \
- !defined(HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE)
-#error Mellanox OFED >= 3.3 is required, please refer to the documentation.
-#endif
-
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
@@ -98,26 +87,21 @@ struct mlx5_xstats_ctrl {
uint64_t base[MLX5_MAX_XSTATS];
};
+/* Flow list. */
+TAILQ_HEAD(mlx5_flows, rte_flow);
+
struct priv {
- struct rte_eth_dev *dev; /* Ethernet device. */
+ struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
- struct ibv_device_attr device_attr; /* Device properties. */
+ struct ibv_device_attr_ex device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
- /*
- * MAC addresses array and configuration bit-field.
- * An extra entry that cannot be modified by the DPDK is reserved
- * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
- */
- struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES];
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX5_MAX_MAC_ADDRESSES);
+ char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary. */
+ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int started:1; /* Device started, flows enabled. */
- unsigned int promisc_req:1; /* Promiscuous mode requested. */
- unsigned int allmulti_req:1; /* All multicast mode requested. */
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
@@ -133,6 +117,7 @@ struct priv {
unsigned int isolated:1; /* Whether isolated mode is enabled. */
unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
+ unsigned int counter_set_supported:1; /* Counter set is supported. */
/* Whether Tx offloads for tunneled packets are supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
@@ -141,38 +126,31 @@ struct priv {
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
- struct rxq *(*rxqs)[]; /* RX queues. */
- struct txq *(*txqs)[]; /* TX queues. */
- /* Indirection tables referencing all RX WQs. */
- struct ibv_exp_rwq_ind_table *(*ind_tables)[];
- unsigned int ind_tables_n; /* Number of indirection tables. */
+ struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
+ struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
- /* Hash RX QPs feeding the indirection table. */
- struct hash_rxq (*hash_rxqs)[];
- unsigned int hash_rxqs_n; /* Hash RX QPs array size. */
- /* RSS configuration array indexed by hash RX queue type. */
- struct rte_eth_rss_conf *(*rss_conf)[];
- uint64_t rss_hf; /* RSS DPDK bit field of active RSS. */
+ struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
unsigned int (*reta_idx)[]; /* RETA index table. */
unsigned int reta_idx_n; /* RETA index size. */
- struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
- struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
- struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
- TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+ struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+ struct mlx5_flows flows; /* RTE Flow rules. */
+ struct mlx5_flows ctrl_flows; /* Control flow rules. */
+ LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
+ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
+ LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
+ LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
+ LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
+ LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
+ /* Verbs Indirection tables. */
+ LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
rte_spinlock_t lock; /* Lock for control functions. */
+ int primary_socket; /* Unix socket for primary process. */
+ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
};
-/* Local storage for secondary process data. */
-struct mlx5_secondary_data {
- struct rte_eth_dev_data data; /* Local device data. */
- struct priv *primary_priv; /* Private structure from primary. */
- struct rte_eth_dev_data *shared_dev_data; /* Shared device data. */
- rte_spinlock_t lock; /* Port configuration lock. */
-} mlx5_secondary_data[RTE_MAX_ETHPORTS];
-
/**
* Lock private structure to protect it from concurrent access in the
* control path.
@@ -228,28 +206,19 @@ void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
-struct priv *mlx5_secondary_data_setup(struct priv *priv);
-void priv_select_tx_function(struct priv *);
-void priv_select_rx_function(struct priv *);
+void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
+void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
/* mlx5_mac.c */
int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]);
-void hash_rxq_mac_addrs_del(struct hash_rxq *);
-void priv_mac_addrs_disable(struct priv *);
void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t);
-int hash_rxq_mac_addrs_add(struct hash_rxq *);
-int priv_mac_addr_add(struct priv *, unsigned int,
- const uint8_t (*)[ETHER_ADDR_LEN]);
-int priv_mac_addrs_enable(struct priv *);
int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
uint32_t);
void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
/* mlx5_rss.c */
-int rss_hash_rss_conf_new_key(struct priv *, const uint8_t *, unsigned int,
- uint64_t);
int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *);
int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *);
int priv_rss_reta_index_resize(struct priv *, unsigned int);
@@ -260,10 +229,6 @@ int mlx5_dev_rss_reta_update(struct rte_eth_dev *,
/* mlx5_rxmode.c */
-int priv_special_flow_enable(struct priv *, enum hash_rxq_flow_type);
-void priv_special_flow_disable(struct priv *, enum hash_rxq_flow_type);
-int priv_special_flow_enable_all(struct priv *);
-void priv_special_flow_disable_all(struct priv *);
void mlx5_promiscuous_enable(struct rte_eth_dev *);
void mlx5_promiscuous_disable(struct rte_eth_dev *);
void mlx5_allmulticast_enable(struct rte_eth_dev *);
@@ -272,7 +237,7 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
/* mlx5_stats.c */
void priv_xstats_init(struct priv *);
-void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
+int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
int mlx5_xstats_get(struct rte_eth_dev *,
struct rte_eth_xstat *, unsigned int);
@@ -283,26 +248,22 @@ int mlx5_xstats_get_names(struct rte_eth_dev *,
/* mlx5_vlan.c */
int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int);
-void mlx5_vlan_offload_set(struct rte_eth_dev *, int);
+int mlx5_vlan_offload_set(struct rte_eth_dev *, int);
void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
/* mlx5_trigger.c */
int mlx5_dev_start(struct rte_eth_dev *);
void mlx5_dev_stop(struct rte_eth_dev *);
+int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *);
+int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *);
+int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *);
+int mlx5_traffic_restart(struct rte_eth_dev *);
-/* mlx5_fdir.c */
+/* mlx5_flow.c */
-void priv_fdir_queue_destroy(struct priv *, struct fdir_queue *);
-int fdir_init_filters_list(struct priv *);
-void priv_fdir_delete_filters_list(struct priv *);
-void priv_fdir_disable(struct priv *);
-void priv_fdir_enable(struct priv *);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
enum rte_filter_op, void *);
-
-/* mlx5_flow.c */
-
int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
const struct rte_flow_item [],
const struct rte_flow_action [],
@@ -314,10 +275,35 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
struct rte_flow_error *);
int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
struct rte_flow_error *);
+void priv_flow_flush(struct priv *, struct mlx5_flows *);
int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *,
+ enum rte_flow_action_type, void *,
+ struct rte_flow_error *);
int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *);
-void priv_flow_stop(struct priv *);
-int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+int priv_flow_start(struct priv *, struct mlx5_flows *);
+void priv_flow_stop(struct priv *, struct mlx5_flows *);
+int priv_flow_verify(struct priv *);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *,
+ struct rte_flow_item_eth *, struct rte_flow_item_vlan *,
+ struct rte_flow_item_vlan *);
+int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
+ struct rte_flow_item_eth *);
+int priv_flow_create_drop_queue(struct priv *);
+void priv_flow_delete_drop_queue(struct priv *);
+
+/* mlx5_socket.c */
+
+int priv_socket_init(struct priv *priv);
+int priv_socket_uninit(struct priv *priv);
+void priv_socket_handle(struct priv *priv);
+int priv_socket_connect(struct priv *priv);
+
+/* mlx5_mr.c */
+
+struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *);
+struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *);
+int priv_mr_release(struct priv *, struct mlx5_mr *);
+int priv_mr_verify(struct priv *);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index a76bc6f6..3a7706cf 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -45,9 +45,6 @@
/* Maximum number of simultaneous VLAN filters. */
#define MLX5_MAX_VLAN_IDS 128
-/* Maximum number of special flows. */
-#define MLX5_MAX_SPECIAL_FLOWS 4
-
/*
* Request TX completion every time descriptors reach this threshold since
* the previous request. Must be a power of two for performance reasons.
@@ -100,7 +97,8 @@
/*
* Maximum size of burst for vectorized Tx. This is related to the maximum size
- * of Enhaned MPW (eMPW) WQE as vectorized Tx is supported with eMPW.
+ * of Enhanced MPW (eMPW) WQE as vectorized Tx is supported with eMPW.
+ * Be careful when changing this: too large a value can cause WQE DS overlap.
*/
#define MLX5_VPMD_TX_MAX_BURST 32U
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index b0eb3cdf..c31ea4b6 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -31,6 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define _GNU_SOURCE
+
#include <stddef.h>
#include <assert.h>
#include <unistd.h>
@@ -49,21 +51,17 @@
#include <linux/sockios.h>
#include <linux/version.h>
#include <fcntl.h>
+#include <stdalign.h>
+#include <sys/un.h>
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
+#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
@@ -119,7 +117,6 @@ struct ethtool_link_settings {
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
-#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
@@ -133,12 +130,7 @@ struct ethtool_link_settings {
struct priv *
mlx5_get_priv(struct rte_eth_dev *dev)
{
- struct mlx5_secondary_data *sd;
-
- if (!mlx5_is_secondary())
- return dev->data->dev_private;
- sd = &mlx5_secondary_data[dev->data->port_id];
- return sd->data.dev_private;
+ return dev->data->dev_private;
}
/**
@@ -150,7 +142,7 @@ mlx5_get_priv(struct rte_eth_dev *dev)
inline int
mlx5_is_secondary(void)
{
- return rte_eal_process_type() != RTE_PROC_PRIMARY;
+ return rte_eal_process_type() == RTE_PROC_SECONDARY;
}
/**
@@ -174,7 +166,7 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
char match[IF_NAMESIZE] = "";
{
- MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
+ MKSTR(path, "%s/device/net", priv->ibdev_path);
dir = opendir(path);
if (dir == NULL)
@@ -192,7 +184,7 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
continue;
MKSTR(path, "%s/device/net/%s/%s",
- priv->ctx->device->ibdev_path, name,
+ priv->ibdev_path, name,
(dev_type ? "dev_id" : "dev_port"));
file = fopen(path, "rb");
@@ -280,11 +272,11 @@ priv_sysfs_read(const struct priv *priv, const char *entry,
if (priv_is_ib_cntr(entry)) {
MKSTR(path, "%s/ports/1/hw_counters/%s",
- priv->ctx->device->ibdev_path, entry);
+ priv->ibdev_path, entry);
file = fopen(path, "rb");
} else {
MKSTR(path, "%s/device/net/%s/%s",
- priv->ctx->device->ibdev_path, ifname, entry);
+ priv->ibdev_path, ifname, entry);
file = fopen(path, "rb");
}
if (file == NULL)
@@ -327,8 +319,7 @@ priv_sysfs_write(const struct priv *priv, const char *entry,
if (priv_get_ifname(priv, &ifname))
return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
+ MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry);
file = fopen(path, "wb");
if (file == NULL)
@@ -585,8 +576,29 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int i;
unsigned int j;
unsigned int reta_idx_n;
+ const uint8_t use_app_rss_key =
+ !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
- priv->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ if (use_app_rss_key &&
+ (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
+ rss_hash_default_key_len)) {
+ /* MLX5 RSS only supports a 40-byte key. */
+ return EINVAL;
+ }
+ priv->rss_conf.rss_key =
+ rte_realloc(priv->rss_conf.rss_key,
+ rss_hash_default_key_len, 0);
+ if (!priv->rss_conf.rss_key) {
+ ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
+ return ENOMEM;
+ }
+ memcpy(priv->rss_conf.rss_key,
+ use_app_rss_key ?
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
+ rss_hash_default_key,
+ rss_hash_default_key_len);
+ priv->rss_conf.rss_key_len = rss_hash_default_key_len;
+ priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
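[Editor's note: a minimal application-side sketch of how the new RSS key validation above behaves, assuming the 17.11 ethdev API; any rss_key_len other than 40 bytes makes dev_configure() fail with EINVAL. app_rss_key and configure_port_rss are hypothetical names.]

/* Hypothetical sketch: supplying the 40-byte RSS key dev_configure()
 * above requires. */
#include <rte_ethdev.h>

static uint8_t app_rss_key[40]; /* 40 bytes of key material */

static int
configure_port_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			.rss_key = app_rss_key,
			.rss_key_len = sizeof(app_rss_key), /* must be 40 */
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
		},
	};

	/* Any other rss_key_len is rejected with EINVAL by the code above. */
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}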
@@ -672,8 +684,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
* Since we need one CQ per QP, the limit is the minimum number
* between the two values.
*/
- max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
- priv->device_attr.max_qp : priv->device_attr.max_cq);
+ max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
+ priv->device_attr.orig_attr.max_qp);
/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
if (max >= 65535)
max = 65535;
@@ -686,7 +698,9 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
+ (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
+ DEV_RX_OFFLOAD_TIMESTAMP;
+
if (!priv->mps)
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
if (priv->hw_csum)
@@ -704,9 +718,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
priv->reta_idx_n : priv->ind_table_max_size;
- info->hash_key_size = ((*priv->rss_conf) ?
- (*priv->rss_conf)[0]->rss_key_len :
- 0);
+ info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
priv_unlock(priv);
}
@@ -816,12 +828,7 @@ static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- __extension__ struct {
- struct ethtool_link_settings edata;
- uint32_t link_mode_data[3 *
- ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
- } ecmd;
-
+ struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
@@ -834,23 +841,29 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- memset(&ecmd, 0, sizeof(ecmd));
- ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
- ifr.ifr_data = (void *)&ecmd;
+ ifr.ifr_data = (void *)&gcmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+ gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+ alignas(struct ethtool_link_settings)
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+ struct ethtool_link_settings *ecmd = (void *)data;
+
+ *ecmd = gcmd;
+ ifr.ifr_data = (void *)ecmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = ecmd.edata.speed;
- sc = ecmd.edata.link_mode_masks[0] |
- ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
+ dev_link.link_speed = ecmd->speed;
+ sc = ecmd->link_mode_masks[0] |
+ ((uint64_t)ecmd->link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -886,7 +899,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
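[Editor's note: the rewritten hunk above implements the ETHTOOL_GLINKSETTINGS two-pass handshake: the first ioctl is issued with link_mode_masks_nwords at zero, the kernel answers with the negative of the word count it needs, and the call is repeated with a buffer sized for three bitmaps of that many 32-bit words. A standalone sketch of the same handshake, with a hypothetical name (query_link_speed) and simplified error handling:]

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int
query_link_speed(int sock, const char *ifname)
{
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&gcmd;
	/* Pass 1: nwords == 0; the kernel replies with -(words needed). */
	if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
		return -1;
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
	/* Pass 2: buffer sized for 3 masks of nwords 32-bit words each. */
	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * 3 * gcmd.link_mode_masks_nwords];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
		return -1;
	return (int)ecmd->speed;
}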
@@ -1124,47 +1137,77 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
}
/**
- * Link status handler.
+ * Update the link status.
*
* @param priv
* Pointer to private structure.
- * @param dev
- * Pointer to the rte_eth_dev structure.
*
* @return
- * Nonzero if the callback process can be called immediately.
+ * Zero if the callback process can be called immediately.
*/
static int
-priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
+priv_link_status_update(struct priv *priv)
+{
+ struct rte_eth_link *link = &priv->dev->data->dev_link;
+
+ mlx5_link_update(priv->dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ /*
+ * Inconsistent status. The event likely occurred before the
+ * kernel netdevice exposed the new status.
+ */
+ if (!priv->pending_alarm) {
+ priv->pending_alarm = 1;
+ rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
+ mlx5_dev_link_status_handler,
+ priv->dev);
+ }
+ return 1;
+ } else if (unlikely(priv->pending_alarm)) {
+ /* Link interrupt occurred while alarm is already scheduled. */
+ priv->pending_alarm = 0;
+ rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
+ }
+ return 0;
+}
+
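[Editor's note: priv_link_status_update() above defers the user callback when speed and status disagree, on the assumption that the interrupt raced the kernel netdevice update; it arms a one-shot EAL alarm and cancels it once a consistent reading arrives. A minimal standalone sketch of that deferred-recheck pattern, with hypothetical names (pending, schedule_recheck) and the timeout as an assumed constant:]

#include <rte_alarm.h>

#define RECHECK_TIMEOUT_US 100000U /* assumed value, not MLX5_ALARM_TIMEOUT_US */

static int pending; /* mirrors the role of priv->pending_alarm */

static void
recheck_cb(void *arg)
{
	pending = 0;
	/* Re-read the state here; call schedule_recheck(arg) again if it
	 * is still inconsistent. */
	(void)arg;
}

static void
schedule_recheck(void *arg)
{
	if (!pending) {
		pending = 1;
		rte_eal_alarm_set(RECHECK_TIMEOUT_US, recheck_cb, arg);
	}
}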
+/**
+ * Device status handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Bitmap of events whose callbacks can be invoked immediately.
+ */
+static uint32_t
+priv_dev_status_handler(struct priv *priv)
{
struct ibv_async_event event;
- struct rte_eth_link *link = &dev->data->dev_link;
- int ret = 0;
+ uint32_t ret = 0;
/* Read all messages and acknowledge them. */
for (;;) {
if (ibv_get_async_event(priv->ctx, &event))
break;
-
- if (event.event_type != IBV_EVENT_PORT_ACTIVE &&
- event.event_type != IBV_EVENT_PORT_ERR)
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+ ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
+ else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ priv->dev->data->dev_conf.intr_conf.rmv == 1)
+ ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
+ else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
}
- mlx5_link_update(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
- if (!priv->pending_alarm) {
- /* Inconsistent status, check again later. */
- priv->pending_alarm = 1;
- rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
- mlx5_dev_link_status_handler,
- dev);
- }
- } else {
- ret = 1;
- }
+ if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
+ if (priv_link_status_update(priv))
+ ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
return ret;
}
@@ -1184,9 +1227,9 @@ mlx5_dev_link_status_handler(void *arg)
priv_lock(priv);
assert(priv->pending_alarm == 1);
priv->pending_alarm = 0;
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_link_status_update(priv);
priv_unlock(priv);
- if (ret)
+ if (!ret)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
}
@@ -1204,14 +1247,34 @@ mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
- int ret;
+ uint32_t events;
priv_lock(priv);
- ret = priv_dev_link_status_handler(priv, dev);
+ events = priv_dev_status_handler(priv);
priv_unlock(priv);
- if (ret)
+ if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
+ if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
+ NULL);
+}
+
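[Editor's note: on the application side, the LSC and RMV bits dispatched above surface through rte_eth_dev_callback_register(). A hedged sketch of a consumer, assuming the 17.11 ethdev API; on_port_event and subscribe_port_events are hypothetical names:]

#include <stdio.h>
#include <rte_ethdev.h>

static int
on_port_event(uint16_t port_id, enum rte_eth_event_type type,
	      void *cb_arg, void *ret_param)
{
	(void)cb_arg;
	(void)ret_param;
	if (type == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link state changed\n", port_id);
	else if (type == RTE_ETH_EVENT_INTR_RMV)
		printf("port %u: device removed\n", port_id);
	return 0;
}

/* Called once after rte_eal_init(), before starting the port. */
static void
subscribe_port_events(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      on_port_event, NULL);
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RMV,
				      on_port_event, NULL);
}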
+/**
+ * Handle interrupts from the socket.
+ *
+ * @param cb_arg
+ * Callback argument.
+ */
+static void
+mlx5_dev_handler_socket(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ priv_socket_handle(priv);
+ priv_unlock(priv);
}
/**
@@ -1225,16 +1288,20 @@ mlx5_dev_interrupt_handler(void *cb_arg)
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
- rte_intr_callback_unregister(&priv->intr_handle,
- mlx5_dev_interrupt_handler,
- dev);
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv)
+ rte_intr_callback_unregister(&priv->intr_handle,
+ mlx5_dev_interrupt_handler, dev);
+ if (priv->primary_socket)
+ rte_intr_callback_unregister(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
if (priv->pending_alarm)
rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
priv->pending_alarm = 0;
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ priv->intr_handle_socket.fd = 0;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
}
/**
@@ -1250,20 +1317,29 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
int rc, flags;
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
+ assert(!mlx5_is_secondary());
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
- } else {
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ }
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rmv) {
priv->intr_handle.fd = priv->ctx->async_fd;
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle,
- mlx5_dev_interrupt_handler,
- dev);
+ mlx5_dev_interrupt_handler, dev);
+ }
+
+ rc = priv_socket_init(priv);
+ if (!rc && priv->primary_socket) {
+ priv->intr_handle_socket.fd = priv->primary_socket;
+ priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle_socket,
+ mlx5_dev_handler_socket, dev);
}
}
@@ -1271,7 +1347,9 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
* Change the link state (UP / DOWN).
*
* @param priv
- * Pointer to Ethernet device structure.
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
* @param up
* Nonzero for link up, otherwise link down.
*
@@ -1279,17 +1357,16 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
* 0 on success, errno value on failure.
*/
static int
-priv_set_link(struct priv *priv, int up)
+priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
{
- struct rte_eth_dev *dev = priv->dev;
int err;
if (up) {
err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
if (err)
return err;
- priv_select_tx_function(priv);
- priv_select_rx_function(priv);
+ priv_dev_select_tx_function(priv, dev);
+ priv_dev_select_rx_function(priv, dev);
} else {
err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
if (err)
@@ -1316,7 +1393,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_set_link(priv, 0);
+ err = priv_dev_set_link(priv, dev, 0);
priv_unlock(priv);
return err;
}
@@ -1337,195 +1414,42 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_set_link(priv, 1);
+ err = priv_dev_set_link(priv, dev, 1);
priv_unlock(priv);
return err;
}
/**
- * Configure secondary process queues from a private data pointer (primary
- * or secondary) and update burst callbacks. Can take place only once.
- *
- * All queues must have been previously created by the primary process to
- * avoid undefined behavior.
- *
- * @param priv
- * Private data pointer from either primary or secondary process.
- *
- * @return
- * Private data pointer from secondary process, NULL in case of error.
- */
-struct priv *
-mlx5_secondary_data_setup(struct priv *priv)
-{
- unsigned int port_id = 0;
- struct mlx5_secondary_data *sd;
- void **tx_queues;
- void **rx_queues;
- unsigned int nb_tx_queues;
- unsigned int nb_rx_queues;
- unsigned int i;
-
- /* priv must be valid at this point. */
- assert(priv != NULL);
- /* priv->dev must also be valid but may point to local memory from
- * another process, possibly with the same address and must not
- * be dereferenced yet. */
- assert(priv->dev != NULL);
- /* Determine port ID by finding out where priv comes from. */
- while (1) {
- sd = &mlx5_secondary_data[port_id];
- rte_spinlock_lock(&sd->lock);
- /* Primary process? */
- if (sd->primary_priv == priv)
- break;
- /* Secondary process? */
- if (sd->data.dev_private == priv)
- break;
- rte_spinlock_unlock(&sd->lock);
- if (++port_id == RTE_DIM(mlx5_secondary_data))
- port_id = 0;
- }
- /* Switch to secondary private structure. If private data has already
- * been updated by another thread, there is nothing else to do. */
- priv = sd->data.dev_private;
- if (priv->dev->data == &sd->data)
- goto end;
- /* Sanity checks. Secondary private structure is supposed to point
- * to local eth_dev, itself still pointing to the shared device data
- * structure allocated by the primary process. */
- assert(sd->shared_dev_data != &sd->data);
- assert(sd->data.nb_tx_queues == 0);
- assert(sd->data.tx_queues == NULL);
- assert(sd->data.nb_rx_queues == 0);
- assert(sd->data.rx_queues == NULL);
- assert(priv != sd->primary_priv);
- assert(priv->dev->data == sd->shared_dev_data);
- assert(priv->txqs_n == 0);
- assert(priv->txqs == NULL);
- assert(priv->rxqs_n == 0);
- assert(priv->rxqs == NULL);
- nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
- nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
- /* Allocate local storage for queues. */
- tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
- sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
- RTE_CACHE_LINE_SIZE);
- rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
- sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
- RTE_CACHE_LINE_SIZE);
- if (tx_queues == NULL || rx_queues == NULL)
- goto error;
- /* Lock to prevent control operations during setup. */
- priv_lock(priv);
- /* TX queues. */
- for (i = 0; i != nb_tx_queues; ++i) {
- struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
- struct txq_ctrl *primary_txq_ctrl;
- struct txq_ctrl *txq_ctrl;
-
- if (primary_txq == NULL)
- continue;
- primary_txq_ctrl = container_of(primary_txq,
- struct txq_ctrl, txq);
- txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl) +
- (1 << primary_txq->elts_n) *
- sizeof(struct rte_mbuf *), 0,
- primary_txq_ctrl->socket);
- if (txq_ctrl != NULL) {
- if (txq_ctrl_setup(priv->dev,
- txq_ctrl,
- 1 << primary_txq->elts_n,
- primary_txq_ctrl->socket,
- NULL) == 0) {
- txq_ctrl->txq.stats.idx =
- primary_txq->stats.idx;
- tx_queues[i] = &txq_ctrl->txq;
- continue;
- }
- rte_free(txq_ctrl);
- }
- while (i) {
- txq_ctrl = tx_queues[--i];
- txq_cleanup(txq_ctrl);
- rte_free(txq_ctrl);
- }
- goto error;
- }
- /* RX queues. */
- for (i = 0; i != nb_rx_queues; ++i) {
- struct rxq_ctrl *primary_rxq =
- container_of((*sd->primary_priv->rxqs)[i],
- struct rxq_ctrl, rxq);
-
- if (primary_rxq == NULL)
- continue;
- /* Not supported yet. */
- rx_queues[i] = NULL;
- }
- /* Update everything. */
- priv->txqs = (void *)tx_queues;
- priv->txqs_n = nb_tx_queues;
- priv->rxqs = (void *)rx_queues;
- priv->rxqs_n = nb_rx_queues;
- sd->data.rx_queues = rx_queues;
- sd->data.tx_queues = tx_queues;
- sd->data.nb_rx_queues = nb_rx_queues;
- sd->data.nb_tx_queues = nb_tx_queues;
- sd->data.dev_link = sd->shared_dev_data->dev_link;
- sd->data.mtu = sd->shared_dev_data->mtu;
- memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
- sizeof(sd->data.rx_queue_state));
- memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
- sizeof(sd->data.tx_queue_state));
- sd->data.dev_flags = sd->shared_dev_data->dev_flags;
- /* Use local data from now on. */
- rte_mb();
- priv->dev->data = &sd->data;
- rte_mb();
- priv_select_tx_function(priv);
- priv_select_rx_function(priv);
- priv_unlock(priv);
-end:
- /* More sanity checks. */
- assert(priv->dev->data == &sd->data);
- rte_spinlock_unlock(&sd->lock);
- return priv;
-error:
- priv_unlock(priv);
- rte_free(tx_queues);
- rte_free(rx_queues);
- rte_spinlock_unlock(&sd->lock);
- return NULL;
-}
-
-/**
* Configure the TX function to use.
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*/
void
-priv_select_tx_function(struct priv *priv)
+priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
- priv->dev->tx_pkt_burst = mlx5_tx_burst;
+ assert(priv != NULL);
+ assert(dev != NULL);
+ dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
if (priv_check_vec_tx_support(priv) > 0) {
if (priv_check_raw_vec_tx_support(priv) > 0)
- priv->dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
- priv->dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ dev->tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
- priv->dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ dev->tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
} else if (priv->mps && priv->txq_inline) {
- priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
} else if (priv->mps) {
- priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
}
@@ -1534,16 +1458,19 @@ priv_select_tx_function(struct priv *priv)
* Configure the RX function to use.
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private data structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*/
void
-priv_select_rx_function(struct priv *priv)
+priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
{
+ assert(priv != NULL);
+ assert(dev != NULL);
if (priv_check_vec_rx_support(priv) > 0) {
- priv_prep_vec_rx_function(priv);
- priv->dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ dev->rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
} else {
- priv->dev->rx_pkt_burst = mlx5_rx_burst;
+ dev->rx_pkt_burst = mlx5_rx_burst;
}
}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
deleted file mode 100644
index 34a7e69f..00000000
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ /dev/null
@@ -1,1101 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <assert.h>
-#include <stdint.h>
-#include <string.h>
-#include <errno.h>
-
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <rte_ether.h>
-#include <rte_malloc.h>
-#include <rte_ethdev.h>
-#include <rte_common.h>
-#include <rte_flow.h>
-#include <rte_flow_driver.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-#include "mlx5.h"
-#include "mlx5_rxtx.h"
-
-struct fdir_flow_desc {
- uint16_t dst_port;
- uint16_t src_port;
- uint32_t src_ip[4];
- uint32_t dst_ip[4];
- uint8_t mac[6];
- uint16_t vlan_tag;
- enum hash_rxq_type type;
-};
-
-struct mlx5_fdir_filter {
- LIST_ENTRY(mlx5_fdir_filter) next;
- uint16_t queue; /* Queue assigned to if FDIR match. */
- enum rte_eth_fdir_behavior behavior;
- struct fdir_flow_desc desc;
- struct ibv_exp_flow *flow;
-};
-
-LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
-
-/**
- * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
- *
- * @param[in] fdir_filter
- * DPDK filter structure to convert.
- * @param[out] desc
- * Resulting mlx5 filter descriptor.
- * @param mode
- * Flow director mode.
- */
-static void
-fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
- struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
-{
- /* Initialize descriptor. */
- memset(desc, 0, sizeof(*desc));
-
- /* Set VLAN ID. */
- desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
-
- /* Set MAC address. */
- if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- rte_memcpy(desc->mac,
- fdir_filter->input.flow.mac_vlan_flow.mac_addr.
- addr_bytes,
- sizeof(desc->mac));
- desc->type = HASH_RXQ_ETH;
- return;
- }
-
- /* Set mode */
- switch (fdir_filter->input.flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- desc->type = HASH_RXQ_UDPV4;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- desc->type = HASH_RXQ_TCPV4;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- desc->type = HASH_RXQ_IPV4;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- desc->type = HASH_RXQ_UDPV6;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- desc->type = HASH_RXQ_TCPV6;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- desc->type = HASH_RXQ_IPV6;
- break;
- default:
- break;
- }
-
- /* Set flow values */
- switch (fdir_filter->input.flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
- desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
- /* fallthrough */
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
- desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
- desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
- /* Fall through. */
- case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- rte_memcpy(desc->src_ip,
- fdir_filter->input.flow.ipv6_flow.src_ip,
- sizeof(desc->src_ip));
- rte_memcpy(desc->dst_ip,
- fdir_filter->input.flow.ipv6_flow.dst_ip,
- sizeof(desc->dst_ip));
- break;
- default:
- break;
- }
-}
-
-/**
- * Check if two flow descriptors overlap according to configured mask.
- *
- * @param priv
- * Private structure that provides flow director mask.
- * @param desc1
- * First flow descriptor to compare.
- * @param desc2
- * Second flow descriptor to compare.
- *
- * @return
- * Nonzero if descriptors overlap.
- */
-static int
-priv_fdir_overlap(const struct priv *priv,
- const struct fdir_flow_desc *desc1,
- const struct fdir_flow_desc *desc2)
-{
- const struct rte_eth_fdir_masks *mask =
- &priv->dev->data->dev_conf.fdir_conf.mask;
- unsigned int i;
-
- if (desc1->type != desc2->type)
- return 0;
- /* Ignore non masked bits. */
- for (i = 0; i != RTE_DIM(desc1->mac); ++i)
- if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
- (desc2->mac[i] & mask->mac_addr_byte_mask))
- return 0;
- if (((desc1->src_port & mask->src_port_mask) !=
- (desc2->src_port & mask->src_port_mask)) ||
- ((desc1->dst_port & mask->dst_port_mask) !=
- (desc2->dst_port & mask->dst_port_mask)))
- return 0;
- switch (desc1->type) {
- case HASH_RXQ_IPV4:
- case HASH_RXQ_UDPV4:
- case HASH_RXQ_TCPV4:
- if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
- (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
- ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
- (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
- return 0;
- break;
- case HASH_RXQ_IPV6:
- case HASH_RXQ_UDPV6:
- case HASH_RXQ_TCPV6:
- for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
- if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
- (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
- ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
- (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
- return 0;
- break;
- default:
- break;
- }
- return 1;
-}
-
-/**
- * Create flow director steering rule for a specific filter.
- *
- * @param priv
- * Private structure.
- * @param mlx5_fdir_filter
- * Filter to create a steering rule for.
- * @param fdir_queue
- * Flow director queue for matching packets.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_flow_add(struct priv *priv,
- struct mlx5_fdir_filter *mlx5_fdir_filter,
- struct fdir_queue *fdir_queue)
-{
- struct ibv_exp_flow *flow;
- struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
- enum rte_fdir_mode fdir_mode =
- priv->dev->data->dev_conf.fdir_conf.mode;
- struct rte_eth_fdir_masks *mask =
- &priv->dev->data->dev_conf.fdir_conf.mask;
- FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
- struct ibv_exp_flow_attr *attr = &data->attr;
- uintptr_t spec_offset = (uintptr_t)&data->spec;
- struct ibv_exp_flow_spec_eth *spec_eth;
- struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
- struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
- struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
- struct mlx5_fdir_filter *iter_fdir_filter;
- unsigned int i;
-
- /* Abort if an existing flow overlaps this one to avoid packet
- * duplication, even if it targets another queue. */
- LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
- if ((iter_fdir_filter != mlx5_fdir_filter) &&
- (iter_fdir_filter->flow != NULL) &&
- (priv_fdir_overlap(priv,
- &mlx5_fdir_filter->desc,
- &iter_fdir_filter->desc)))
- return EEXIST;
-
- /*
- * No padding must be inserted by the compiler between attr and spec.
- * This layout is expected by libibverbs.
- */
- assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);
- priv_flow_attr(priv, attr, sizeof(data), desc->type);
-
- /* Set Ethernet spec */
- spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;
-
- /* The first specification must be Ethernet. */
- assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
- assert(spec_eth->size == sizeof(*spec_eth));
-
- /* VLAN ID */
- spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
- spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
-
- /* Update priority */
- attr->priority = 2;
-
- if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- /* MAC Address */
- for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
- spec_eth->val.dst_mac[i] =
- desc->mac[i] & mask->mac_addr_byte_mask;
- spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
- }
- goto create_flow;
- }
-
- switch (desc->type) {
- case HASH_RXQ_IPV4:
- case HASH_RXQ_UDPV4:
- case HASH_RXQ_TCPV4:
- spec_offset += spec_eth->size;
-
- /* Set IP spec */
- spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
-
- /* The second specification must be IP. */
- assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
- assert(spec_ipv4->size == sizeof(*spec_ipv4));
-
- spec_ipv4->val.src_ip =
- desc->src_ip[0] & mask->ipv4_mask.src_ip;
- spec_ipv4->val.dst_ip =
- desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
- spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
- spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
-
- /* Update priority */
- attr->priority = 1;
-
- if (desc->type == HASH_RXQ_IPV4)
- goto create_flow;
-
- spec_offset += spec_ipv4->size;
- break;
- case HASH_RXQ_IPV6:
- case HASH_RXQ_UDPV6:
- case HASH_RXQ_TCPV6:
- spec_offset += spec_eth->size;
-
- /* Set IP spec */
- spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
-
- /* The second specification must be IP. */
- assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
- assert(spec_ipv6->size == sizeof(*spec_ipv6));
-
- for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
- ((uint32_t *)spec_ipv6->val.src_ip)[i] =
- desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
- ((uint32_t *)spec_ipv6->val.dst_ip)[i] =
- desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
- }
- rte_memcpy(spec_ipv6->mask.src_ip,
- mask->ipv6_mask.src_ip,
- sizeof(spec_ipv6->mask.src_ip));
- rte_memcpy(spec_ipv6->mask.dst_ip,
- mask->ipv6_mask.dst_ip,
- sizeof(spec_ipv6->mask.dst_ip));
-
- /* Update priority */
- attr->priority = 1;
-
- if (desc->type == HASH_RXQ_IPV6)
- goto create_flow;
-
- spec_offset += spec_ipv6->size;
- break;
- default:
- ERROR("invalid flow attribute type");
- return EINVAL;
- }
-
- /* Set TCP/UDP flow specification. */
- spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;
-
- /* The third specification must be TCP/UDP. */
- assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
- spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
- assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));
-
- spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
- spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
- spec_tcp_udp->mask.src_port = mask->src_port_mask;
- spec_tcp_udp->mask.dst_port = mask->dst_port_mask;
-
- /* Update priority */
- attr->priority = 0;
-
-create_flow:
-
- errno = 0;
- flow = ibv_exp_create_flow(fdir_queue->qp, attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow director configuration failed, errno=%d: %s",
- (void *)priv, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
-
- DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
- mlx5_fdir_filter->flow = flow;
- return 0;
-}
-
-/**
- * Destroy a flow director queue.
- *
- * @param fdir_queue
- * Flow director queue to be destroyed.
- */
-void
-priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
-{
- struct mlx5_fdir_filter *fdir_filter;
-
- /* Disable filter flows still applying to this queue. */
- LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
- unsigned int idx = fdir_filter->queue;
- struct rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
-
- assert(idx < priv->rxqs_n);
- if (fdir_queue == rxq_ctrl->fdir_queue &&
- fdir_filter->flow != NULL) {
- claim_zero(ibv_exp_destroy_flow(fdir_filter->flow));
- fdir_filter->flow = NULL;
- }
- }
- assert(fdir_queue->qp);
- claim_zero(ibv_destroy_qp(fdir_queue->qp));
- assert(fdir_queue->ind_table);
- claim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));
- if (fdir_queue->wq)
- claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
- if (fdir_queue->cq)
- claim_zero(ibv_destroy_cq(fdir_queue->cq));
-#ifndef NDEBUG
- memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
-#endif
- rte_free(fdir_queue);
-}
-
-/**
- * Create a flow director queue.
- *
- * @param priv
- * Private structure.
- * @param wq
- * Work queue to route matched packets to, NULL if one needs to
- * be created.
- *
- * @return
- * Related flow director queue on success, NULL otherwise.
- */
-static struct fdir_queue *
-priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,
- unsigned int socket)
-{
- struct fdir_queue *fdir_queue;
-
- fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
- 0, socket);
- if (!fdir_queue) {
- ERROR("cannot allocate flow director queue");
- return NULL;
- }
- assert(priv->pd);
- assert(priv->ctx);
- if (!wq) {
- fdir_queue->cq = ibv_exp_create_cq(
- priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
- if (!fdir_queue->cq) {
- ERROR("cannot create flow director CQ");
- goto error;
- }
- fdir_queue->wq = ibv_exp_create_wq(
- priv->ctx,
- &(struct ibv_exp_wq_init_attr){
- .wq_type = IBV_EXP_WQT_RQ,
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- .pd = priv->pd,
- .cq = fdir_queue->cq,
- });
- if (!fdir_queue->wq) {
- ERROR("cannot create flow director WQ");
- goto error;
- }
- wq = fdir_queue->wq;
- }
- fdir_queue->ind_table = ibv_exp_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
- .pd = priv->pd,
- .log_ind_tbl_size = 0,
- .ind_tbl = &wq,
- .comp_mask = 0,
- });
- if (!fdir_queue->ind_table) {
- ERROR("cannot create flow director indirection table");
- goto error;
- }
- fdir_queue->qp = ibv_exp_create_qp(
- priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
- .rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = 0,
- .rwq_ind_tbl = fdir_queue->ind_table,
- },
- .port_num = priv->port,
- });
- if (!fdir_queue->qp) {
- ERROR("cannot create flow director hash RX QP");
- goto error;
- }
- return fdir_queue;
-error:
- assert(fdir_queue);
- assert(!fdir_queue->qp);
- if (fdir_queue->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table
- (fdir_queue->ind_table));
- if (fdir_queue->wq)
- claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
- if (fdir_queue->cq)
- claim_zero(ibv_destroy_cq(fdir_queue->cq));
- rte_free(fdir_queue);
- return NULL;
-}
-
-/**
- * Get flow director queue for a specific RX queue, create it in case
- * it does not exist.
- *
- * @param priv
- * Private structure.
- * @param idx
- * RX queue index.
- *
- * @return
- * Related flow director queue on success, NULL otherwise.
- */
-static struct fdir_queue *
-priv_get_fdir_queue(struct priv *priv, uint16_t idx)
-{
- struct rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
- struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;
-
- assert(rxq_ctrl->wq);
- if (fdir_queue == NULL) {
- fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
- rxq_ctrl->socket);
- rxq_ctrl->fdir_queue = fdir_queue;
- }
- return fdir_queue;
-}
-
-/**
- * Get or flow director drop queue. Create it if it does not exist.
- *
- * @param priv
- * Private structure.
- *
- * @return
- * Flow director drop queue on success, NULL otherwise.
- */
-static struct fdir_queue *
-priv_get_fdir_drop_queue(struct priv *priv)
-{
- struct fdir_queue *fdir_queue = priv->fdir_drop_queue;
-
- if (fdir_queue == NULL) {
- unsigned int socket = SOCKET_ID_ANY;
-
- /* Select a known NUMA socket if possible. */
- if (priv->rxqs_n && (*priv->rxqs)[0])
- socket = container_of((*priv->rxqs)[0],
- struct rxq_ctrl, rxq)->socket;
- fdir_queue = priv_fdir_queue_create(priv, NULL, socket);
- priv->fdir_drop_queue = fdir_queue;
- }
- return fdir_queue;
-}
-
-/**
- * Enable flow director filter and create steering rules.
- *
- * @param priv
- * Private structure.
- * @param mlx5_fdir_filter
- * Filter to create steering rule for.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_filter_enable(struct priv *priv,
- struct mlx5_fdir_filter *mlx5_fdir_filter)
-{
- struct fdir_queue *fdir_queue;
-
- /* Check if flow already exists. */
- if (mlx5_fdir_filter->flow != NULL)
- return 0;
-
- /* Get fdir_queue for specific queue. */
- if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
- fdir_queue = priv_get_fdir_drop_queue(priv);
- else
- fdir_queue = priv_get_fdir_queue(priv,
- mlx5_fdir_filter->queue);
-
- if (fdir_queue == NULL) {
- ERROR("failed to create flow director rxq for queue %d",
- mlx5_fdir_filter->queue);
- return EINVAL;
- }
-
- /* Create flow */
- return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
-}
-
-/**
- * Initialize flow director filters list.
- *
- * @param priv
- * Private structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-fdir_init_filters_list(struct priv *priv)
-{
- /* Filter list initialization should be done only once. */
- if (priv->fdir_filter_list)
- return 0;
-
- /* Create filters list. */
- priv->fdir_filter_list =
- rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);
-
- if (priv->fdir_filter_list == NULL) {
- int err = ENOMEM;
-
- ERROR("cannot allocate flow director filter list: %s",
- strerror(err));
- return err;
- }
-
- LIST_INIT(priv->fdir_filter_list);
-
- return 0;
-}
-
-/**
- * Flush all filters.
- *
- * @param priv
- * Private structure.
- */
-static void
-priv_fdir_filter_flush(struct priv *priv)
-{
- struct mlx5_fdir_filter *mlx5_fdir_filter;
-
- while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
- struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
-
- DEBUG("%p: flushing flow director filter %p",
- (void *)priv, (void *)mlx5_fdir_filter);
- LIST_REMOVE(mlx5_fdir_filter, next);
- if (flow != NULL)
- claim_zero(ibv_exp_destroy_flow(flow));
- rte_free(mlx5_fdir_filter);
- }
-}
-
-/**
- * Remove all flow director filters and delete list.
- *
- * @param priv
- * Private structure.
- */
-void
-priv_fdir_delete_filters_list(struct priv *priv)
-{
- priv_fdir_filter_flush(priv);
- rte_free(priv->fdir_filter_list);
- priv->fdir_filter_list = NULL;
-}
-
-/**
- * Disable flow director, remove all steering rules.
- *
- * @param priv
- * Private structure.
- */
-void
-priv_fdir_disable(struct priv *priv)
-{
- unsigned int i;
- struct mlx5_fdir_filter *mlx5_fdir_filter;
-
- /* Run on every flow director filter and destroy flow handle. */
- LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
- struct ibv_exp_flow *flow;
-
- /* Only valid elements should be in the list */
- assert(mlx5_fdir_filter != NULL);
- flow = mlx5_fdir_filter->flow;
-
- /* Destroy flow handle */
- if (flow != NULL) {
- claim_zero(ibv_exp_destroy_flow(flow));
- mlx5_fdir_filter->flow = NULL;
- }
- }
-
- /* Destroy flow director context in each RX queue. */
- for (i = 0; (i != priv->rxqs_n); i++) {
- struct rxq_ctrl *rxq_ctrl;
-
- if (!(*priv->rxqs)[i])
- continue;
- rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
- if (!rxq_ctrl->fdir_queue)
- continue;
- priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
- rxq_ctrl->fdir_queue = NULL;
- }
- if (priv->fdir_drop_queue) {
- priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
- priv->fdir_drop_queue = NULL;
- }
-}
-
-/**
- * Enable flow director, create steering rules.
- *
- * @param priv
- * Private structure.
- */
-void
-priv_fdir_enable(struct priv *priv)
-{
- struct mlx5_fdir_filter *mlx5_fdir_filter;
-
- /* Run on every fdir filter and create flow handle */
- LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
- /* Only valid elements should be in the list */
- assert(mlx5_fdir_filter != NULL);
-
- priv_fdir_filter_enable(priv, mlx5_fdir_filter);
- }
-}
-
-/**
- * Find specific filter in list.
- *
- * @param priv
- * Private structure.
- * @param fdir_filter
- * Flow director filter to find.
- *
- * @return
- * Filter element if found, otherwise NULL.
- */
-static struct mlx5_fdir_filter *
-priv_find_filter_in_list(struct priv *priv,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct fdir_flow_desc desc;
- struct mlx5_fdir_filter *mlx5_fdir_filter;
- enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
-
- /* Get flow director filter to look for. */
- fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);
-
- /* Look for the requested element. */
- LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
- /* Only valid elements should be in the list. */
- assert(mlx5_fdir_filter != NULL);
-
- /* Return matching filter. */
- if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
- return mlx5_fdir_filter;
- }
-
- /* Filter not found */
- return NULL;
-}
-
-/**
- * Add new flow director filter and store it in list.
- *
- * @param priv
- * Private structure.
- * @param fdir_filter
- * Flow director filter to add.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_filter_add(struct priv *priv,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct mlx5_fdir_filter *mlx5_fdir_filter;
- enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
- int err = 0;
-
- /* Validate queue number. */
- if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
- return EINVAL;
- }
-
- /* Duplicate filters are currently unsupported. */
- mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
- if (mlx5_fdir_filter != NULL) {
- ERROR("filter already exists");
- return EINVAL;
- }
-
- /* Create new flow director filter. */
- mlx5_fdir_filter =
- rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
- if (mlx5_fdir_filter == NULL) {
- err = ENOMEM;
- ERROR("cannot allocate flow director filter: %s",
- strerror(err));
- return err;
- }
-
- /* Set action parameters. */
- mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
- mlx5_fdir_filter->behavior = fdir_filter->action.behavior;
-
- /* Convert to mlx5 filter descriptor. */
- fdir_filter_to_flow_desc(fdir_filter,
- &mlx5_fdir_filter->desc, fdir_mode);
-
- /* Insert new filter into list. */
- LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);
-
- DEBUG("%p: flow director filter %p added",
- (void *)priv, (void *)mlx5_fdir_filter);
-
- /* Enable filter immediately if device is started. */
- if (priv->started)
- err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
-
- return err;
-}
-
-/**
- * Update queue for specific filter.
- *
- * @param priv
- * Private structure.
- * @param fdir_filter
- * Filter to be updated.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_filter_update(struct priv *priv,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct mlx5_fdir_filter *mlx5_fdir_filter;
-
- /* Validate queue number. */
- if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
- return EINVAL;
- }
-
- mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
- if (mlx5_fdir_filter != NULL) {
- struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
- int err = 0;
-
- /* Update queue number. */
- mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
-
- /* Destroy flow handle. */
- if (flow != NULL) {
- claim_zero(ibv_exp_destroy_flow(flow));
- mlx5_fdir_filter->flow = NULL;
- }
- DEBUG("%p: flow director filter %p updated",
- (void *)priv, (void *)mlx5_fdir_filter);
-
- /* Enable filter if device is started. */
- if (priv->started)
- err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);
-
- return err;
- }
-
- /* Filter not found, create it. */
- DEBUG("%p: filter not found for update, creating new filter",
- (void *)priv);
- return priv_fdir_filter_add(priv, fdir_filter);
-}
-
-/**
- * Delete specific filter.
- *
- * @param priv
- * Private structure.
- * @param fdir_filter
- * Filter to be deleted.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_filter_delete(struct priv *priv,
- const struct rte_eth_fdir_filter *fdir_filter)
-{
- struct mlx5_fdir_filter *mlx5_fdir_filter;
-
- mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
- if (mlx5_fdir_filter != NULL) {
- struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
-
- /* Remove element from list. */
- LIST_REMOVE(mlx5_fdir_filter, next);
-
- /* Destroy flow handle. */
- if (flow != NULL) {
- claim_zero(ibv_exp_destroy_flow(flow));
- mlx5_fdir_filter->flow = NULL;
- }
-
- DEBUG("%p: flow director filter %p deleted",
- (void *)priv, (void *)mlx5_fdir_filter);
-
- /* Delete filter. */
- rte_free(mlx5_fdir_filter);
-
- return 0;
- }
-
- ERROR("%p: flow director delete failed, cannot find filter",
- (void *)priv);
- return EINVAL;
-}
-
-/**
- * Get flow director information.
- *
- * @param priv
- * Private structure.
- * @param[out] fdir_info
- * Resulting flow director information.
- */
-static void
-priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
-{
- struct rte_eth_fdir_masks *mask =
- &priv->dev->data->dev_conf.fdir_conf.mask;
-
- fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
- fdir_info->guarant_spc = 0;
-
- rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
-
- fdir_info->max_flexpayload = 0;
- fdir_info->flow_types_mask[0] = 0;
-
- fdir_info->flex_payload_unit = 0;
- fdir_info->max_flex_payload_segment_num = 0;
- fdir_info->flex_payload_limit = 0;
- memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
-}
-
-/**
- * Deal with flow director operations.
- *
- * @param priv
- * Pointer to private structure.
- * @param filter_op
- * Operation to perform.
- * @param arg
- * Pointer to operation-specific structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
-{
- enum rte_fdir_mode fdir_mode =
- priv->dev->data->dev_conf.fdir_conf.mode;
- int ret = 0;
-
- if (filter_op == RTE_ETH_FILTER_NOP)
- return 0;
-
- if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
- fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- ERROR("%p: flow director mode %d not supported",
- (void *)priv, fdir_mode);
- return EINVAL;
- }
-
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- ret = priv_fdir_filter_add(priv, arg);
- break;
- case RTE_ETH_FILTER_UPDATE:
- ret = priv_fdir_filter_update(priv, arg);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = priv_fdir_filter_delete(priv, arg);
- break;
- case RTE_ETH_FILTER_FLUSH:
- priv_fdir_filter_flush(priv);
- break;
- case RTE_ETH_FILTER_INFO:
- priv_fdir_info_get(priv, arg);
- break;
- default:
- DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
- ret = EINVAL;
- break;
- }
- return ret;
-}
-
-static const struct rte_flow_ops mlx5_flow_ops = {
- .validate = mlx5_flow_validate,
- .create = mlx5_flow_create,
- .destroy = mlx5_flow_destroy,
- .flush = mlx5_flow_flush,
- .query = NULL,
- .isolate = mlx5_flow_isolate,
-};
-
-/**
- * Manage filter operations.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
- * Pointer to operation-specific structure.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- int ret = EINVAL;
- struct priv *priv = dev->data->dev_private;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &mlx5_flow_ops;
- return 0;
- case RTE_ETH_FILTER_FDIR:
- priv_lock(priv);
- ret = priv_fdir_ctrl_func(priv, filter_op, arg);
- priv_unlock(priv);
- break;
- default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- break;
- }
-
- return -ret;
-}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 86be9291..cd99cb07 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -52,13 +52,36 @@
#include "mlx5.h"
#include "mlx5_prm.h"
-/* Number of Work Queue necessary for the DROP queue. */
-#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
-#define MLX5_DROP_WQ_N 4
-#else
-#define MLX5_DROP_WQ_N 1
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
+/* Internet Protocol versions. */
+#define MLX5_IPV4 4
+#define MLX5_IPV6 6
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+struct ibv_counter_set_init_attr {
+ int dummy;
+};
+struct ibv_flow_spec_counter_action {
+ int dummy;
+};
+struct ibv_counter_set {
+ int dummy;
+};
+
+static inline int
+ibv_destroy_counter_set(struct ibv_counter_set *cs)
+{
+ (void)cs;
+ return -ENOTSUP;
+}
#endif
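[Editor's note: the stubs above are a compile-time fallback: when the Verbs counter-set extension is missing, the dummy types keep declarations well-formed and ibv_destroy_counter_set() degrades to returning -ENOTSUP, so call sites need no #ifdef of their own. A minimal hypothetical call site illustrating this:]

#include <errno.h>

static void
flow_counter_release(struct ibv_counter_set *cs)
{
	/* Compiles with or without HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT;
	 * without it the stub simply reports -ENOTSUP. */
	if (cs != NULL && ibv_destroy_counter_set(cs) == -ENOTSUP)
		; /* counters not compiled in, nothing to release */
}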
+/* Dev ops structure defined in mlx5.c */
+extern const struct eth_dev_ops mlx5_dev_ops;
+extern const struct eth_dev_ops mlx5_dev_ops_isolate;
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
@@ -94,19 +117,144 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
const void *default_mask,
void *data);
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+struct mlx5_flow_parse;
+
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+ unsigned int size);
+
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
+
+static int
+mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
+
+/* Hash RX queue types. */
+enum hash_rxq_type {
+ HASH_RXQ_TCPV4,
+ HASH_RXQ_UDPV4,
+ HASH_RXQ_IPV4,
+ HASH_RXQ_TCPV6,
+ HASH_RXQ_UDPV6,
+ HASH_RXQ_IPV6,
+ HASH_RXQ_ETH,
+};
+
+/* Initialization data for hash RX queue. */
+struct hash_rxq_init {
+ uint64_t hash_fields; /* Fields that participate in the hash. */
+ uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
+ unsigned int flow_priority; /* Flow priority to use. */
+ unsigned int ip_version; /* Internet protocol. */
+};
+
+/* Initialization data for hash RX queues. */
+const struct hash_rxq_init hash_rxq_init[] = {
+ [HASH_RXQ_TCPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_UDPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_IPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4),
+ .dpdk_rss_hf = (ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4),
+ .flow_priority = 1,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_TCPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_UDPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_IPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6),
+ .dpdk_rss_hf = (ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6),
+ .flow_priority = 1,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_ETH] = {
+ .hash_fields = 0,
+ .dpdk_rss_hf = 0,
+ .flow_priority = 2,
+ },
+};
+
+/* Number of entries in hash_rxq_init[]. */
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+
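[Editor's note: hash_rxq_init[] ties each hash Rx queue type to its Verbs hash fields, the DPDK RSS flags it serves, and a flow priority (more specific tuples get lower, i.e. higher-precedence, values). A hypothetical helper sketching how the table might be scanned to find the queue type serving a requested rss_hf:]

/* Hypothetical lookup: first hash Rx queue type whose DPDK RSS flags
 * intersect rss_hf; returns hash_rxq_init_n when none matches. */
static unsigned int
hash_rxq_type_for_rss_hf(uint64_t rss_hf)
{
	unsigned int i;

	for (i = 0; i != hash_rxq_init_n; ++i)
		if (hash_rxq_init[i].dpdk_rss_hf & rss_hf)
			return i;
	return hash_rxq_init_n;
}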
+/** Structure for holding counter stats. */
+struct mlx5_flow_counter_stats {
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/** Structure for Drop queue. */
+struct mlx5_hrxq_drop {
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
- struct ibv_exp_wq *wq; /**< Verbs work queue. */
+ struct ibv_wq *wq; /**< Verbs work queue. */
struct ibv_cq *cq; /**< Verbs completion queue. */
- uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
+};
+
+/* Flows structures. */
+struct mlx5_flow {
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+};
+
+/* Drop flows structures. */
+struct mlx5_flow_drop {
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct rxq *rxqs[]; /**< Pointer to the queues array. */
+ uint16_t queues_n; /**< Number of entries in queue[]. */
+ uint16_t (*queues)[]; /**< Queues indexes to use. */
+ struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
+ uint8_t rss_key[40]; /**< copy of the RSS key. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+ struct mlx5_flow_counter_stats counter_stats; /**< The counter stats. */
+ union {
+ struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
+ /**< Flow with Rx queue. */
+ struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
+ };
};
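The drop bit doubles as the discriminator for the anonymous union at the end of the structure: frxq[] is meaningful for regular flows, drxq for drop flows. A minimal sketch of the idea, using stand-in types only:

	#include <stdint.h>
	#include <stdio.h>

	struct flow {
		uint32_t drop:1; /* Discriminator for the union below. */
		union {
			int frxq; /* Meaningful only when !drop. */
			int drxq; /* Meaningful only when drop. */
		};
	};

	static void
	describe(const struct flow *f)
	{
		if (f->drop)
			printf("drop flow: %d\n", f->drxq);
		else
			printf("rx flow: %d\n", f->frxq);
	}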
/** Static initializer for items. */
@@ -157,6 +305,9 @@ static const enum rte_flow_action_type valid_actions[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ RTE_FLOW_ACTION_TYPE_COUNT,
+#endif
RTE_FLOW_ACTION_TYPE_END,
};
@@ -179,7 +330,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_eth_mask,
.mask_sz = sizeof(struct rte_flow_item_eth),
.convert = mlx5_flow_create_eth,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
@@ -208,7 +359,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_ipv4_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv4),
.convert = mlx5_flow_create_ipv4,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
@@ -236,7 +387,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_ipv6_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv6),
.convert = mlx5_flow_create_ipv6,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
@@ -250,7 +401,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_udp_mask,
.mask_sz = sizeof(struct rte_flow_item_udp),
.convert = mlx5_flow_create_udp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.actions = valid_actions,
@@ -263,7 +414,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_tcp_mask,
.mask_sz = sizeof(struct rte_flow_item_tcp),
.convert = mlx5_flow_create_tcp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
@@ -274,33 +425,76 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_vxlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vxlan),
.convert = mlx5_flow_create_vxlan,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+ .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
},
};
/** Structure to pass to the conversion function. */
-struct mlx5_flow {
- struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
- unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+struct mlx5_flow_parse {
uint32_t inner; /**< Set once VXLAN is encountered. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
-};
-
-/** Structure for Drop queue. */
-struct rte_flow_drop {
- struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queue. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
-struct mlx5_flow_action {
- uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t create:1;
+ /**< Whether resources should remain after a validate. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
+ uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
uint16_t queues_n; /**< Number of entries in queues[]. */
+ struct rte_eth_rss_conf rss_conf; /**< RSS configuration. */
+ uint8_t rss_key[40]; /**< Copy of the RSS key. */
+ enum hash_rxq_type layer; /**< Last pattern layer detected. */
+ struct ibv_counter_set *cs; /**< Holds the counter set for the rule. */
+ union {
+ struct {
+ struct ibv_flow_attr *ibv_attr;
+ /**< Pointer to Verbs attributes. */
+ unsigned int offset;
+ /**< Current position or total size of the attribute. */
+ } queue[RTE_DIM(hash_rxq_init)];
+ struct {
+ struct ibv_flow_attr *ibv_attr;
+ /**< Pointer to Verbs attributes. */
+ unsigned int offset;
+ /**< Current position or total size of the attribute. */
+ } drop_q;
+ };
+};
+
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ .query = mlx5_flow_query,
+#else
+ .query = NULL,
+#endif
+ .isolate = mlx5_flow_isolate,
+};
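For reference, a hedged sketch of how an application reaches these callbacks through the generic rte_flow API (assuming a DPDK release where port ids are 16-bit); the wrapper below is hypothetical, only rte_flow_validate() and rte_flow_create() are real DPDK calls.

	#include <stdint.h>
	#include <rte_flow.h>

	static struct rte_flow *
	try_create(uint16_t port, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[])
	{
		struct rte_flow_error err;

		/* Dispatches to mlx5_flow_validate() through mlx5_flow_ops. */
		if (rte_flow_validate(port, attr, pattern, actions, &err) < 0)
			return NULL; /* err.message describes the failure. */
		/* Dispatches to mlx5_flow_create(). */
		return rte_flow_create(port, attr, pattern, actions, &err);
	}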
+
+/* Convert an FDIR request to a generic flow. */
+struct mlx5_fdir {
+ struct rte_flow_attr attr;
+ struct rte_flow_action actions[2];
+ struct rte_flow_item items[4];
+ struct rte_flow_item_eth l2;
+ struct rte_flow_item_eth l2_mask;
+ union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4;
+ struct rte_flow_action_queue queue;
+};
+
+/* Verbs specification header. */
+struct ibv_spec_header {
+ enum ibv_flow_spec_type type;
+ uint16_t size;
};
/**
@@ -367,38 +561,58 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
}
/**
- * Validate a flow supported by the NIC.
+ * Copy the RSS configuration from the user configuration.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
+ * @param rss_conf
+ * User RSS configuration to save.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_flow_convert_rss_conf(struct priv *priv,
+ struct mlx5_flow_parse *parser,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ const struct rte_eth_rss_conf *rss =
+ rss_conf ? rss_conf : &priv->rss_conf;
+
+ if (rss->rss_key_len > 40)
+ return EINVAL;
+ parser->rss_conf.rss_key_len = rss->rss_key_len;
+ parser->rss_conf.rss_hf = rss->rss_hf;
+ memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
+ parser->rss_conf.rss_key = parser->rss_key;
+ return 0;
+}
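The 40-byte bound above matches the embedded rss_key[] storage in the parser. A minimal sketch of the same bounded-copy contract, with illustrative names:

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>

	struct conf {
		uint8_t key[40]; /* Embedded storage, same bound as above. */
		uint32_t key_len;
	};

	static int
	save_key(struct conf *c, const uint8_t *key, uint32_t len)
	{
		if (len > sizeof(c->key))
			return EINVAL; /* Would overflow the embedded storage. */
		memcpy(c->key, key, len);
		c->key_len = len;
		return 0;
	}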
+
+/**
+ * Extract the flow attributes to the parser.
*
* @param priv
* Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] flow
- * Flow structure to update.
- * @param[in, out] action
- * Action structure to update.
+ * @param[in, out] parser
+ * Internal parser structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_validate(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow *flow,
- struct mlx5_flow_action *action)
+priv_flow_convert_attributes(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
{
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
-
(void)priv;
+ (void)parser;
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -406,7 +620,7 @@ priv_flow_validate(struct priv *priv,
"groups are not supported");
return -rte_errno;
}
- if (attr->priority) {
+ if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
@@ -427,56 +641,42 @@ priv_flow_validate(struct priv *priv,
"only ingress is supported");
return -rte_errno;
}
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx5_flow_items *token = NULL;
- unsigned int i;
- int err;
+ return 0;
+}
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
- continue;
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx5_flow_items[items->type];
- break;
- }
- }
- if (!token)
- goto exit_item_not_supported;
- cur_item = token;
- err = mlx5_flow_item_validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
- goto exit_item_not_supported;
- if (flow->ibv_attr && cur_item->convert) {
- err = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- flow);
- if (err)
- goto exit_item_not_supported;
- } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
- if (flow->inner) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "cannot recognize multiple"
- " VXLAN encapsulations");
- return -rte_errno;
- }
- flow->inner = 1;
- }
- flow->offset += cur_item->dst_sz;
- }
+/**
+ * Extract the actions to the parser.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert_actions(struct priv *priv,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
+{
+ /*
+ * Add the default RSS configuration necessary for Verbs to create a
+ * QP even if no RSS is necessary.
+ */
+ priv_flow_convert_rss_conf(priv, parser,
+ (const struct rte_eth_rss_conf *)
+ &priv->rss_conf);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action->drop = 1;
+ parser->drop = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
@@ -486,13 +686,13 @@ priv_flow_validate(struct priv *priv,
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- for (n = 0; n < action->queues_n; ++n) {
- if (action->queues[n] == queue->index) {
+ for (n = 0; n < parser->queues_n; ++n) {
+ if (parser->queues[n] == queue->index) {
found = 1;
break;
}
}
- if (action->queues_n > 1 && !found) {
+ if (parser->queues_n > 1 && !found) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
@@ -500,9 +700,8 @@ priv_flow_validate(struct priv *priv,
return -rte_errno;
}
if (!found) {
- action->queue = 1;
- action->queues_n = 1;
- action->queues[0] = queue->index;
+ parser->queues_n = 1;
+ parser->queues[0] = queue->index;
}
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
@@ -517,12 +716,12 @@ priv_flow_validate(struct priv *priv,
"no valid queues");
return -rte_errno;
}
- if (action->queues_n == 1) {
+ if (parser->queues_n == 1) {
uint16_t found = 0;
- assert(action->queues_n);
+ assert(parser->queues_n);
for (n = 0; n < rss->num; ++n) {
- if (action->queues[0] ==
+ if (parser->queues[0] ==
rss->queue[n]) {
found = 1;
break;
@@ -547,10 +746,17 @@ priv_flow_validate(struct priv *priv,
return -rte_errno;
}
}
- action->queue = 1;
for (n = 0; n < rss->num; ++n)
- action->queues[n] = rss->queue[n];
- action->queues_n = rss->num;
+ parser->queues[n] = rss->queue[n];
+ parser->queues_n = rss->num;
+ if (priv_flow_convert_rss_conf(priv, parser,
+ rss->rss_conf)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "wrong RSS configuration");
+ return -rte_errno;
+ }
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
@@ -570,30 +776,25 @@ priv_flow_validate(struct priv *priv,
" and 16777199");
return -rte_errno;
}
- action->mark = 1;
- action->mark_id = mark->id;
+ parser->mark = 1;
+ parser->mark_id = mark->id;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
- action->mark = 1;
+ parser->mark = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
+ priv->counter_set_supported) {
+ parser->count = 1;
} else {
goto exit_action_not_supported;
}
}
- if (action->mark && !flow->ibv_attr && !action->drop)
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- if (!flow->ibv_attr && action->drop)
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
- if (!action->queue && !action->drop) {
+ if (parser->drop && parser->mark)
+ parser->mark = 0;
+ if (!parser->queues_n && !parser->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
}
return 0;
-exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
exit_action_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "action not supported");
@@ -601,34 +802,467 @@ exit_action_not_supported:
}
/**
- * Validate a flow supported by the NIC.
+ * Validate items.
*
- * @see rte_flow_validate()
- * @see rte_flow_ops
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static int
+priv_flow_convert_items_validate(struct priv *priv,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
{
- struct priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+ unsigned int i;
+
+ (void)priv;
+ /* Initialise the offsets to start after the Verbs attribute. */
+ if (parser->drop) {
+ parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx5_flow_items *token = NULL;
+ unsigned int n;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx5_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = mlx5_flow_item_validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (parser->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ }
+ if (parser->drop) {
+ parser->drop_q.offset += cur_item->dst_sz;
+ } else if (parser->queues_n == 1) {
+ parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
+ } else {
+ for (n = 0; n != hash_rxq_init_n; ++n)
+ parser->queue[n].offset += cur_item->dst_sz;
+ }
+ }
+ if (parser->mark) {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset +=
+ sizeof(struct ibv_flow_spec_action_tag);
+ }
+ if (parser->count) {
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+
+ if (parser->drop) {
+ parser->drop_q.offset += size;
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset += size;
+ }
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+}
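This function is the sizing half of a two-pass scheme: it only accumulates per-queue offsets, priv_flow_convert() allocates the buffers afterwards, and the convert callbacks then write at the recorded offsets. A compact stand-alone sketch of the pattern, all names illustrative:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct dst {
		uint8_t *buf;        /* NULL during the sizing pass. */
		unsigned int offset; /* Bytes consumed (or required) so far. */
	};

	static void
	emit(struct dst *d, const void *spec, unsigned int size)
	{
		if (d->buf) /* Fill pass: write at the recorded offset. */
			memcpy(d->buf + d->offset, spec, size);
		d->offset += size; /* Both passes account for the space. */
	}

	static int
	convert(struct dst *d)
	{
		const uint32_t dummy_spec = 0xdeadbeef;

		emit(d, &dummy_spec, sizeof(dummy_spec)); /* Pass 1: size only. */
		d->buf = calloc(1, d->offset);
		if (!d->buf)
			return -1;
		d->offset = 0;
		emit(d, &dummy_spec, sizeof(dummy_spec)); /* Pass 2: fill. */
		return 0;
	}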
+
+/**
+ * Allocate memory space to store verbs flow attributes.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] priority
+ * Flow priority.
+ * @param[in] size
+ * Amount of byte to allocate.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A verbs flow attribute on success, NULL otherwise.
+ */
+static struct ibv_flow_attr*
+priv_flow_convert_allocate(struct priv *priv,
+ unsigned int priority,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ struct ibv_flow_attr *ibv_attr;
+
+ (void)priv;
+ ibv_attr = rte_calloc(__func__, 1, size, 0);
+ if (!ibv_attr) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate verbs spec attributes.");
+ return NULL;
+ }
+ ibv_attr->priority = priority;
+ return ibv_attr;
+}
+
+/**
+ * Finalise verbs flow attributes.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in, out] parser
+ * Internal parser structure.
+ */
+static void
+priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+{
+ const unsigned int ipv4 =
+ hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
+ const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
+ const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+ const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
+ const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
+ const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+ unsigned int i;
+
+ (void)priv;
+ if (parser->layer == HASH_RXQ_ETH) {
+ goto fill;
+ } else {
+ /*
+ * This layer becomes useless as the pattern defines layers
+ * under it.
+ */
+ rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
+ parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+ }
+ /* Remove the opposite kind of layer, e.g. IPv6 if the pattern is IPv4. */
+ for (i = ohmin; i != (ohmax + 1); ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ /* Remove impossible flows according to the RSS configuration. */
+ if (hash_rxq_init[parser->layer].dpdk_rss_hf &
+ parser->rss_conf.rss_hf) {
+ /* Remove any other flow. */
+ for (i = hmin; i != (hmax + 1); ++i) {
+ if ((i == parser->layer) ||
+ (!parser->queue[i].ibv_attr))
+ continue;
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ } else if (!parser->queue[ip].ibv_attr) {
+ /* No RSS possible with the current configuration. */
+ parser->queues_n = 1;
+ return;
+ }
+fill:
+ /*
+ * Fill missing layers in verbs specifications, or compute the correct
+ * offset to allocate the memory space for the attributes and
+ * specifications.
+ */
+ for (i = 0; i != hash_rxq_init_n - 1; ++i) {
+ union {
+ struct ibv_flow_spec_ipv4_ext ipv4;
+ struct ibv_flow_spec_ipv6 ipv6;
+ struct ibv_flow_spec_tcp_udp udp_tcp;
+ } specs;
+ void *dst;
+ uint16_t size;
+
+ if (i == parser->layer)
+ continue;
+ if (parser->layer == HASH_RXQ_ETH) {
+ if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
+ size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
+ .type = IBV_FLOW_SPEC_IPV4_EXT,
+ .size = size,
+ };
+ } else {
+ size = sizeof(struct ibv_flow_spec_ipv6);
+ specs.ipv6 = (struct ibv_flow_spec_ipv6){
+ .type = IBV_FLOW_SPEC_IPV6,
+ .size = size,
+ };
+ }
+ if (parser->queue[i].ibv_attr) {
+ dst = (void *)((uintptr_t)
+ parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, &specs, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ }
+ parser->queue[i].offset += size;
+ }
+ if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
+ (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
+ size = sizeof(struct ibv_flow_spec_tcp_udp);
+ specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = ((i == HASH_RXQ_UDPV4 ||
+ i == HASH_RXQ_UDPV6) ?
+ IBV_FLOW_SPEC_UDP :
+ IBV_FLOW_SPEC_TCP),
+ .size = size,
+ };
+ if (parser->queue[i].ibv_attr) {
+ dst = (void *)((uintptr_t)
+ parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, &specs, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ }
+ parser->queue[i].offset += size;
+ }
+ }
+}
+
+/**
+ * Validate and convert a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
+{
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+ unsigned int i;
int ret;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
- struct mlx5_flow_action action = {
- .queue = 0,
- .drop = 0,
- .mark = 0,
+
+ /* First step. Validate the attributes, items and actions. */
+ *parser = (struct mlx5_flow_parse){
+ .create = parser->create,
+ .layer = HASH_RXQ_ETH,
.mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues_n = 0,
};
-
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
- &action);
- priv_unlock(priv);
+ ret = priv_flow_convert_attributes(priv, attr, error, parser);
+ if (ret)
+ return ret;
+ ret = priv_flow_convert_actions(priv, actions, error, parser);
+ if (ret)
+ return ret;
+ ret = priv_flow_convert_items_validate(priv, items, error, parser);
+ if (ret)
+ return ret;
+ priv_flow_convert_finalise(priv, parser);
+ /*
+ * Second step.
+ * Allocate the memory space to store verbs specifications.
+ */
+ if (parser->drop) {
+ parser->drop_q.ibv_attr =
+ priv_flow_convert_allocate(priv, attr->priority,
+ parser->drop_q.offset,
+ error);
+ if (!parser->drop_q.ibv_attr)
+ return ENOMEM;
+ parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+ } else if (parser->queues_n == 1) {
+ unsigned int priority =
+ attr->priority +
+ hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+ unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
+
+ parser->queue[HASH_RXQ_ETH].ibv_attr =
+ priv_flow_convert_allocate(priv, priority,
+ offset, error);
+ if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
+ return ENOMEM;
+ parser->queue[HASH_RXQ_ETH].offset =
+ sizeof(struct ibv_flow_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ unsigned int priority =
+ attr->priority +
+ hash_rxq_init[i].flow_priority;
+ unsigned int offset;
+
+ if (!(parser->rss_conf.rss_hf &
+ hash_rxq_init[i].dpdk_rss_hf) &&
+ (i != HASH_RXQ_ETH))
+ continue;
+ offset = parser->queue[i].offset;
+ parser->queue[i].ibv_attr =
+ priv_flow_convert_allocate(priv, priority,
+ offset, error);
+ if (!parser->queue[i].ibv_attr)
+ goto exit_enomem;
+ parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+ }
+ }
+ /* Third step. Parse the pattern and fill the specifications. */
+ parser->inner = 0;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ cur_item = &mlx5_flow_items[items->type];
+ ret = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ parser);
+ if (ret) {
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ goto exit_free;
+ }
+ }
+ if (parser->mark)
+ mlx5_flow_create_flag_mark(parser, parser->mark_id);
+ if (parser->count && parser->create) {
+ mlx5_flow_create_count(priv, parser);
+ if (!parser->cs)
+ goto exit_count_error;
+ }
+ /*
+ * Last step. Complete the missing specifications to reach the
+ * RSS configuration.
+ */
+ if (parser->drop) {
+ /*
+ * The drop queue priority needs to be adjusted to
+ * its most specific layer priority.
+ */
+ parser->drop_q.ibv_attr->priority =
+ attr->priority +
+ hash_rxq_init[parser->layer].flow_priority;
+ } else if (parser->queues_n > 1) {
+ priv_flow_convert_finalise(priv, parser);
+ } else {
+ /*
+ * Action queues have their priority overridden with the
+ * Ethernet priority; it needs to be adjusted to the most
+ * specific layer priority.
+ */
+ parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
+ attr->priority +
+ hash_rxq_init[parser->layer].flow_priority;
+ }
+exit_free:
+ /* Only verification is expected; all resources should be released. */
+ if (!parser->create) {
+ if (parser->drop) {
+ rte_free(parser->drop_q.ibv_attr);
+ parser->drop_q.ibv_attr = NULL;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser->queue[i].ibv_attr) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ }
+ }
return ret;
+exit_enomem:
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser->queue[i].ibv_attr) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ }
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot allocate verbs spec attributes.");
+ return ret;
+exit_count_error:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create counter.");
+ return rte_errno;
+}
+
+/**
+ * Copy the created specification into the flow.
+ *
+ * @param parser
+ * Internal parser structure.
+ * @param src
+ * Created specification.
+ * @param size
+ * Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+ unsigned int size)
+{
+ unsigned int i;
+ void *dst;
+
+ if (parser->drop) {
+ dst = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset);
+ memcpy(dst, src, size);
+ ++parser->drop_q.ibv_attr->num_of_specs;
+ parser->drop_q.offset += size;
+ return;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ /* Specification must be the same L3 type or none. */
+ if (parser->layer == HASH_RXQ_ETH ||
+ (hash_rxq_init[parser->layer].ip_version ==
+ hash_rxq_init[i].ip_version) ||
+ (hash_rxq_init[i].ip_version == 0)) {
+ dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, src, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ parser->queue[i].offset += size;
+ }
+ }
}
/**
@@ -648,35 +1282,35 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
- unsigned int i;
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 2;
- flow->hash_fields = 0;
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *eth = (struct ibv_exp_flow_spec_eth) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = parser->inner | IBV_FLOW_SPEC_ETH,
.size = eth_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth->val.ether_type = spec->type;
- memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth->mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
- eth->val.src_mac[i] &= eth->mask.src_mac[i];
- }
- eth->val.ether_type &= eth->mask.ether_type;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_ETH;
+ if (spec) {
+ unsigned int i;
+
+ if (!mask)
+ mask = default_mask;
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.val.ether_type = spec->type;
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ mlx5_flow_create_copy(parser, &eth, eth_size);
return 0;
}
@@ -697,18 +1331,34 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
- eth->val.vlan_tag &= eth->mask.vlan_tag;
+ if (spec) {
+ unsigned int i;
+ if (!mask)
+ mask = default_mask;
+
+ if (parser->drop) {
+ eth = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset - eth_size);
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+
+ eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset - eth_size);
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ }
+ }
return 0;
}
@@ -729,40 +1379,38 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv4_ext *ipv4;
- unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 1;
- flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
- IBV_EXP_RX_HASH_DST_IPV4);
- ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
.size = ipv4_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4->val.src_ip &= ipv4->mask.src_ip;
- ipv4->val.dst_ip &= ipv4->mask.dst_ip;
- ipv4->val.proto &= ipv4->mask.proto;
- ipv4->val.tos &= ipv4->mask.tos;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_IPV4;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
return 0;
}
@@ -783,43 +1431,42 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv6_ext *ipv6;
- unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
- unsigned int i;
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 1;
- flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
- IBV_EXP_RX_HASH_DST_IPV6);
- ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = parser->inner | IBV_FLOW_SPEC_IPV6,
.size = ipv6_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6->val.src_ip));
- memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6->val.dst_ip));
- memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6->mask.src_ip));
- memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6->mask.dst_ip));
- ipv6->mask.flow_label = mask->hdr.vtc_flow;
- ipv6->mask.next_hdr = mask->hdr.proto;
- ipv6->mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
- ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
- ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
- }
- ipv6->val.flow_label &= ipv6->mask.flow_label;
- ipv6->val.next_hdr &= ipv6->mask.next_hdr;
- ipv6->val.hop_limit &= ipv6->mask.hop_limit;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_IPV6;
+ if (spec) {
+ unsigned int i;
+
+ if (!mask)
+ mask = default_mask;
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ ipv6.mask.flow_label = mask->hdr.vtc_flow;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
return 0;
}
@@ -840,30 +1487,32 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *udp;
- unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
- IBV_EXP_RX_HASH_DST_PORT_UDP);
- udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *udp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = parser->inner | IBV_FLOW_SPEC_UDP,
.size = udp_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- udp->val.dst_port = spec->hdr.dst_port;
- udp->val.src_port = spec->hdr.src_port;
- udp->mask.dst_port = mask->hdr.dst_port;
- udp->mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp->val.src_port &= udp->mask.src_port;
- udp->val.dst_port &= udp->mask.dst_port;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner) {
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_UDPV4;
+ else
+ parser->layer = HASH_RXQ_UDPV6;
+ }
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ mlx5_flow_create_copy(parser, &udp, udp_size);
return 0;
}
@@ -884,30 +1533,32 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *tcp;
- unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
- IBV_EXP_RX_HASH_DST_PORT_TCP);
- tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = parser->inner | IBV_FLOW_SPEC_TCP,
.size = tcp_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- tcp->val.dst_port = spec->hdr.dst_port;
- tcp->val.src_port = spec->hdr.src_port;
- tcp->mask.dst_port = mask->hdr.dst_port;
- tcp->mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp->val.src_port &= tcp->mask.src_port;
- tcp->val.dst_port &= tcp->mask.dst_port;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner) {
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_TCPV4;
+ else
+ parser->layer = HASH_RXQ_TCPV6;
+ }
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ mlx5_flow_create_copy(parser, &tcp, tcp_size);
return 0;
}
@@ -928,57 +1579,97 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tunnel *vxlan;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
union vni {
uint32_t vlan_id;
uint8_t vni[4];
} id;
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
id.vni[0] = 0;
- vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *vxlan = (struct ibv_exp_flow_spec_tunnel) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
- flow->inner = IBV_EXP_FLOW_SPEC_INNER;
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan->val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan->mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
+ * layer is defined in the Verbs specification, it is interpreted as a
+ * wildcard and all packets will match this rule; if it follows a full
+ * stack layer (e.g. eth / ipv4 / udp), all packets matching the
+ * preceding layers will also match this rule.
+ * To avoid such a situation, VNI 0 is currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return EINVAL;
+ mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
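The union above packs the 24-bit VNI into the upper three bytes of a 32-bit tunnel id, keeping byte 0 clear. A self-contained illustration of that packing, purely a sketch:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const uint8_t vni[3] = { 0x12, 0x34, 0x56 };
		union {
			uint32_t vlan_id;
			uint8_t bytes[4];
		} id = { .vlan_id = 0 };

		memcpy(&id.bytes[1], vni, 3); /* Byte 0 stays zero. */
		/* The numeric value is endian-dependent. */
		printf("tunnel_id = 0x%08x\n", (unsigned int)id.vlan_id);
		return 0;
	}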
/**
* Convert mark/flag action to Verbs specification.
*
- * @param flow
- * Pointer to MLX5 flow structure.
+ * @param parser
+ * Internal parser structure.
* @param mark_id
* Mark identifier.
*/
static int
-mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
{
- struct ibv_exp_flow_spec_action_tag *tag;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
-
- tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tag = (struct ibv_exp_flow_spec_action_tag){
- .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
.size = size,
.tag_id = mlx5_flow_mark_set(mark_id),
};
- ++flow->ibv_attr->num_of_specs;
+
+ assert(parser->mark);
+ mlx5_flow_create_copy(parser, &tag, size);
+ return 0;
+}
+
+/**
+ * Convert count action to Verbs specification.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Pointer to MLX5 flow parser structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+mlx5_flow_create_count(struct priv *priv __rte_unused,
+ struct mlx5_flow_parse *parser __rte_unused)
+{
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_counter_set_init_attr init_attr = {0};
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ .counter_set_handle = 0,
+ };
+
+ init_attr.counter_set_id = 0;
+ parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
+ if (!parser->cs)
+ return EINVAL;
+ counter.counter_set_handle = parser->cs->handle;
+ mlx5_flow_create_copy(parser, &counter, size);
+#endif
return 0;
}
@@ -987,59 +1678,127 @@ mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
*
* @param priv
* Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
* @param flow
- * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * Pointer to the rte_flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow if the rule could be created.
+ * 0 on success, errno value on failure.
*/
-static struct rte_flow *
+static int
priv_flow_create_action_queue_drop(struct priv *priv,
- struct mlx5_flow *flow,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- struct ibv_exp_flow_spec_action_drop *drop;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
+ struct ibv_flow_spec_action_drop *drop;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ int err = 0;
assert(priv->pd);
assert(priv->ctx);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
- }
- rte_flow->drop = 1;
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *drop = (struct ibv_exp_flow_spec_action_drop){
- .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+ flow->drop = 1;
+ drop = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
.size = size,
};
- ++flow->ibv_attr->num_of_specs;
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
- rte_flow->ibv_attr = flow->ibv_attr;
- if (!priv->started)
- return rte_flow;
- rte_flow->qp = priv->flow_drop_queue->qp;
- rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
- rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
+ ++parser->drop_q.ibv_attr->num_of_specs;
+ parser->drop_q.offset += size;
+ flow->drxq.ibv_attr = parser->drop_q.ibv_attr;
+ if (parser->count)
+ flow->cs = parser->cs;
+ if (!priv->dev->data->dev_started)
+ return 0;
+ parser->drop_q.ibv_attr = NULL;
+ flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
+ if (!flow->drxq.ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
+ err = ENOMEM;
goto error;
}
- return rte_flow;
+ return 0;
error:
- assert(rte_flow);
- rte_free(rte_flow);
- return NULL;
+ assert(flow);
+ if (flow->drxq.ibv_flow) {
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ flow->drxq.ibv_flow = NULL;
+ }
+ if (flow->drxq.ibv_attr) {
+ rte_free(flow->drxq.ibv_attr);
+ flow->drxq.ibv_attr = NULL;
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ parser->cs = NULL;
+ }
+ return err;
+}
+
+/**
+ * Create hash Rx queues when RSS is enabled.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
+ * @param flow
+ * Pointer to the rte_flow.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_create_action_queue_rss(struct priv *priv,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ uint64_t hash_fields;
+
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
+ parser->queue[i].ibv_attr = NULL;
+ hash_fields = hash_rxq_init[i].hash_fields;
+ if (!priv->dev->data->dev_started)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
+ if (flow->frxq[i].hrxq)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot create hash rxq");
+ return ENOMEM;
+ }
+ }
+ return 0;
}
/**
@@ -1047,112 +1806,79 @@ error:
*
* @param priv
* Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
* @param flow
- * MLX5 flow attributes (filled by mlx5_flow_validate()).
- * @param action
- * Target action structure.
+ * Pointer to the rte_flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow if the rule could be created.
+ * 0 on success, an errno value otherwise and rte_errno is set.
*/
-static struct rte_flow *
+static int
priv_flow_create_action_queue(struct priv *priv,
- struct mlx5_flow *flow,
- struct mlx5_flow_action *action,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
+ int err = 0;
unsigned int i;
- unsigned int j;
- const unsigned int wqs_n = 1 << log2above(action->queues_n);
- struct ibv_exp_wq *wqs[wqs_n];
assert(priv->pd);
assert(priv->ctx);
- assert(!action->drop);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
- sizeof(*rte_flow->rxqs) * action->queues_n, 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
- }
- for (i = 0; i < action->queues_n; ++i) {
- struct rxq_ctrl *rxq;
-
- rxq = container_of((*priv->rxqs)[action->queues[i]],
- struct rxq_ctrl, rxq);
- wqs[i] = rxq->wq;
- rte_flow->rxqs[i] = &rxq->rxq;
- ++rte_flow->rxqs_n;
- rxq->rxq.mark |= action->mark;
- }
- /* finalise indirection table. */
- for (j = 0; i < wqs_n; ++i, ++j) {
- wqs[i] = wqs[j];
- if (j == action->queues_n)
- j = 0;
- }
- rte_flow->mark = action->mark;
- rte_flow->ibv_attr = flow->ibv_attr;
- rte_flow->hash_fields = flow->hash_fields;
- rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
- .pd = priv->pd,
- .log_ind_tbl_size = log2above(action->queues_n),
- .ind_tbl = wqs,
- .comp_mask = 0,
- });
- if (!rte_flow->ind_table) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate indirection table");
- goto error;
- }
- rte_flow->qp = ibv_exp_create_qp(
- priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
- .rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = rte_flow->hash_fields,
- .rwq_ind_tbl = rte_flow->ind_table,
- },
- .port_num = priv->port,
- });
- if (!rte_flow->qp) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate QP");
+ assert(!parser->drop);
+ err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
+ if (err)
goto error;
+ if (parser->count)
+ flow->cs = parser->cs;
+ if (!priv->dev->data->dev_started)
+ return 0;
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].hrxq)
+ continue;
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ DEBUG("%p type %d QP %p ibv_flow %p",
+ (void *)flow, i,
+ (void *)flow->frxq[i].hrxq,
+ (void *)flow->frxq[i].ibv_flow);
}
- if (!priv->started)
- return rte_flow;
- rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
- rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- goto error;
+ for (i = 0; i != parser->queues_n; ++i) {
+ struct mlx5_rxq_data *q =
+ (*priv->rxqs)[parser->queues[i]];
+
+ q->mark |= parser->mark;
}
- return rte_flow;
+ return 0;
error:
- assert(rte_flow);
- if (rte_flow->qp)
- ibv_destroy_qp(rte_flow->qp);
- if (rte_flow->ind_table)
- ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
- rte_free(rte_flow);
- return NULL;
+ assert(flow);
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (flow->frxq[i].ibv_flow) {
+ struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
+
+ claim_zero(ibv_destroy_flow(ibv_flow));
+ }
+ if (flow->frxq[i].hrxq)
+ mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ if (flow->frxq[i].ibv_attr)
+ rte_free(flow->frxq[i].ibv_attr);
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ parser->cs = NULL;
+ }
+ return err;
}
/**
@@ -1160,6 +1886,8 @@ error:
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
@@ -1174,65 +1902,86 @@ error:
*/
static struct rte_flow *
priv_flow_create(struct priv *priv,
+ struct mlx5_flows *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
- struct mlx5_flow_action action = {
- .queue = 0,
- .drop = 0,
- .mark = 0,
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues_n = 0,
- };
+ struct mlx5_flow_parse parser = { .create = 1, };
+ struct rte_flow *flow = NULL;
+ unsigned int i;
int err;
- err = priv_flow_validate(priv, attr, items, actions, error, &flow,
- &action);
+ err = priv_flow_convert(priv, attr, items, actions, error, &parser);
if (err)
goto exit;
- flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- flow.offset = sizeof(struct ibv_exp_flow_attr);
- if (!flow.ibv_attr) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate ibv_attr memory");
- goto exit;
+ flow = rte_calloc(__func__, 1,
+ sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
+ 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow memory");
+ return NULL;
}
- *flow.ibv_attr = (struct ibv_exp_flow_attr){
- .type = IBV_EXP_FLOW_ATTR_NORMAL,
- .size = sizeof(struct ibv_exp_flow_attr),
- .priority = attr->priority,
- .num_of_specs = 0,
- .port = 0,
- .flags = 0,
- .reserved = 0,
- };
- flow.inner = 0;
- flow.hash_fields = 0;
- claim_zero(priv_flow_validate(priv, attr, items, actions,
- error, &flow, &action));
- if (action.mark && !action.drop) {
- mlx5_flow_create_flag_mark(&flow, action.mark_id);
- flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
- }
- if (action.drop)
- rte_flow =
- priv_flow_create_action_queue_drop(priv, &flow, error);
- else
- rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
+ /* Copy the queue configuration. */
+ flow->queues = (uint16_t (*)[])(flow + 1);
+ memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
+ flow->queues_n = parser.queues_n;
+ /* Copy RSS configuration. */
+ flow->rss_conf = parser.rss_conf;
+ flow->rss_conf.rss_key = flow->rss_key;
+ memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
+ /* Finalise the flow. */
+ if (parser.drop)
+ err = priv_flow_create_action_queue_drop(priv, &parser, flow,
error);
- if (!rte_flow)
+ else
+ err = priv_flow_create_action_queue(priv, &parser, flow, error);
+ if (err)
goto exit;
- return rte_flow;
+ TAILQ_INSERT_TAIL(list, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ return flow;
exit:
- rte_free(flow.ibv_attr);
+ if (parser.drop) {
+ rte_free(parser.drop_q.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
+ }
+ }
+ rte_free(flow);
return NULL;
}
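The queue array is carved out of the same allocation as the flow itself, which is what the (uint16_t (*)[])(flow + 1) cast expresses. A stand-alone sketch of that single-allocation layout, with illustrative names:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct flow_hdr {
		uint16_t queues_n;    /* Number of entries in the trailing array. */
		uint16_t (*queues)[]; /* Points just past this header. */
	};

	static struct flow_hdr *
	flow_alloc(const uint16_t *queues, uint16_t n)
	{
		struct flow_hdr *f = calloc(1, sizeof(*f) + n * sizeof(uint16_t));

		if (!f)
			return NULL;
		f->queues = (uint16_t (*)[])(f + 1); /* Array follows the header. */
		memcpy(f->queues, queues, n * sizeof(uint16_t));
		f->queues_n = n;
		return f;
	}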
/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx5_flow_parse parser = { .create = 0, };
+
+ priv_lock(priv);
+ ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
+ priv_unlock(priv);
+ return ret;
+}
+
+/**
* Create a flow.
*
* @see rte_flow_create()
@@ -1249,11 +1998,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow *flow;
priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- TAILQ_INSERT_TAIL(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
+ flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+ error);
priv_unlock(priv);
return flow;
}
@@ -1263,121 +2009,95 @@ mlx5_flow_create(struct rte_eth_dev *dev,
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
priv_flow_destroy(struct priv *priv,
+ struct mlx5_flows *list,
struct rte_flow *flow)
{
- TAILQ_REMOVE(&priv->flows, flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
- if (flow->drop)
+ unsigned int i;
+
+ if (flow->drop || !flow->mark)
goto free;
- if (flow->qp)
- claim_zero(ibv_destroy_qp(flow->qp));
- if (flow->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
- if (flow->drop && flow->wq)
- claim_zero(ibv_exp_destroy_wq(flow->wq));
- if (flow->drop && flow->cq)
- claim_zero(ibv_destroy_cq(flow->cq));
- if (flow->mark) {
+ for (i = 0; i != flow->queues_n; ++i) {
struct rte_flow *tmp;
- struct rxq *rxq;
- uint32_t mark_n = 0;
- uint32_t queue_n;
+ int mark = 0;
/*
* To remove the mark from the queue, the queue must not be
* present in any other marked flow (RSS or not).
*/
- for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
- rxq = flow->rxqs[queue_n];
- for (tmp = TAILQ_FIRST(&priv->flows);
- tmp;
- tmp = TAILQ_NEXT(tmp, next)) {
- uint32_t tqueue_n;
-
- if (tmp->drop)
+ TAILQ_FOREACH(tmp, list, next) {
+ unsigned int j;
+ uint16_t *tqs = NULL;
+ uint16_t tq_n = 0;
+
+ if (!tmp->mark)
+ continue;
+ for (j = 0; j != hash_rxq_init_n; ++j) {
+ if (!tmp->frxq[j].hrxq)
continue;
- for (tqueue_n = 0;
- tqueue_n < tmp->rxqs_n;
- ++tqueue_n) {
- struct rxq *trxq;
-
- trxq = tmp->rxqs[tqueue_n];
- if (rxq == trxq)
- ++mark_n;
- }
+ tqs = tmp->frxq[j].hrxq->ind_table->queues;
+ tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
}
- rxq->mark = !!mark_n;
+ if (!tq_n)
+ continue;
+ for (j = 0; (j != tq_n) && !mark; j++)
+ if (tqs[j] == (*flow->queues)[i])
+ mark = 1;
}
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
}
free:
- rte_free(flow->ibv_attr);
+ if (flow->drop) {
+ if (flow->drxq.ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ rte_free(flow->drxq.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ struct mlx5_flow *frxq = &flow->frxq[i];
+
+ if (frxq->ibv_flow)
+ claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+ if (frxq->hrxq)
+ mlx5_priv_hrxq_release(priv, frxq->hrxq);
+ if (frxq->ibv_attr)
+ rte_free(frxq->ibv_attr);
+ }
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ }
+ TAILQ_REMOVE(list, flow, next);
DEBUG("Flow destroyed %p", (void *)flow);
rte_free(flow);
}
/**
- * Destroy a flow.
- *
- * @see rte_flow_destroy()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
-
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, flow);
- priv_unlock(priv);
- return 0;
-}
-
-/**
* Destroy all flows.
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
- while (!TAILQ_EMPTY(&priv->flows)) {
+ while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
- flow = TAILQ_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
+ flow = TAILQ_FIRST(list);
+ priv_flow_destroy(priv, list, flow);
}
}
/**
- * Destroy all flows.
- *
- * @see rte_flow_flush()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
-
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv);
- priv_unlock(priv);
- return 0;
-}
-
-/**
* Create drop queue.
*
* @param priv
@@ -1386,11 +2106,10 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* @return
* 0 on success.
*/
-static int
+int
priv_flow_create_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = NULL;
- unsigned int i;
+ struct mlx5_hrxq_drop *fdq = NULL;
assert(priv->pd);
assert(priv->ctx);
@@ -1399,57 +2118,50 @@ priv_flow_create_drop_queue(struct priv *priv)
WARN("cannot allocate memory for drop queue");
goto error;
}
- fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
+ fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
goto error;
}
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
- &(struct ibv_exp_wq_init_attr){
- .wq_type = IBV_EXP_WQT_RQ,
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- .pd = priv->pd,
- .cq = fdq->cq,
- });
- if (!fdq->wqs[i]) {
- WARN("cannot allocate WQ for drop queue");
- goto error;
- }
- }
- fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
+ fdq->wq = ibv_create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
.pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wq) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
- .ind_tbl = fdq->wqs,
+ .ind_tbl = &fdq->wq,
.comp_mask = 0,
});
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
goto error;
}
- fdq->qp = ibv_exp_create_qp(priv->ctx,
- &(struct ibv_exp_qp_init_attr){
+ fdq->qp = ibv_create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_hash_default_key_len,
.rx_hash_key = rss_hash_default_key,
.rx_hash_fields_mask = 0,
- .rwq_ind_tbl = fdq->ind_table,
},
- .port_num = priv->port,
- });
+ .rwq_ind_tbl = fdq->ind_table,
+ .pd = priv->pd
+ });
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
goto error;
@@ -1460,11 +2172,9 @@ error:
if (fdq->qp)
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- if (fdq->wqs[i])
- claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
- }
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
if (fdq)
@@ -1479,22 +2189,19 @@ error:
* @param priv
* Pointer to private structure.
*/
-static void
+void
priv_flow_delete_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = priv->flow_drop_queue;
- unsigned int i;
+ struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
if (!fdq)
return;
if (fdq->qp)
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- if (fdq->wqs[i])
- claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
- }
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
rte_free(fdq);
@@ -1504,28 +2211,49 @@ priv_flow_delete_drop_queue(struct priv *priv)
/**
* Remove all flows.
*
- * Called by dev_stop() to remove all flows.
- *
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
- flow->ibv_flow = NULL;
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ unsigned int i;
+
+ if (flow->drop) {
+ if (!flow->drxq.ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ flow->drxq.ibv_flow = NULL;
+ /* Next flow. */
+ continue;
+ }
if (flow->mark) {
- unsigned int n;
+ struct mlx5_ind_table_ibv *ind_tbl = NULL;
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 0;
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].hrxq)
+ continue;
+ ind_tbl = flow->frxq[i].hrxq->ind_table;
+ }
+ assert(ind_tbl);
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ flow->frxq[i].ibv_flow = NULL;
+ mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ flow->frxq[i].hrxq = NULL;
}
DEBUG("Flow %p removed", (void *)flow);
}
- priv_flow_delete_drop_queue(priv);
}
/**
@@ -1533,75 +2261,321 @@ priv_flow_stop(struct priv *priv)
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*
* @return
 *   0 on success, an errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
- int ret;
struct rte_flow *flow;
- ret = priv_flow_create_drop_queue(priv);
- if (ret)
- return -1;
- TAILQ_FOREACH(flow, &priv->flows, next) {
- struct ibv_qp *qp;
+ TAILQ_FOREACH(flow, list, next) {
+ unsigned int i;
- if (flow->drop)
- qp = priv->flow_drop_queue->qp;
- else
- qp = flow->qp;
- flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
- if (!flow->ibv_flow) {
- DEBUG("Flow %p cannot be applied", (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
+ if (flow->drop) {
+ flow->drxq.ibv_flow =
+ ibv_create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
+ if (!flow->drxq.ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ /* Next flow. */
+ continue;
}
- DEBUG("Flow %p applied", (void *)flow);
- if (flow->mark) {
- unsigned int n;
-
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 1;
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_attr)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq[i].hrxq)
+ goto flow_create;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
}
+ if (!flow->mark)
+ continue;
+ for (i = 0; i != flow->queues_n; ++i)
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
}
return 0;
}
/**
- * Verify if the Rx queue is used in a flow.
+ * Verify that the flow list is empty.
*
* @param priv
- * Pointer to private structure.
- * @param rxq
- * Pointer to the queue to search.
+ * Pointer to private structure.
+ *
+ * @return the number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
*
* @return
- * Nonzero if the queue is used by a flow.
+ * 0 on success.
*/
int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
struct rte_flow *flow;
+ struct rte_flow_error error;
+ unsigned int i;
+ union {
+ struct rte_flow_action_rss rss;
+ struct {
+ const struct rte_eth_rss_conf *rss_conf;
+ uint16_t num;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ } local;
+ } action_rss;
+
+ if (!priv->reta_idx_n)
+ return EINVAL;
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ action_rss.local.queue[i] = (*priv->reta_idx)[i];
+ action_rss.local.rss_conf = &priv->rss_conf;
+ action_rss.local.num = priv->reta_idx_n;
+ actions[0].conf = (const void *)&action_rss.rss;
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+ &error);
+ if (!flow)
+ return rte_errno;
+ return 0;
+}
- for (flow = TAILQ_FIRST(&priv->flows);
- flow;
- flow = TAILQ_NEXT(flow, next)) {
- unsigned int n;
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
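mlx5_ctrl_flow() is the short form used by the driver's traffic-enable path. A hedged sketch of installing a broadcast control rule with it; the caller name and the all-ones address are illustrative, and the prototype is assumed to come from the driver-internal mlx5.h:

#include <rte_ether.h>
#include <rte_flow.h>
#include "mlx5.h" /* assumed include path for the internal prototype */

/* Install a control rule matching broadcast frames; spec and mask both
 * carry the all-ones destination MAC. Sketch only. */
static int
enable_broadcast_rule(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	return mlx5_ctrl_flow(dev, &bcast, &bcast);
}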
- if (flow->drop)
- continue;
- for (n = 0; n < flow->rxqs_n; ++n) {
- if (flow->rxqs[n] == rxq)
- return 1;
- }
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, &priv->flows, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv, &priv->flows);
+ priv_unlock(priv);
+ return 0;
+}
+
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+/**
+ * Query flow counter.
+ *
+ * @param cs
+ *   The counter set.
+ * @param counter_stats
+ *   Stored counter values used to compute the returned deltas.
+ * @param query_count
+ *   Query result, filled with the hit and byte deltas.
+ * @param error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_query_count(struct ibv_counter_set *cs,
+ struct mlx5_flow_counter_stats *counter_stats,
+ struct rte_flow_query_count *query_count,
+ struct rte_flow_error *error)
+{
+ uint64_t counters[2];
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+
+ if (res) {
+ rte_flow_error_set(error, -res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ return -res;
+ }
+ query_count->hits_set = 1;
+ query_count->bytes_set = 1;
+ query_count->hits = counters[0] - counter_stats->hits;
+ query_count->bytes = counters[1] - counter_stats->bytes;
+ if (query_count->reset) {
+ counter_stats->hits = counters[0];
+ counter_stats->bytes = counters[1];
}
return 0;
}
/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action __rte_unused,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int res = EINVAL;
+
+ priv_lock(priv);
+ if (flow->cs) {
+ res = priv_flow_query_count(flow->cs,
+ &flow->counter_stats,
+ (struct rte_flow_query_count *)data,
+ error);
+ } else {
+ rte_flow_error_set(error, res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no counter found for flow");
+ }
+ priv_unlock(priv);
+ return -res;
+}
+#endif
+
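priv_flow_query_count() returns deltas against the stored snapshot and re-arms the snapshot when query_count->reset is set. Through the public API this surfaces as rte_flow_query() with a rte_flow_query_count payload; a hedged usage sketch for a flow created with a COUNT action:

#include <rte_flow.h>

/* Read and reset the hit/byte counters of a flow created with a
 * COUNT action; returns 0 on success, non-zero otherwise. */
static int
read_flow_counters(uint16_t port_id, struct rte_flow *flow,
		   uint64_t *hits, uint64_t *bytes)
{
	struct rte_flow_query_count query = { .reset = 1 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
			     &query, &error);
	if (ret)
		return ret;
	*hits = query.hits_set ? query.hits : 0;
	*bytes = query.bytes_set ? query.bytes : 0;
	return 0;
}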
+/**
* Isolated mode.
*
* @see rte_flow_isolate()
@@ -1615,7 +2589,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
priv_lock(priv);
- if (priv->started) {
+ if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -1624,6 +2598,497 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
return -rte_errno;
}
priv->isolated = !!enable;
+ if (enable)
+ priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ priv->dev->dev_ops = &mlx5_dev_ops;
priv_unlock(priv);
return 0;
}
+
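Beyond setting priv->isolated, entering isolated mode now swaps the whole dev_ops table so only flow-related callbacks stay reachable. Applications must request it before starting the port, as the EBUSY path above enforces; a hedged sketch:

#include <rte_flow.h>

/* Enter isolated mode; must be called while the port is stopped,
 * otherwise the driver answers EBUSY. */
static int
enter_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_isolate(port_id, 1, &error);
}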
+/**
+ * Convert a flow director filter to a generic flow.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ * @param attributes
+ * Generic flow parameters structure.
+ *
+ * @return
+ * 0 on success, errno value on error.
+ */
+static int
+priv_fdir_filter_convert(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct mlx5_fdir *attributes)
+{
+ const struct rte_eth_fdir_input *input = &fdir_filter->input;
+
+ /* Validate queue number. */
+ if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
+ ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
+ return EINVAL;
+ }
+ attributes->attr.ingress = 1;
+ attributes->items[0] = (struct rte_flow_item) {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &attributes->l2,
+ .mask = &attributes->l2_mask,
+ };
+ switch (fdir_filter->action.behavior) {
+ case RTE_ETH_FDIR_ACCEPT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &attributes->queue,
+ };
+ break;
+ case RTE_ETH_FDIR_REJECT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_DROP,
+ };
+ break;
+ default:
+ ERROR("invalid behavior %d", fdir_filter->action.behavior);
+ return ENOTSUP;
+ }
+ attributes->queue.index = fdir_filter->action.rx_queue;
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.udp4_flow.ip.src_ip,
+ .dst_addr = input->flow.udp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.udp4_flow.ip.ttl,
+ .type_of_service = input->flow.udp4_flow.ip.tos,
+ .next_proto_id = input->flow.udp4_flow.ip.proto,
+ };
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.tcp4_flow.ip.src_ip,
+ .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.tcp4_flow.ip.ttl,
+ .type_of_service = input->flow.tcp4_flow.ip.tos,
+ .next_proto_id = input->flow.tcp4_flow.ip.proto,
+ };
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp4_flow.src_port,
+ .dst_port = input->flow.tcp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.udp6_flow.ip.hop_limits,
+ .proto = input->flow.udp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.udp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.udp6_flow.ip.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp6_flow.src_port,
+ .dst_port = input->flow.udp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
+ .proto = input->flow.tcp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.tcp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.tcp6_flow.ip.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp6_flow.src_port,
+ .dst_port = input->flow.tcp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ break;
+ default:
+ ERROR("invalid flow type%d",
+ fdir_filter->input.flow_type);
+ return ENOTSUP;
+ }
+ return 0;
+}
+
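For reference, a hedged sketch of the kind of legacy filter priv_fdir_filter_convert() translates: an IPv4/UDP perfect filter steering matches to Rx queue 1. All field values are arbitrary; addresses and ports are big endian per rte_eth_ctrl.h conventions:

#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

/* Steer IPv4/UDP packets from 10.0.0.1 port 53 to Rx queue 1. */
static int
add_udp4_fdir_filter(uint16_t port_id)
{
	struct rte_eth_fdir_filter filter = {
		.action = {
			.behavior = RTE_ETH_FDIR_ACCEPT,
			.rx_queue = 1,
		},
	};

	filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	filter.input.flow.udp4_flow.ip.src_ip =
		rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
	filter.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(53);
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &filter);
}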
+/**
+ * Add new flow director filter and store it in list.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_add(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ .l2_mask = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ },
+ };
+ struct mlx5_flow_parse parser = {
+ .layer = HASH_RXQ_ETH,
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ int ret;
+
+ ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ if (ret)
+ return -ret;
+ ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ attributes.actions, &error, &parser);
+ if (ret)
+ return -ret;
+ flow = priv_flow_create(priv,
+ &priv->flows,
+ &attributes.attr,
+ attributes.items,
+ attributes.actions,
+ &error);
+ if (flow) {
+ DEBUG("FDIR created %p", (void *)flow);
+ return 0;
+ }
+ return ENOTSUP;
+}
+
+/**
+ * Delete specific filter.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Filter to be deleted.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_delete(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ };
+ struct mlx5_flow_parse parser = {
+ .create = 1,
+ .layer = HASH_RXQ_ETH,
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret;
+
+ ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ if (ret)
+ return -ret;
+ ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ attributes.actions, &error, &parser);
+ if (ret)
+ goto exit;
+ /*
+	 * Special case for the drop action, which is only appended to the
+	 * specifications when a flow is created. At this point the drop
+	 * specification is still missing and must be added before comparing
+	 * against existing flows.
+ */
+ if (parser.drop) {
+ struct ibv_flow_spec_action_drop *drop;
+
+ drop = (void *)((uintptr_t)parser.drop_q.ibv_attr +
+ parser.drop_q.offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ };
+ parser.drop_q.ibv_attr->num_of_specs++;
+ }
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ struct ibv_flow_attr *attr;
+ struct ibv_spec_header *attr_h;
+ void *spec;
+ struct ibv_flow_attr *flow_attr;
+ struct ibv_spec_header *flow_h;
+ void *flow_spec;
+ unsigned int specs_n;
+
+ if (parser.drop)
+ attr = parser.drop_q.ibv_attr;
+ else
+ attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
+ if (flow->drop)
+ flow_attr = flow->drxq.ibv_attr;
+ else
+ flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
+ /* Compare first the attributes. */
+ if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
+ continue;
+ if (attr->num_of_specs == 0)
+ continue;
+ spec = (void *)((uintptr_t)attr +
+ sizeof(struct ibv_flow_attr));
+ flow_spec = (void *)((uintptr_t)flow_attr +
+ sizeof(struct ibv_flow_attr));
+ specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
+ for (i = 0; i != specs_n; ++i) {
+ attr_h = spec;
+ flow_h = flow_spec;
+ if (memcmp(spec, flow_spec,
+ RTE_MIN(attr_h->size, flow_h->size)))
+ continue;
+ spec = (void *)((uintptr_t)attr + attr_h->size);
+ flow_spec = (void *)((uintptr_t)flow_attr +
+ flow_h->size);
+ }
+		/* At this point, the flow matches. */
+ break;
+ }
+ if (flow)
+ priv_flow_destroy(priv, &priv->flows, flow);
+exit:
+ if (parser.drop) {
+ rte_free(parser.drop_q.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
+ }
+ }
+ return -ret;
+}
+
+/**
+ * Update queue for specific filter.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Filter to be updated.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_update(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = priv_fdir_filter_delete(priv, fdir_filter);
+ if (ret)
+ return ret;
+ ret = priv_fdir_filter_add(priv, fdir_filter);
+ return ret;
+}
+
+/**
+ * Flush all filters.
+ *
+ * @param priv
+ * Private structure.
+ */
+static void
+priv_fdir_filter_flush(struct priv *priv)
+{
+ priv_flow_flush(priv, &priv->flows);
+}
+
+/**
+ * Get flow director information.
+ *
+ * @param priv
+ * Private structure.
+ * @param[out] fdir_info
+ * Resulting flow director information.
+ */
+static void
+priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+{
+ struct rte_eth_fdir_masks *mask =
+ &priv->dev->data->dev_conf.fdir_conf.mask;
+
+ fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+ fdir_info->guarant_spc = 0;
+ rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
+ fdir_info->max_flexpayload = 0;
+ fdir_info->flow_types_mask[0] = 0;
+ fdir_info->flex_payload_unit = 0;
+ fdir_info->max_flex_payload_segment_num = 0;
+ fdir_info->flex_payload_limit = 0;
+ memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
+}
+
+/**
+ * Deal with flow director operations.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+{
+ enum rte_fdir_mode fdir_mode =
+ priv->dev->data->dev_conf.fdir_conf.mode;
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+ if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
+ fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ ERROR("%p: flow director mode %d not supported",
+ (void *)priv, fdir_mode);
+ return EINVAL;
+ }
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = priv_fdir_filter_add(priv, arg);
+ break;
+ case RTE_ETH_FILTER_UPDATE:
+ ret = priv_fdir_filter_update(priv, arg);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = priv_fdir_filter_delete(priv, arg);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ priv_fdir_filter_flush(priv);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ priv_fdir_info_get(priv, arg);
+ break;
+ default:
+ DEBUG("%p: unknown operation %u", (void *)priv,
+ filter_op);
+ ret = EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = EINVAL;
+ struct priv *priv = dev->data->dev_private;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_FDIR:
+ priv_lock(priv);
+ ret = priv_fdir_ctrl_func(priv, filter_op, arg);
+ priv_unlock(priv);
+ break;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ return -ret;
+}
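The RTE_ETH_FILTER_GENERIC branch above is how the rte_flow layer discovers the driver's operation table. A hedged sketch of the lookup an application (or librte_ether itself) performs:

#include <rte_ethdev.h>
#include <rte_flow_driver.h>

/* Fetch the rte_flow operations exposed by a port; NULL means the
 * driver does not implement the generic filter type. */
static const struct rte_flow_ops *
get_flow_ops(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}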
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 8489ea67..d17b991e 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -51,16 +51,9 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -90,112 +83,6 @@ priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
}
/**
- * Delete MAC flow steering rule.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param mac_index
- * MAC address index.
- * @param vlan_index
- * VLAN index to use.
- */
-static void
-hash_rxq_del_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
- unsigned int vlan_index)
-{
-#ifndef NDEBUG
- const uint8_t (*mac)[ETHER_ADDR_LEN] =
- (const uint8_t (*)[ETHER_ADDR_LEN])
- hash_rxq->priv->mac[mac_index].addr_bytes;
-#endif
-
- assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
- assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index]));
- if (hash_rxq->mac_flow[mac_index][vlan_index] == NULL)
- return;
- DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
- " VLAN index %u",
- (void *)hash_rxq,
- (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
- mac_index,
- vlan_index);
- claim_zero(ibv_exp_destroy_flow(hash_rxq->mac_flow
- [mac_index][vlan_index]));
- hash_rxq->mac_flow[mac_index][vlan_index] = NULL;
-}
-
-/**
- * Unregister a MAC address from a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param mac_index
- * MAC address index.
- */
-static void
-hash_rxq_mac_addr_del(struct hash_rxq *hash_rxq, unsigned int mac_index)
-{
- unsigned int i;
-
- assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
- for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow[mac_index])); ++i)
- hash_rxq_del_mac_flow(hash_rxq, mac_index, i);
-}
-
-/**
- * Unregister all MAC addresses from a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- */
-void
-hash_rxq_mac_addrs_del(struct hash_rxq *hash_rxq)
-{
- unsigned int i;
-
- for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow)); ++i)
- hash_rxq_mac_addr_del(hash_rxq, i);
-}
-
-/**
- * Unregister a MAC address.
- *
- * This is done for each hash RX queue.
- *
- * @param priv
- * Pointer to private structure.
- * @param mac_index
- * MAC address index.
- */
-static void
-priv_mac_addr_del(struct priv *priv, unsigned int mac_index)
-{
- unsigned int i;
-
- assert(mac_index < RTE_DIM(priv->mac));
- if (!BITFIELD_ISSET(priv->mac_configured, mac_index))
- return;
- for (i = 0; (i != priv->hash_rxqs_n); ++i)
- hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[i], mac_index);
- BITFIELD_RESET(priv->mac_configured, mac_index);
-}
-
-/**
- * Unregister all MAC addresses from all hash RX queues.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-priv_mac_addrs_disable(struct priv *priv)
-{
- unsigned int i;
-
- for (i = 0; (i != priv->hash_rxqs_n); ++i)
- hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[i]);
-}
-
-/**
* DPDK callback to remove a MAC address.
*
* @param dev
@@ -206,258 +93,12 @@ priv_mac_addrs_disable(struct priv *priv)
void
mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct priv *priv = dev->data->dev_private;
-
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- DEBUG("%p: removing MAC address from index %" PRIu32,
- (void *)dev, index);
- if (index >= RTE_DIM(priv->mac))
- goto end;
- priv_mac_addr_del(priv, index);
-end:
- priv_unlock(priv);
-}
-
-/**
- * Add MAC flow steering rule.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param mac_index
- * MAC address index to register.
- * @param vlan_index
- * VLAN index to use.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
- unsigned int vlan_index)
-{
- struct ibv_exp_flow *flow;
- struct priv *priv = hash_rxq->priv;
- const uint8_t (*mac)[ETHER_ADDR_LEN] =
- (const uint8_t (*)[ETHER_ADDR_LEN])
- priv->mac[mac_index].addr_bytes;
- FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
- struct ibv_exp_flow_attr *attr = &data->attr;
- struct ibv_exp_flow_spec_eth *spec = &data->spec;
- unsigned int vlan_enabled = !!priv->vlan_filter_n;
- unsigned int vlan_id = priv->vlan_filter[vlan_index];
-
- assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
- assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index]));
- if (hash_rxq->mac_flow[mac_index][vlan_index] != NULL)
- return 0;
- /*
- * No padding must be inserted by the compiler between attr and spec.
- * This layout is expected by libibverbs.
- */
- assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
- priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
- /* The first specification must be Ethernet. */
- assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
- assert(spec->size == sizeof(*spec));
- *spec = (struct ibv_exp_flow_spec_eth){
- .type = IBV_EXP_FLOW_SPEC_ETH,
- .size = sizeof(*spec),
- .val = {
- .dst_mac = {
- (*mac)[0], (*mac)[1], (*mac)[2],
- (*mac)[3], (*mac)[4], (*mac)[5]
- },
- .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
- },
- .mask = {
- .dst_mac = "\xff\xff\xff\xff\xff\xff",
- .vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
- },
- };
- DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
- " VLAN index %u filtering %s, ID %u",
- (void *)hash_rxq,
- (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
- mac_index,
- vlan_index,
- (vlan_enabled ? "enabled" : "disabled"),
- vlan_id);
- /* Create related flow. */
- errno = 0;
- flow = ibv_exp_create_flow(hash_rxq->qp, attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow configuration failed, errno=%d: %s",
- (void *)hash_rxq, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
- hash_rxq->mac_flow[mac_index][vlan_index] = flow;
- return 0;
-}
-
-/**
- * Register a MAC address in a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param mac_index
- * MAC address index to register.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-hash_rxq_mac_addr_add(struct hash_rxq *hash_rxq, unsigned int mac_index)
-{
- struct priv *priv = hash_rxq->priv;
- unsigned int i = 0;
- int ret;
-
- assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
- assert(RTE_DIM(hash_rxq->mac_flow[mac_index]) ==
- RTE_DIM(priv->vlan_filter));
- /* Add a MAC address for each VLAN filter, or at least once. */
- do {
- ret = hash_rxq_add_mac_flow(hash_rxq, mac_index, i);
- if (ret) {
- /* Failure, rollback. */
- while (i != 0)
- hash_rxq_del_mac_flow(hash_rxq, mac_index,
- --i);
- return ret;
- }
- } while (++i < priv->vlan_filter_n);
- return 0;
-}
-
-/**
- * Register all MAC addresses in a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-hash_rxq_mac_addrs_add(struct hash_rxq *hash_rxq)
-{
- struct priv *priv = hash_rxq->priv;
- unsigned int i;
- int ret;
-
- assert(RTE_DIM(priv->mac) == RTE_DIM(hash_rxq->mac_flow));
- for (i = 0; (i != RTE_DIM(priv->mac)); ++i) {
- if (!BITFIELD_ISSET(priv->mac_configured, i))
- continue;
- ret = hash_rxq_mac_addr_add(hash_rxq, i);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- hash_rxq_mac_addr_del(hash_rxq, --i);
- assert(ret > 0);
- return ret;
- }
- return 0;
-}
-
-/**
- * Register a MAC address.
- *
- * This is done for each hash RX queue.
- *
- * @param priv
- * Pointer to private structure.
- * @param mac_index
- * MAC address index to use.
- * @param mac
- * MAC address to register.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-priv_mac_addr_add(struct priv *priv, unsigned int mac_index,
- const uint8_t (*mac)[ETHER_ADDR_LEN])
-{
- unsigned int i;
- int ret;
-
- assert(mac_index < RTE_DIM(priv->mac));
- /* First, make sure this address isn't already configured. */
- for (i = 0; (i != RTE_DIM(priv->mac)); ++i) {
- /* Skip this index, it's going to be reconfigured. */
- if (i == mac_index)
- continue;
- if (!BITFIELD_ISSET(priv->mac_configured, i))
- continue;
- if (memcmp(priv->mac[i].addr_bytes, *mac, sizeof(*mac)))
- continue;
- /* Address already configured elsewhere, return with error. */
- return EADDRINUSE;
- }
- if (BITFIELD_ISSET(priv->mac_configured, mac_index))
- priv_mac_addr_del(priv, mac_index);
- priv->mac[mac_index] = (struct ether_addr){
- {
- (*mac)[0], (*mac)[1], (*mac)[2],
- (*mac)[3], (*mac)[4], (*mac)[5]
- }
- };
- if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
- goto end;
- for (i = 0; (i != priv->hash_rxqs_n); ++i) {
- ret = hash_rxq_mac_addr_add(&(*priv->hash_rxqs)[i], mac_index);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[--i],
- mac_index);
- return ret;
- }
-end:
- BITFIELD_SET(priv->mac_configured, mac_index);
- return 0;
-}
-
-/**
- * Register all MAC addresses in all hash RX queues.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-priv_mac_addrs_enable(struct priv *priv)
-{
- unsigned int i;
- int ret;
-
- if (priv->isolated)
- return 0;
- if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
- return 0;
- for (i = 0; (i != priv->hash_rxqs_n); ++i) {
- ret = hash_rxq_mac_addrs_add(&(*priv->hash_rxqs)[i]);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0)
- hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[--i]);
- assert(ret > 0);
- return ret;
- }
- return 0;
+ assert(index < MLX5_MAX_MAC_ADDRESSES);
+ memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
+ if (!dev->data->promiscuous && !dev->data->all_multicast)
+ mlx5_traffic_restart(dev);
}
/**
@@ -471,31 +112,35 @@ priv_mac_addrs_enable(struct priv *priv)
* MAC address index.
* @param vmdq
* VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success.
*/
int
-mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index, uint32_t vmdq)
{
- struct priv *priv = dev->data->dev_private;
- int re;
-
- if (mlx5_is_secondary())
- return -ENOTSUP;
+ unsigned int i;
+ int ret = 0;
(void)vmdq;
- priv_lock(priv);
- DEBUG("%p: adding MAC address at index %" PRIu32,
- (void *)dev, index);
- if (index >= RTE_DIM(priv->mac)) {
- re = EINVAL;
- goto end;
+ if (mlx5_is_secondary())
+ return 0;
+ assert(index < MLX5_MAX_MAC_ADDRESSES);
+ /* First, make sure this address isn't already configured. */
+ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
+ /* Skip this index, it's going to be reconfigured. */
+ if (i == index)
+ continue;
+ if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
+ continue;
+ /* Address already configured elsewhere, return with error. */
+ return EADDRINUSE;
}
- re = priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
-end:
- priv_unlock(priv);
- return -re;
+ dev->data->mac_addrs[index] = *mac;
+ if (!dev->data->promiscuous && !dev->data->all_multicast)
+ mlx5_traffic_restart(dev);
+ return ret;
}
/**
@@ -509,7 +154,8 @@ end:
void
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
+ if (mlx5_is_secondary())
+ return;
DEBUG("%p: setting primary MAC address", (void *)dev);
- mlx5_mac_addr_remove(dev, 0);
mlx5_mac_addr_add(dev, mac_addr, 0, 0);
}
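With the rewritten callbacks the PMD merely edits dev->data->mac_addrs and lets mlx5_traffic_restart() re-derive the control flows. On the application side this corresponds to the standard ethdev call; a hedged sketch using an arbitrary locally administered address:

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Add a secondary unicast MAC; the VMDq pool argument is ignored
 * by mlx5. Returns 0 on success, non-zero otherwise. */
static int
add_secondary_mac(uint16_t port_id)
{
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &mac, 0);
}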
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 28733517..6b29eed5 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -41,14 +41,8 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mempool.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <rte_malloc.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
@@ -118,59 +112,13 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
}
/**
- * Register mempool as a memory region.
- *
- * @param pd
- * Pointer to protection domain.
- * @param mp
- * Pointer to memory pool.
- *
- * @return
- * Memory region pointer, NULL in case of error.
- */
-struct ibv_mr *
-mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
-{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
-
- if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
- }
-
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
- }
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- return ibv_reg_mr(pd,
- (void *)start,
- end - start,
- IBV_ACCESS_LOCAL_WRITE);
-}
-
-/**
* Register a Memory Region (MR) <-> Memory Pool (MP) association in
* txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
*
* This function should only be called by txq_mp2mr().
*
+ * @param priv
+ * Pointer to private structure.
* @param txq
* Pointer to TX queue structure.
* @param[in] mp
@@ -179,45 +127,75 @@ mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
* Index of the next available entry.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * mr on success, NULL on failure.
*/
-uint32_t
-txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
+struct mlx5_mr*
+priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
+ struct rte_mempool *mp, unsigned int idx)
{
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- struct ibv_mr *mr;
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr *mr;
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
(void *)txq_ctrl, mp->name, (void *)mp);
- mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp);
+ mr = priv_mr_get(priv, mp);
+ if (mr == NULL)
+ mr = priv_mr_new(priv, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq_ctrl);
- return (uint32_t)-1;
+ return NULL;
}
- if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
+ if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
/* Table is full, remove oldest entry. */
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq_ctrl);
--idx;
- claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
- memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
- (sizeof(txq_ctrl->txq.mp2mr) -
- sizeof(txq_ctrl->txq.mp2mr[0])));
+ priv_mr_release(priv, txq->mp2mr[0]);
+ memmove(&txq->mp2mr[0], &txq->mp2mr[1],
+ (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
/* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
- txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
- txq_ctrl->txq.mp2mr[idx].mr = mr;
- txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
+ txq_ctrl->txq.mp2mr[idx] = mr;
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
(void *)txq_ctrl, mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx].lkey);
- return txq_ctrl->txq.mp2mr[idx].lkey;
+ txq_ctrl->txq.mp2mr[idx]->lkey);
+ return mr;
+}
+
+/**
+ * Register a Memory Region (MR) <-> Memory Pool (MP) association in
+ * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ *
+ * This function should only be called by txq_mp2mr().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param[in] mp
+ * Memory Pool for which a Memory Region lkey must be returned.
+ * @param idx
+ * Index of the next available entry.
+ *
+ * @return
+ * mr on success, NULL on failure.
+ */
+struct mlx5_mr*
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+ unsigned int idx)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr *mr;
+
+ priv_lock(txq_ctrl->priv);
+ mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
+ priv_unlock(txq_ctrl->priv);
+ return mr;
}
-struct txq_mp2mr_mbuf_check_data {
+struct mlx5_mp2mr_mbuf_check_data {
int ret;
};
@@ -239,7 +217,7 @@ static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
uint32_t index __rte_unused)
{
- struct txq_mp2mr_mbuf_check_data *data = arg;
+ struct mlx5_mp2mr_mbuf_check_data *data = arg;
struct rte_mbuf *buf = obj;
/*
@@ -260,35 +238,158 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
* Pointer to TX queue structure.
*/
void
-txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
- struct txq_ctrl *txq_ctrl = arg;
- struct txq_mp2mr_mbuf_check_data data = {
+ struct priv *priv = (struct priv *)arg;
+ struct mlx5_mp2mr_mbuf_check_data data = {
.ret = 0,
};
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ struct mlx5_mr *mr;
/* Register mempool only if the first element looks like a mbuf. */
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
+ mr = priv_mr_get(priv, mp);
+ if (mr) {
+ priv_mr_release(priv, mr);
+ return;
+ }
+ priv_mr_new(priv, mp);
+}
+
+/**
+ * Register a new memory region from the mempool and store it in the memory
+ * region list.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param mp
+ * Pointer to the memory pool to register.
+ * @return
+ *   The memory region on success, NULL on failure.
+ */
+struct mlx5_mr*
+priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ uintptr_t start;
+ uintptr_t end;
+ unsigned int i;
+ struct mlx5_mr *mr;
+
+ mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
+ if (!mr) {
+ DEBUG("unable to configure MR, ibv_reg_mr() failed.");
+ return NULL;
+ }
if (mlx5_check_mempool(mp, &start, &end) != 0) {
ERROR("mempool %p: not virtually contiguous",
(void *)mp);
- return;
+		rte_free(mr);
+		return NULL;
}
- for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- struct ibv_mr *mr = txq_ctrl->txq.mp2mr[i].mr;
+ DEBUG("mempool %p area start=%p end=%p size=%zu",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ /* Round start and end to page boundary if found in memory segments. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+ uintptr_t addr = (uintptr_t)ms[i].addr;
+ size_t len = ms[i].len;
+ unsigned int align = ms[i].hugepage_sz;
- if (unlikely(mr == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
+ if ((start > addr) && (start < addr + len))
+ start = RTE_ALIGN_FLOOR(start, align);
+ if ((end > addr) && (end < addr + len))
+ end = RTE_ALIGN_CEIL(end, align);
+ }
+ DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
+ IBV_ACCESS_LOCAL_WRITE);
+ mr->mp = mp;
+ mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
+ mr->start = start;
+ mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
+ rte_atomic32_inc(&mr->refcnt);
+ DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
+ (void *)mr, rte_atomic32_read(&mr->refcnt));
+ LIST_INSERT_HEAD(&priv->mr, mr, next);
+ return mr;
+}
+
+/**
+ * Search the memory region object in the memory region list.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param mp
+ * Pointer to the memory pool to register.
+ * @return
+ *   The memory region on success, NULL if not found.
+ */
+struct mlx5_mr*
+priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+{
+ struct mlx5_mr *mr;
+
+ assert(mp);
+ if (LIST_EMPTY(&priv->mr))
+ return NULL;
+ LIST_FOREACH(mr, &priv->mr, next) {
+ if (mr->mp == mp) {
+ rte_atomic32_inc(&mr->refcnt);
+ DEBUG("Memory Region %p refcnt: %d",
+ (void *)mr, rte_atomic32_read(&mr->refcnt));
+ return mr;
}
- if (start >= (uintptr_t)mr->addr &&
- end <= (uintptr_t)mr->addr + mr->length)
- return;
}
- txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
+ return NULL;
+}
+
+/**
+ * Release the memory region object.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param mr
+ *   Pointer to memory region to release.
+ *
+ * @return
+ * 0 on success, errno on failure.
+ */
+int
+priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+{
+ (void)priv;
+ assert(mr);
+ DEBUG("Memory Region %p refcnt: %d",
+ (void *)mr, rte_atomic32_read(&mr->refcnt));
+ if (rte_atomic32_dec_and_test(&mr->refcnt)) {
+ claim_zero(ibv_dereg_mr(mr->mr));
+ LIST_REMOVE(mr, next);
+ rte_free(mr);
+ return 0;
+ }
+ return EBUSY;
+}
+
+/**
+ * Verify that the memory region list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return the number of objects not released.
+ */
+int
+priv_mr_verify(struct priv *priv)
+{
+ int ret = 0;
+ struct mlx5_mr *mr;
+
+ LIST_FOREACH(mr, &priv->mr, next) {
+ DEBUG("%p: mr %p still referenced", (void *)priv,
+ (void *)mr);
+ ++ret;
+ }
+ return ret;
}
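priv_mr_get(), priv_mr_new() and priv_mr_release() together form a reference-counted MR cache keyed by mempool. A hedged sketch of the lookup-or-register pattern a caller follows, assuming the driver-internal prototypes from mlx5.h:

/* Resolve the MR for a mempool, registering it on first use; both
 * helpers take a reference that priv_mr_release() must later drop. */
static struct mlx5_mr *
mr_lookup_or_register(struct priv *priv, struct rte_mempool *mp)
{
	struct mlx5_mr *mr = priv_mr_get(priv, mp);

	if (!mr)
		mr = priv_mr_new(priv, mp);
	return mr; /* NULL when registration failed. */
}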
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 608072f7..2de310bc 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -41,7 +41,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
-#include <infiniband/mlx5_hw.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -89,9 +89,6 @@
/* Default max packet length to be inlined. */
#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)
-#ifndef HAVE_VERBS_MLX5_OPCODE_TSO
-#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
-#endif
#define MLX5_OPC_MOD_ENHANCED_MPSW 0
#define MLX5_OPCODE_ENHANCED_MPSW 0x29
@@ -154,6 +151,9 @@
/* Default mark value used when none is provided. */
#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+/* Maximum number of DS in WQE. */
+#define MLX5_DSEG_MAX 63
+
/* Subset of struct mlx5_wqe_eth_seg. */
struct mlx5_wqe_eth_seg_small {
uint32_t rsvd0;
@@ -244,6 +244,46 @@ struct mlx5_cqe {
uint8_t op_own;
};
+/* Adding direct verbs to data-path. */
+
+/* CQ sequence number mask. */
+#define MLX5_CQ_SQN_MASK 0x3
+
+/* CQ sequence number index. */
+#define MLX5_CQ_SQN_OFFSET 28
+
+/* CQ doorbell index mask. */
+#define MLX5_CI_MASK 0xffffff
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_ARM_DB 1
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_DOORBELL 0x20
+
+/* CQE format value. */
+#define MLX5_COMPRESSED 0x3
+
+/* CQE format mask. */
+#define MLX5E_CQE_FORMAT_MASK 0xc
+
+/* MPW opcode. */
+#define MLX5_OPC_MOD_MPW 0x01
+
+/* Compressed Rx CQE structure. */
+struct mlx5_mini_cqe8 {
+ union {
+ uint32_t rx_hash_result;
+ uint32_t checksum;
+ struct {
+ uint16_t wqe_counter;
+ uint8_t s_wqe_opcode;
+ uint8_t reserved;
+ } s_wqe_info;
+ };
+ uint32_t byte_cnt;
+};
+
/**
* Convert a user mark to flow mark.
*
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index a2dd7d17..f3de46de 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -47,88 +47,13 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
/**
- * Get a RSS configuration hash key.
- *
- * @param priv
- * Pointer to private structure.
- * @param rss_hf
- * RSS hash functions configuration must be retrieved for.
- *
- * @return
- * Pointer to a RSS configuration structure or NULL if rss_hf cannot
- * be matched.
- */
-static struct rte_eth_rss_conf *
-rss_hash_get(struct priv *priv, uint64_t rss_hf)
-{
- unsigned int i;
-
- for (i = 0; (i != hash_rxq_init_n); ++i) {
- uint64_t dpdk_rss_hf = hash_rxq_init[i].dpdk_rss_hf;
-
- if (!(dpdk_rss_hf & rss_hf))
- continue;
- return (*priv->rss_conf)[i];
- }
- return NULL;
-}
-
-/**
- * Register a RSS key.
- *
- * @param priv
- * Pointer to private structure.
- * @param key
- * Hash key to register.
- * @param key_len
- * Hash key length in bytes.
- * @param rss_hf
- * RSS hash functions the provided key applies to.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-rss_hash_rss_conf_new_key(struct priv *priv, const uint8_t *key,
- unsigned int key_len, uint64_t rss_hf)
-{
- unsigned int i;
-
- for (i = 0; (i != hash_rxq_init_n); ++i) {
- struct rte_eth_rss_conf *rss_conf;
- uint64_t dpdk_rss_hf = hash_rxq_init[i].dpdk_rss_hf;
-
- if (!(dpdk_rss_hf & rss_hf))
- continue;
- rss_conf = rte_realloc((*priv->rss_conf)[i],
- (sizeof(*rss_conf) + key_len),
- 0);
- if (!rss_conf)
- return ENOMEM;
- rss_conf->rss_key = (void *)(rss_conf + 1);
- rss_conf->rss_key_len = key_len;
- rss_conf->rss_hf = dpdk_rss_hf;
- memcpy(rss_conf->rss_key, key, key_len);
- (*priv->rss_conf)[i] = rss_conf;
- }
- return 0;
-}
-
-/**
* DPDK callback to update the RSS hash configuration.
*
* @param dev
@@ -144,23 +69,24 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct priv *priv = dev->data->dev_private;
- int err = 0;
+ int ret = 0;
priv_lock(priv);
-
- assert(priv->rss_conf != NULL);
-
- /* Apply configuration. */
- if (rss_conf->rss_key)
- err = rss_hash_rss_conf_new_key(priv,
- rss_conf->rss_key,
- rss_conf->rss_key_len,
- rss_conf->rss_hf);
- /* Store protocols for which RSS is enabled. */
- priv->rss_hf = rss_conf->rss_hf;
+ if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
+ rss_conf->rss_key_len, 0);
+ if (!priv->rss_conf.rss_key) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
+ }
+ priv->rss_conf.rss_hf = rss_conf->rss_hf;
+out:
priv_unlock(priv);
- assert(err >= 0);
- return -err;
+ return ret;
}
/**
@@ -179,26 +105,17 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct priv *priv = dev->data->dev_private;
- struct rte_eth_rss_conf *priv_rss_conf;
- priv_lock(priv);
-
- assert(priv->rss_conf != NULL);
-
- priv_rss_conf = rss_hash_get(priv, rss_conf->rss_hf);
- if (!priv_rss_conf) {
- rss_conf->rss_hf = 0;
- priv_unlock(priv);
+ if (!rss_conf)
return -EINVAL;
- }
+ priv_lock(priv);
if (rss_conf->rss_key &&
- rss_conf->rss_key_len >= priv_rss_conf->rss_key_len)
- memcpy(rss_conf->rss_key,
- priv_rss_conf->rss_key,
- priv_rss_conf->rss_key_len);
- rss_conf->rss_key_len = priv_rss_conf->rss_key_len;
- rss_conf->rss_hf = priv_rss_conf->rss_hf;
-
+ (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
+ memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
+ priv->rss_conf.rss_key_len);
+ }
+ rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
+ rss_conf->rss_hf = priv->rss_conf.rss_hf;
priv_unlock(priv);
return 0;
}
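Since the PMD now keeps a single RSS configuration in priv->rss_conf, updating the key through the generic API replaces it wholesale. A hedged application-side sketch with an arbitrary 40-byte Toeplitz key:

#include <rte_ethdev.h>

/* Install a 40-byte Toeplitz key and hash IPv4/UDP traffic; the key
 * contents are arbitrary. Returns 0 on success. */
static int
set_rss_key(uint16_t port_id, uint8_t key[40])
{
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = 40,
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}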
@@ -357,11 +274,13 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
- mlx5_dev_stop(dev);
+ assert(!mlx5_is_secondary());
priv_lock(priv);
ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
priv_unlock(priv);
- if (ret)
- return -ret;
- return mlx5_dev_start(dev);
+ if (dev->data->dev_started) {
+ mlx5_dev_stop(dev);
+ mlx5_dev_start(dev);
+ }
+ return -ret;
}
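/*
 * A minimal sketch of driving mlx5_dev_rss_reta_update() from the
 * application side; the 64-entry table spread across two queues is an
 * illustrative assumption. Note the callback above restarts the port
 * when it is already running.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
example_update_reta(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64 reta_conf[1];
	unsigned int i;

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[0].mask = UINT64_MAX; /* Update all 64 entries. */
	for (i = 0; i != RTE_RETA_GROUP_SIZE; ++i)
		reta_conf[0].reta[i] = i % 2; /* Alternate queues 0 and 1. */
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_RETA_GROUP_SIZE);
}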
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index a67e5426..0ef2cdf0 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -45,343 +45,12 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ethdev.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
-/* Initialization data for special flows. */
-static const struct special_flow_init special_flow_init[] = {
- [HASH_RXQ_FLOW_TYPE_PROMISC] = {
- .dst_mac_val = "\x00\x00\x00\x00\x00\x00",
- .dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
- .hash_types =
- 1 << HASH_RXQ_TCPV4 |
- 1 << HASH_RXQ_UDPV4 |
- 1 << HASH_RXQ_IPV4 |
- 1 << HASH_RXQ_TCPV6 |
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 1 << HASH_RXQ_ETH |
- 0,
- .per_vlan = 0,
- },
- [HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
- .dst_mac_val = "\x01\x00\x00\x00\x00\x00",
- .dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
- .hash_types =
- 1 << HASH_RXQ_UDPV4 |
- 1 << HASH_RXQ_IPV4 |
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 1 << HASH_RXQ_ETH |
- 0,
- .per_vlan = 0,
- },
- [HASH_RXQ_FLOW_TYPE_BROADCAST] = {
- .dst_mac_val = "\xff\xff\xff\xff\xff\xff",
- .dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
- .hash_types =
- 1 << HASH_RXQ_UDPV4 |
- 1 << HASH_RXQ_IPV4 |
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 1 << HASH_RXQ_ETH |
- 0,
- .per_vlan = 1,
- },
- [HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
- .dst_mac_val = "\x33\x33\x00\x00\x00\x00",
- .dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
- .hash_types =
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 1 << HASH_RXQ_ETH |
- 0,
- .per_vlan = 1,
- },
-};
-
-/**
- * Enable a special flow in a hash RX queue for a given VLAN index.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param flow_type
- * Special flow type.
- * @param vlan_index
- * VLAN index to use.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
- enum hash_rxq_flow_type flow_type,
- unsigned int vlan_index)
-{
- struct priv *priv = hash_rxq->priv;
- struct ibv_exp_flow *flow;
- FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
- struct ibv_exp_flow_attr *attr = &data->attr;
- struct ibv_exp_flow_spec_eth *spec = &data->spec;
- const uint8_t *mac;
- const uint8_t *mask;
- unsigned int vlan_enabled = (priv->vlan_filter_n &&
- special_flow_init[flow_type].per_vlan);
- unsigned int vlan_id = priv->vlan_filter[vlan_index];
-
- /* Check if flow is relevant for this hash_rxq. */
- if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
- return 0;
- /* Check if flow already exists. */
- if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
- return 0;
-
- /*
- * No padding must be inserted by the compiler between attr and spec.
- * This layout is expected by libibverbs.
- */
- assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
- priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
- /* The first specification must be Ethernet. */
- assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
- assert(spec->size == sizeof(*spec));
-
- mac = special_flow_init[flow_type].dst_mac_val;
- mask = special_flow_init[flow_type].dst_mac_mask;
- *spec = (struct ibv_exp_flow_spec_eth){
- .type = IBV_EXP_FLOW_SPEC_ETH,
- .size = sizeof(*spec),
- .val = {
- .dst_mac = {
- mac[0], mac[1], mac[2],
- mac[3], mac[4], mac[5],
- },
- .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
- },
- .mask = {
- .dst_mac = {
- mask[0], mask[1], mask[2],
- mask[3], mask[4], mask[5],
- },
- .vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
- },
- };
-
- errno = 0;
- flow = ibv_exp_create_flow(hash_rxq->qp, attr);
- if (flow == NULL) {
- /* It's not clear whether errno is always set in this case. */
- ERROR("%p: flow configuration failed, errno=%d: %s",
- (void *)hash_rxq, errno,
- (errno ? strerror(errno) : "Unknown error"));
- if (errno)
- return errno;
- return EINVAL;
- }
- hash_rxq->special_flow[flow_type][vlan_index] = flow;
- DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
- (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
- vlan_id, vlan_index);
- return 0;
-}
-
-/**
- * Disable a special flow in a hash RX queue for a given VLAN index.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param flow_type
- * Special flow type.
- * @param vlan_index
- * VLAN index to use.
- */
-static void
-hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
- enum hash_rxq_flow_type flow_type,
- unsigned int vlan_index)
-{
- struct ibv_exp_flow *flow =
- hash_rxq->special_flow[flow_type][vlan_index];
-
- if (flow == NULL)
- return;
- claim_zero(ibv_exp_destroy_flow(flow));
- hash_rxq->special_flow[flow_type][vlan_index] = NULL;
- DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
- (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
- hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
-}
-
-/**
- * Enable a special flow in a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param flow_type
- * Special flow type.
- * @param vlan_index
- * VLAN index to use.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
- enum hash_rxq_flow_type flow_type)
-{
- struct priv *priv = hash_rxq->priv;
- unsigned int i = 0;
- int ret;
-
- assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
- assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
- RTE_DIM(priv->vlan_filter));
- /* Add a special flow for each VLAN filter when relevant. */
- do {
- ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
- if (ret) {
- /* Failure, rollback. */
- while (i != 0)
- hash_rxq_special_flow_disable_vlan(hash_rxq,
- flow_type,
- --i);
- return ret;
- }
- } while (special_flow_init[flow_type].per_vlan &&
- ++i < priv->vlan_filter_n);
- return 0;
-}
-
-/**
- * Disable a special flow in a hash RX queue.
- *
- * @param hash_rxq
- * Pointer to hash RX queue structure.
- * @param flow_type
- * Special flow type.
- */
-static void
-hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
- enum hash_rxq_flow_type flow_type)
-{
- unsigned int i;
-
- assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
- for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
- hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
-}
-
-/**
- * Enable a special flow in all hash RX queues.
- *
- * @param priv
- * Private structure.
- * @param flow_type
- * Special flow type.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
-{
- unsigned int i;
-
- if (!priv_allow_flow_type(priv, flow_type))
- return 0;
- for (i = 0; (i != priv->hash_rxqs_n); ++i) {
- struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
- int ret;
-
- ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (i != 0) {
- hash_rxq = &(*priv->hash_rxqs)[--i];
- hash_rxq_special_flow_disable(hash_rxq, flow_type);
- }
- return ret;
- }
- return 0;
-}
-
-/**
- * Disable a special flow in all hash RX queues.
- *
- * @param priv
- * Private structure.
- * @param flow_type
- * Special flow type.
- */
-void
-priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
-{
- unsigned int i;
-
- for (i = 0; (i != priv->hash_rxqs_n); ++i) {
- struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
-
- hash_rxq_special_flow_disable(hash_rxq, flow_type);
- }
-}
-
-/**
- * Enable all special flows in all hash RX queues.
- *
- * @param priv
- * Private structure.
- */
-int
-priv_special_flow_enable_all(struct priv *priv)
-{
- enum hash_rxq_flow_type flow_type;
-
- if (priv->isolated)
- return 0;
- for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
- flow_type != HASH_RXQ_FLOW_TYPE_MAC;
- ++flow_type) {
- int ret;
-
- ret = priv_special_flow_enable(priv, flow_type);
- if (!ret)
- continue;
- /* Failure, rollback. */
- while (flow_type)
- priv_special_flow_disable(priv, --flow_type);
- return ret;
- }
- return 0;
-}
-
-/**
- * Disable all special flows in all hash RX queues.
- *
- * @param priv
- * Private structure.
- */
-void
-priv_special_flow_disable_all(struct priv *priv)
-{
- enum hash_rxq_flow_type flow_type;
-
- for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
- flow_type != HASH_RXQ_FLOW_TYPE_MAC;
- ++flow_type)
- priv_special_flow_disable(priv, flow_type);
-}
-
/**
* DPDK callback to enable promiscuous mode.
*
@@ -391,19 +60,10 @@ priv_special_flow_disable_all(struct priv *priv)
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->promisc_req = 1;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while enabling promiscuous mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->promiscuous = 1;
+ mlx5_traffic_restart(dev);
}
/**
@@ -415,19 +75,10 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->promisc_req = 0;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while disabling promiscuous mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->promiscuous = 0;
+ mlx5_traffic_restart(dev);
}
/**
@@ -439,19 +90,10 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->allmulti_req = 1;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while enabling allmulticast mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->all_multicast = 1;
+ mlx5_traffic_restart(dev);
}
/**
@@ -463,17 +105,8 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->allmulti_req = 0;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while disabling allmulticast mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->all_multicast = 0;
+ mlx5_traffic_restart(dev);
}
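/*
 * A minimal sketch, assuming a started port: after this rework the four
 * callbacks above only flip the flags in dev->data and rebuild the
 * traffic flows, so the application-side entry points behave as before
 * (void return in this release).
 */
#include <rte_ethdev.h>

static void
example_toggle_promisc(uint16_t port_id, int on)
{
	if (on)
		rte_eth_promiscuous_enable(port_id);
	else
		rte_eth_promiscuous_disable(port_id);
	/* rte_eth_promiscuous_get() mirrors dev->data->promiscuous. */
}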
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 74387a79..6b29aaee 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -37,6 +37,7 @@
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
+#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -44,25 +45,18 @@
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
-#include <infiniband/arch.h>
-#include <infiniband/mlx5_hw.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <rte_io.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
@@ -70,122 +64,6 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
-/* Initialization data for hash RX queues. */
-const struct hash_rxq_init hash_rxq_init[] = {
- [HASH_RXQ_TCPV4] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
- IBV_EXP_RX_HASH_DST_IPV4 |
- IBV_EXP_RX_HASH_SRC_PORT_TCP |
- IBV_EXP_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
- .flow_priority = 0,
- .flow_spec.tcp_udp = {
- .type = IBV_EXP_FLOW_SPEC_TCP,
- .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
- },
- [HASH_RXQ_UDPV4] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
- IBV_EXP_RX_HASH_DST_IPV4 |
- IBV_EXP_RX_HASH_SRC_PORT_UDP |
- IBV_EXP_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
- .flow_priority = 0,
- .flow_spec.tcp_udp = {
- .type = IBV_EXP_FLOW_SPEC_UDP,
- .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
- },
- [HASH_RXQ_IPV4] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
- IBV_EXP_RX_HASH_DST_IPV4),
- .dpdk_rss_hf = (ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4),
- .flow_priority = 1,
- .flow_spec.ipv4 = {
- .type = IBV_EXP_FLOW_SPEC_IPV4,
- .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
- },
- [HASH_RXQ_TCPV6] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
- IBV_EXP_RX_HASH_DST_IPV6 |
- IBV_EXP_RX_HASH_SRC_PORT_TCP |
- IBV_EXP_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
- .flow_priority = 0,
- .flow_spec.tcp_udp = {
- .type = IBV_EXP_FLOW_SPEC_TCP,
- .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
- },
- [HASH_RXQ_UDPV6] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
- IBV_EXP_RX_HASH_DST_IPV6 |
- IBV_EXP_RX_HASH_SRC_PORT_UDP |
- IBV_EXP_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
- .flow_priority = 0,
- .flow_spec.tcp_udp = {
- .type = IBV_EXP_FLOW_SPEC_UDP,
- .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
- },
- [HASH_RXQ_IPV6] = {
- .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
- IBV_EXP_RX_HASH_DST_IPV6),
- .dpdk_rss_hf = (ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6),
- .flow_priority = 1,
- .flow_spec.ipv6 = {
- .type = IBV_EXP_FLOW_SPEC_IPV6,
- .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
- },
- .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
- },
- [HASH_RXQ_ETH] = {
- .hash_fields = 0,
- .dpdk_rss_hf = 0,
- .flow_priority = 2,
- .flow_spec.eth = {
- .type = IBV_EXP_FLOW_SPEC_ETH,
- .size = sizeof(hash_rxq_init[0].flow_spec.eth),
- },
- .underlayer = NULL,
- },
-};
-
-/* Number of entries in hash_rxq_init[]. */
-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
-
-/* Initialization data for hash RX queue indirection tables. */
-static const struct ind_table_init ind_table_init[] = {
- {
- .max_size = -1u, /* Superseded by HW limitations. */
- .hash_types =
- 1 << HASH_RXQ_TCPV4 |
- 1 << HASH_RXQ_UDPV4 |
- 1 << HASH_RXQ_IPV4 |
- 1 << HASH_RXQ_TCPV6 |
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 0,
- .hash_types_n = 6,
- },
- {
- .max_size = 1,
- .hash_types = 1 << HASH_RXQ_ETH,
- .hash_types_n = 1,
- },
-};
-
-#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
-
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
0x2c, 0xc6, 0x81, 0xd1,
@@ -204,495 +82,27 @@ uint8_t rss_hash_default_key[] = {
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
/**
- * Populate flow steering rule for a given hash RX queue type using
- * information from hash_rxq_init[]. Nothing is written to flow_attr when
- * flow_attr_size is not large enough, but the required size is still returned.
- *
- * @param priv
- * Pointer to private structure.
- * @param[out] flow_attr
- * Pointer to flow attribute structure to fill. Note that the allocated
- * area must be larger and large enough to hold all flow specifications.
- * @param flow_attr_size
- * Entire size of flow_attr and trailing room for flow specifications.
- * @param type
- * Hash RX queue type to use for flow steering rule.
- *
- * @return
- * Total size of the flow attribute buffer. No errors are defined.
- */
-size_t
-priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
- size_t flow_attr_size, enum hash_rxq_type type)
-{
- size_t offset = sizeof(*flow_attr);
- const struct hash_rxq_init *init = &hash_rxq_init[type];
-
- assert(priv != NULL);
- assert((size_t)type < RTE_DIM(hash_rxq_init));
- do {
- offset += init->flow_spec.hdr.size;
- init = init->underlayer;
- } while (init != NULL);
- if (offset > flow_attr_size)
- return offset;
- flow_attr_size = offset;
- init = &hash_rxq_init[type];
- *flow_attr = (struct ibv_exp_flow_attr){
- .type = IBV_EXP_FLOW_ATTR_NORMAL,
- /* Priorities < 3 are reserved for flow director. */
- .priority = init->flow_priority + 3,
- .num_of_specs = 0,
- .port = priv->port,
- .flags = 0,
- };
- do {
- offset -= init->flow_spec.hdr.size;
- memcpy((void *)((uintptr_t)flow_attr + offset),
- &init->flow_spec,
- init->flow_spec.hdr.size);
- ++flow_attr->num_of_specs;
- init = init->underlayer;
- } while (init != NULL);
- return flow_attr_size;
-}
-
-/**
- * Convert hash type position in indirection table initializer to
- * hash RX queue type.
- *
- * @param table
- * Indirection table initializer.
- * @param pos
- * Hash type position.
- *
- * @return
- * Hash RX queue type.
- */
-static enum hash_rxq_type
-hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
-{
- enum hash_rxq_type type = HASH_RXQ_TCPV4;
-
- assert(pos < table->hash_types_n);
- do {
- if ((table->hash_types & (1 << type)) && (pos-- == 0))
- break;
- ++type;
- } while (1);
- return type;
-}
-
-/**
- * Filter out disabled hash RX queue types from ind_table_init[].
- *
- * @param priv
- * Pointer to private structure.
- * @param[out] table
- * Output table.
- *
- * @return
- * Number of table entries.
- */
-static unsigned int
-priv_make_ind_table_init(struct priv *priv,
- struct ind_table_init (*table)[IND_TABLE_INIT_N])
-{
- uint64_t rss_hf;
- unsigned int i;
- unsigned int j;
- unsigned int table_n = 0;
- /* Mandatory to receive frames not handled by normal hash RX queues. */
- unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
-
- rss_hf = priv->rss_hf;
- /* Process other protocols only if more than one queue. */
- if (priv->rxqs_n > 1)
- for (i = 0; (i != hash_rxq_init_n); ++i)
- if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
- hash_types_sup |= (1 << i);
-
- /* Filter out entries whose protocols are not in the set. */
- for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
- unsigned int nb;
- unsigned int h;
-
- /* j is increased only if the table has valid protocols. */
- assert(j <= i);
- (*table)[j] = ind_table_init[i];
- (*table)[j].hash_types &= hash_types_sup;
- for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
- if (((*table)[j].hash_types >> h) & 0x1)
- ++nb;
- (*table)[i].hash_types_n = nb;
- if (nb) {
- ++table_n;
- ++j;
- }
- }
- return table_n;
-}
-
-/**
- * Initialize hash RX queues and indirection table.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-priv_create_hash_rxqs(struct priv *priv)
-{
- struct ibv_exp_wq *wqs[priv->reta_idx_n];
- struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
- unsigned int ind_tables_n =
- priv_make_ind_table_init(priv, &ind_table_init);
- unsigned int hash_rxqs_n = 0;
- struct hash_rxq (*hash_rxqs)[] = NULL;
- struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
- unsigned int i;
- unsigned int j;
- unsigned int k;
- int err = 0;
-
- assert(priv->ind_tables == NULL);
- assert(priv->ind_tables_n == 0);
- assert(priv->hash_rxqs == NULL);
- assert(priv->hash_rxqs_n == 0);
- assert(priv->pd != NULL);
- assert(priv->ctx != NULL);
- if (priv->isolated)
- return 0;
- if (priv->rxqs_n == 0)
- return EINVAL;
- assert(priv->rxqs != NULL);
- if (ind_tables_n == 0) {
- ERROR("all hash RX queue types have been filtered out,"
- " indirection table cannot be created");
- return EINVAL;
- }
- if (priv->rxqs_n & (priv->rxqs_n - 1)) {
- INFO("%u RX queues are configured, consider rounding this"
- " number to the next power of two for better balancing",
- priv->rxqs_n);
- DEBUG("indirection table extended to assume %u WQs",
- priv->reta_idx_n);
- }
- for (i = 0; (i != priv->reta_idx_n); ++i) {
- struct rxq_ctrl *rxq_ctrl;
-
- rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
- struct rxq_ctrl, rxq);
- wqs[i] = rxq_ctrl->wq;
- }
- /* Get number of hash RX queues to configure. */
- for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
- hash_rxqs_n += ind_table_init[i].hash_types_n;
- DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
- hash_rxqs_n, priv->rxqs_n, ind_tables_n);
- /* Create indirection tables. */
- ind_tables = rte_calloc(__func__, ind_tables_n,
- sizeof((*ind_tables)[0]), 0);
- if (ind_tables == NULL) {
- err = ENOMEM;
- ERROR("cannot allocate indirection tables container: %s",
- strerror(err));
- goto error;
- }
- for (i = 0; (i != ind_tables_n); ++i) {
- struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
- .pd = priv->pd,
- .log_ind_tbl_size = 0, /* Set below. */
- .ind_tbl = wqs,
- .comp_mask = 0,
- };
- unsigned int ind_tbl_size = ind_table_init[i].max_size;
- struct ibv_exp_rwq_ind_table *ind_table;
-
- if (priv->reta_idx_n < ind_tbl_size)
- ind_tbl_size = priv->reta_idx_n;
- ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
- errno = 0;
- ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
- &ind_init_attr);
- if (ind_table != NULL) {
- (*ind_tables)[i] = ind_table;
- continue;
- }
- /* Not clear whether errno is set. */
- err = (errno ? errno : EINVAL);
- ERROR("RX indirection table creation failed with error %d: %s",
- err, strerror(err));
- goto error;
- }
- /* Allocate array that holds hash RX queues and related data. */
- hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
- sizeof((*hash_rxqs)[0]), 0);
- if (hash_rxqs == NULL) {
- err = ENOMEM;
- ERROR("cannot allocate hash RX queues container: %s",
- strerror(err));
- goto error;
- }
- for (i = 0, j = 0, k = 0;
- ((i != hash_rxqs_n) && (j != ind_tables_n));
- ++i) {
- struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
- enum hash_rxq_type type =
- hash_rxq_type_from_pos(&ind_table_init[j], k);
- struct rte_eth_rss_conf *priv_rss_conf =
- (*priv->rss_conf)[type];
- struct ibv_exp_rx_hash_conf hash_conf = {
- .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = (priv_rss_conf ?
- priv_rss_conf->rss_key_len :
- rss_hash_default_key_len),
- .rx_hash_key = (priv_rss_conf ?
- priv_rss_conf->rss_key :
- rss_hash_default_key),
- .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
- .rwq_ind_tbl = (*ind_tables)[j],
- };
- struct ibv_exp_qp_init_attr qp_init_attr = {
- .max_inl_recv = 0, /* Currently not supported. */
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RX_HASH),
- .pd = priv->pd,
- .rx_hash_conf = &hash_conf,
- .port_num = priv->port,
- };
-
- DEBUG("using indirection table %u for hash RX queue %u type %d",
- j, i, type);
- *hash_rxq = (struct hash_rxq){
- .priv = priv,
- .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
- .type = type,
- };
- if (hash_rxq->qp == NULL) {
- err = (errno ? errno : EINVAL);
- ERROR("Hash RX QP creation failure: %s",
- strerror(err));
- goto error;
- }
- if (++k < ind_table_init[j].hash_types_n)
- continue;
- /* Switch to the next indirection table and reset hash RX
- * queue type array index. */
- ++j;
- k = 0;
- }
- priv->ind_tables = ind_tables;
- priv->ind_tables_n = ind_tables_n;
- priv->hash_rxqs = hash_rxqs;
- priv->hash_rxqs_n = hash_rxqs_n;
- assert(err == 0);
- return 0;
-error:
- if (hash_rxqs != NULL) {
- for (i = 0; (i != hash_rxqs_n); ++i) {
- struct ibv_qp *qp = (*hash_rxqs)[i].qp;
-
- if (qp == NULL)
- continue;
- claim_zero(ibv_destroy_qp(qp));
- }
- rte_free(hash_rxqs);
- }
- if (ind_tables != NULL) {
- for (j = 0; (j != ind_tables_n); ++j) {
- struct ibv_exp_rwq_ind_table *ind_table =
- (*ind_tables)[j];
-
- if (ind_table == NULL)
- continue;
- claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
- }
- rte_free(ind_tables);
- }
- return err;
-}
-
-/**
- * Clean up hash RX queues and indirection table.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-priv_destroy_hash_rxqs(struct priv *priv)
-{
- unsigned int i;
-
- DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
- if (priv->hash_rxqs_n == 0) {
- assert(priv->hash_rxqs == NULL);
- assert(priv->ind_tables == NULL);
- return;
- }
- for (i = 0; (i != priv->hash_rxqs_n); ++i) {
- struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
- unsigned int j, k;
-
- assert(hash_rxq->priv == priv);
- assert(hash_rxq->qp != NULL);
- /* Also check that there are no remaining flows. */
- for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
- for (k = 0;
- (k != RTE_DIM(hash_rxq->special_flow[j]));
- ++k)
- assert(hash_rxq->special_flow[j][k] == NULL);
- for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
- for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
- assert(hash_rxq->mac_flow[j][k] == NULL);
- claim_zero(ibv_destroy_qp(hash_rxq->qp));
- }
- priv->hash_rxqs_n = 0;
- rte_free(priv->hash_rxqs);
- priv->hash_rxqs = NULL;
- for (i = 0; (i != priv->ind_tables_n); ++i) {
- struct ibv_exp_rwq_ind_table *ind_table =
- (*priv->ind_tables)[i];
-
- assert(ind_table != NULL);
- claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
- }
- priv->ind_tables_n = 0;
- rte_free(priv->ind_tables);
- priv->ind_tables = NULL;
-}
-
-/**
- * Check whether a given flow type is allowed.
- *
- * @param priv
- * Pointer to private structure.
- * @param type
- * Flow type to check.
- *
- * @return
- * Nonzero if the given flow type is allowed.
- */
-int
-priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
-{
- /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
- * has been requested. */
- if (priv->promisc_req)
- return type == HASH_RXQ_FLOW_TYPE_PROMISC;
- switch (type) {
- case HASH_RXQ_FLOW_TYPE_PROMISC:
- return !!priv->promisc_req;
- case HASH_RXQ_FLOW_TYPE_ALLMULTI:
- return !!priv->allmulti_req;
- case HASH_RXQ_FLOW_TYPE_BROADCAST:
- case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
- /* If allmulti is enabled, broadcast and ipv6multi
- * are unnecessary. */
- return !priv->allmulti_req;
- case HASH_RXQ_FLOW_TYPE_MAC:
- return 1;
- default:
- /* Unsupported flow type is not allowed. */
- return 0;
- }
- return 0;
-}
-
-/**
- * Automatically enable/disable flows according to configuration.
- *
- * @param priv
- * Private structure.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-int
-priv_rehash_flows(struct priv *priv)
-{
- enum hash_rxq_flow_type i;
-
- for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
- i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
- ++i)
- if (!priv_allow_flow_type(priv, i)) {
- priv_special_flow_disable(priv, i);
- } else {
- int ret = priv_special_flow_enable(priv, i);
-
- if (ret)
- return ret;
- }
- if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
- return priv_mac_addrs_enable(priv);
- priv_mac_addrs_disable(priv);
- return 0;
-}
-
-/**
- * Unlike regular Rx function, vPMD Rx doesn't replace mbufs immediately when
- * receiving packets. Instead it replaces later in bulk. In rxq->elts[], entries
- * from rq_pi to rq_ci are owned by device but the rest is already delivered to
- * application. In order not to reuse those mbufs by rxq_alloc_elts(), this
- * function must be called to replace used mbufs.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_trim_elts(struct rxq *rxq)
-{
- const uint16_t q_n = (1 << rxq->elts_n);
- const uint16_t q_mask = q_n - 1;
- uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
- uint16_t i;
-
- if (!rxq->trim_elts)
- return;
- for (i = 0; i < used; ++i)
- (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
- rxq->trim_elts = 0;
- return;
-}
-
-/**
* Allocate RX queue elements.
*
* @param rxq_ctrl
* Pointer to RX queue structure.
- * @param elts_n
- * Number of elements to allocate.
- * @param[in] pool
- * If not NULL, fetch buffers from this array instead of allocating them
- * with rte_pktmbuf_alloc().
*
* @return
* 0 on success, errno value on failure.
*/
-static int
-rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
- struct rte_mbuf *(*pool)[])
+int
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
+ unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
int ret = 0;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
struct rte_mbuf *buf;
- volatile struct mlx5_wqe_data_seg *scat =
- &(*rxq_ctrl->rxq.wqes)[i];
-
- buf = (pool != NULL) ? (*pool)[i] : NULL;
- if (buf != NULL) {
- rte_pktmbuf_reset(buf);
- rte_pktmbuf_refcnt_update(buf, 1);
- } else
- buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+
+ buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM;
@@ -711,21 +121,35 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
PKT_LEN(buf) = DATA_LEN(buf);
NB_SEGS(buf) = 1;
- /* scat->addr must be able to store a pointer. */
- assert(sizeof(scat->addr) >= sizeof(uintptr_t));
- *scat = (struct mlx5_wqe_data_seg){
- .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
- .byte_count = htonl(DATA_LEN(buf)),
- .lkey = htonl(rxq_ctrl->mr->lkey),
- };
(*rxq_ctrl->rxq.elts)[i] = buf;
}
+ /* If the vectorized Rx path is enabled. */
+ if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
+ int j;
+
+ /* Initialize default rearm_data for vPMD. */
+ mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(mbuf_init, 1);
+ mbuf_init->nb_segs = 1;
+ mbuf_init->port = rxq->port_id;
+ /*
+ * Prevent compiler reordering:
+ * rearm_data covers the fields initialized above.
+ */
+ rte_compiler_barrier();
+ rxq->mbuf_initializer =
+ *(uint64_t *)&mbuf_init->rearm_data;
+ /* Padding with a fake mbuf for vectorized Rx. */
+ for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
+ (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
+ }
DEBUG("%p: allocated and configured %u segments (max %u packets)",
(void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
assert(ret == 0);
return 0;
error:
- assert(pool == NULL);
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
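/*
 * A sketch of the mbuf_initializer trick used above, assuming the mbuf
 * field layout of this release: the rearm_data marker aliases the
 * data_off/refcnt/nb_segs/port fields of struct rte_mbuf, so the
 * vectorized Rx path can reset all four with a single 64-bit store.
 */
#include <rte_mbuf.h>

static inline void
example_rearm(struct rte_mbuf *m, uint64_t mbuf_initializer)
{
	/* One store instead of four separate field assignments. */
	*(uint64_t *)&m->rearm_data = mbuf_initializer;
}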
@@ -744,19 +168,30 @@ error:
* Pointer to RX queue structure.
*/
static void
-rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- unsigned int i;
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ const uint16_t q_n = (1 << rxq->elts_n);
+ const uint16_t q_mask = q_n - 1;
+ uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+ uint16_t i;
- rxq_trim_elts(&rxq_ctrl->rxq);
DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
- if (rxq_ctrl->rxq.elts == NULL)
+ if (rxq->elts == NULL)
return;
-
- for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
- if ((*rxq_ctrl->rxq.elts)[i] != NULL)
- rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
- (*rxq_ctrl->rxq.elts)[i] = NULL;
+ /**
+ * Some mbufs in the ring still belong to the application: they
+ * cannot be freed.
+ */
+ if (rxq_check_vec_support(rxq) > 0) {
+ for (i = 0; i < used; ++i)
+ (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
+ rxq->rq_pi = rxq->rq_ci;
+ }
+ for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ if ((*rxq->elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq->elts)[i]);
+ (*rxq->elts)[i] = NULL;
}
}
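/*
 * A worked example of the index arithmetic above, with illustrative
 * values: elts_n = 4 gives q_n = 16; with rq_ci = 20 and rq_pi = 18 the
 * device still owns 2 descriptors, so the 14 entries starting at rq_ci
 * (mod 16) were already delivered to the application and must be
 * cleared rather than freed. Unsigned wrap-around keeps
 * (rq_ci - rq_pi) correct across overflow.
 */
#include <stdint.h>

static inline uint16_t
example_app_owned(uint16_t elts_n, uint16_t rq_ci, uint16_t rq_pi)
{
	const uint16_t q_n = 1 << elts_n;

	return q_n - (rq_ci - rq_pi); /* 16 - (20 - 18) = 14. */
}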
@@ -769,343 +204,15 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
* Pointer to RX queue structure.
*/
void
-rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
+mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
DEBUG("cleaning up %p", (void *)rxq_ctrl);
- rxq_free_elts(rxq_ctrl);
- if (rxq_ctrl->fdir_queue != NULL)
- priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
- if (rxq_ctrl->wq != NULL)
- claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
- if (rxq_ctrl->cq != NULL)
- claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
- if (rxq_ctrl->channel != NULL)
- claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
- if (rxq_ctrl->mr != NULL)
- claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
+ if (rxq_ctrl->ibv)
+ mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}
/**
- * Initialize RX queue.
- *
- * @param tmpl
- * Pointer to RX queue control template.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static inline int
-rxq_setup(struct rxq_ctrl *tmpl)
-{
- struct ibv_cq *ibcq = tmpl->cq;
- struct ibv_mlx5_cq_info cq_info;
- struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
- struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
- rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
-
- if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
- ERROR("Unable to query CQ info. check your OFED.");
- return ENOTSUP;
- }
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
- return EINVAL;
- }
- if (elts == NULL)
- return ENOMEM;
- tmpl->rxq.rq_db = rwq->rq.db;
- tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
- tmpl->rxq.cq_ci = 0;
- tmpl->rxq.rq_ci = 0;
- tmpl->rxq.rq_pi = 0;
- tmpl->rxq.cq_db = cq_info.dbrec;
- tmpl->rxq.wqes =
- (volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)rwq->rq.buff;
- tmpl->rxq.cqes =
- (volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq_info.buf;
- tmpl->rxq.elts = elts;
- return 0;
-}
-
-/**
- * Configure a RX queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param rxq_ctrl
- * Pointer to RX queue structure.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
-{
- struct priv *priv = dev->data->dev_private;
- struct rxq_ctrl tmpl = {
- .priv = priv,
- .socket = socket,
- .rxq = {
- .elts_n = log2above(desc),
- .mp = mp,
- .rss_hash = priv->rxqs_n > 1,
- },
- };
- struct ibv_exp_wq_attr mod;
- union {
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_wq_init_attr wq;
- struct ibv_exp_cq_attr cq_attr;
- } attr;
- unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
- unsigned int cqe_n = desc - 1;
- struct rte_mbuf *(*elts)[desc] = NULL;
- int ret = 0;
-
- (void)conf; /* Thresholds configuration (ignored). */
- /* Enable scattered packets support for this queue if necessary. */
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
- tmpl.rxq.sges_n = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
- unsigned int size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
- unsigned int sges_n;
-
- /*
- * Determine the number of SGEs needed for a full packet
- * and round it to the next power of two.
- */
- sges_n = log2above((size / mb_len) + !!(size % mb_len));
- tmpl.rxq.sges_n = sges_n;
- /* Make sure rxq.sges_n did not overflow. */
- size = mb_len * (1 << tmpl.rxq.sges_n);
- size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
- ERROR("%p: too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- (void *)dev,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- return EOVERFLOW;
- }
- } else {
- WARN("%p: the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered"
- " mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
- }
- DEBUG("%p: maximum number of segments per packet: %u",
- (void *)dev, 1 << tmpl.rxq.sges_n);
- if (desc % (1 << tmpl.rxq.sges_n)) {
- ERROR("%p: number of RX queue descriptors (%u) is not a"
- " multiple of SGEs per packet (%u)",
- (void *)dev,
- desc,
- 1 << tmpl.rxq.sges_n);
- return EINVAL;
- }
- /* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
- tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
- tmpl.rxq.csum_l2tun =
- !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- /* Use the entire RX mempool as the memory region. */
- tmpl.mr = mlx5_mp2mr(priv->pd, mp);
- if (tmpl.mr == NULL) {
- ret = EINVAL;
- ERROR("%p: MR creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- if (dev->data->dev_conf.intr_conf.rxq) {
- tmpl.channel = ibv_create_comp_channel(priv->ctx);
- if (tmpl.channel == NULL) {
- ret = ENOMEM;
- ERROR("%p: Rx interrupt completion channel creation"
- " failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- }
- attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- };
- if (priv->cqe_comp) {
- attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
- attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
- /*
- * For vectorized Rx, it must not be doubled in order to
- * make cq_ci and rq_ci aligned.
- */
- if (rxq_check_vec_support(&tmpl.rxq) < 0)
- cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
- }
- tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
- &attr.cq);
- if (tmpl.cq == NULL) {
- ret = ENOMEM;
- ERROR("%p: CQ creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- /* Configure VLAN stripping. */
- tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
- !!dev->data->dev_conf.rxmode.hw_vlan_strip);
- attr.wq = (struct ibv_exp_wq_init_attr){
- .wq_context = NULL, /* Could be useful in the future. */
- .wq_type = IBV_EXP_WQT_RQ,
- /* Max number of outstanding WRs. */
- .max_recv_wr = desc >> tmpl.rxq.sges_n,
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = 1 << tmpl.rxq.sges_n,
- .pd = priv->pd,
- .cq = tmpl.cq,
- .comp_mask =
- IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
- 0,
- .vlan_offloads = (tmpl.rxq.vlan_strip ?
- IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
- 0),
- };
- /* By default, FCS (CRC) is stripped by hardware. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
- tmpl.rxq.crc_present = 0;
- } else if (priv->hw_fcs_strip) {
- /* Ask HW/Verbs to leave CRC in place when supported. */
- attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS;
- attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
- tmpl.rxq.crc_present = 1;
- } else {
- WARN("%p: CRC stripping has been disabled but will still"
- " be performed by hardware, make sure MLNX_OFED and"
- " firmware are up to date",
- (void *)dev);
- tmpl.rxq.crc_present = 0;
- }
- DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
- " incoming frames to hide it",
- (void *)dev,
- tmpl.rxq.crc_present ? "disabled" : "enabled",
- tmpl.rxq.crc_present << 2);
- if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
- ; /* Nothing else to do. */
- else if (priv->hw_padding) {
- INFO("%p: enabling packet padding on queue %p",
- (void *)dev, (void *)rxq_ctrl);
- attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING;
- attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
- } else
- WARN("%p: packet padding has been requested but is not"
- " supported, make sure MLNX_OFED and firmware are"
- " up to date",
- (void *)dev);
-
- tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
- if (tmpl.wq == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: WQ creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- /*
- * Make sure number of WRs*SGEs match expectations since a queue
- * cannot allocate more than "desc" buffers.
- */
- if (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) ||
- ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) {
- ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
- (void *)dev,
- (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
- attr.wq.max_recv_wr, attr.wq.max_recv_sge);
- ret = EINVAL;
- goto error;
- }
- /* Save port ID. */
- tmpl.rxq.port_id = dev->data->port_id;
- DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
- /* Change queue state to ready. */
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_STATE,
- .wq_state = IBV_EXP_WQS_RDY,
- };
- ret = ibv_exp_modify_wq(tmpl.wq, &mod);
- if (ret) {
- ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- ret = rxq_setup(&tmpl);
- if (ret) {
- ERROR("%p: cannot initialize RX queue structure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- /* Reuse buffers from original queue if possible. */
- if (rxq_ctrl->rxq.elts_n) {
- assert(1 << rxq_ctrl->rxq.elts_n == desc);
- assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
- rxq_trim_elts(&rxq_ctrl->rxq);
- ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
- } else
- ret = rxq_alloc_elts(&tmpl, desc, NULL);
- if (ret) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- /* Clean up rxq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
- rxq_cleanup(rxq_ctrl);
- /* Move mbuf pointers to dedicated storage area in RX queue. */
- elts = (void *)(rxq_ctrl + 1);
- rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
-#ifndef NDEBUG
- memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
-#endif
- rte_free(tmpl.rxq.elts);
- tmpl.rxq.elts = elts;
- *rxq_ctrl = tmpl;
- /* Update doorbell counter. */
- rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
- rte_wmb();
- *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
- DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
- assert(ret == 0);
- return 0;
-error:
- elts = tmpl.rxq.elts;
- rxq_cleanup(&tmpl);
- rte_free(elts);
- assert(ret > 0);
- return ret;
-}
-
-/**
- * DPDK callback to configure a RX queue.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -1129,14 +236,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = (*priv->rxqs)[idx];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- const uint16_t desc_pad = MLX5_VPMD_DESCS_PER_LOOP; /* For vPMD. */
- int ret;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int ret = 0;
+ (void)conf;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
-
priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -1152,50 +259,24 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
priv_unlock(priv);
return -EOVERFLOW;
}
- if (rxq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)rxq);
- if (priv->started) {
- priv_unlock(priv);
- return -EEXIST;
- }
- (*priv->rxqs)[idx] = NULL;
- rxq_cleanup(rxq_ctrl);
- /* Resize if rxq size is changed. */
- if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
- rxq_ctrl = rte_realloc(rxq_ctrl,
- sizeof(*rxq_ctrl) +
- (desc + desc_pad) *
- sizeof(struct rte_mbuf *),
- RTE_CACHE_LINE_SIZE);
- if (!rxq_ctrl) {
- ERROR("%p: unable to reallocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
- }
- } else {
- rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
- (desc + desc_pad) *
- sizeof(struct rte_mbuf *),
- 0, socket);
- if (rxq_ctrl == NULL) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
+ if (!mlx5_priv_rxq_releasable(priv, idx)) {
+ ret = EBUSY;
+ ERROR("%p: unable to release queue index %u",
+ (void *)dev, idx);
+ goto out;
}
- ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
- if (ret)
- rte_free(rxq_ctrl);
- else {
- rxq_ctrl->rxq.stats.idx = idx;
- DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq_ctrl);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ mlx5_priv_rxq_release(priv, idx);
+ rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ if (!rxq_ctrl) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ ret = ENOMEM;
+ goto out;
}
+ DEBUG("%p: adding RX queue %p to list",
+ (void *)dev, (void *)rxq_ctrl);
+ (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+out:
priv_unlock(priv);
return -ret;
}
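/*
 * A minimal sketch, with an illustrative descriptor count: given the
 * reference-counted queue objects above, reconfiguring an index is a
 * release-then-new sequence behind the generic API.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
example_setup_rxq(uint16_t port_id, uint16_t idx, struct rte_mempool *mp)
{
	/* 512 descriptors, any NUMA socket, default thresholds. */
	return rte_eth_rx_queue_setup(port_id, idx, 512, SOCKET_ID_ANY,
				      NULL, mp);
}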
@@ -1209,76 +290,26 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
struct priv *priv;
- unsigned int i;
if (mlx5_is_secondary())
return;
if (rxq == NULL)
return;
- rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
priv_lock(priv);
- if (priv_flow_rxq_in_use(priv, rxq))
+ if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
rte_panic("Rx queue %p is still used by a flow and cannot be"
" removed\n", (void *)rxq_ctrl);
- for (i = 0; (i != priv->rxqs_n); ++i)
- if ((*priv->rxqs)[i] == rxq) {
- DEBUG("%p: removing RX queue %p from list",
- (void *)priv->dev, (void *)rxq_ctrl);
- (*priv->rxqs)[i] = NULL;
- break;
- }
- rxq_cleanup(rxq_ctrl);
- rte_free(rxq_ctrl);
+ mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
priv_unlock(priv);
}
/**
- * DPDK callback for RX in secondary processes.
- *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal RX burst callback.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct rxq *rxq = dpdk_rxq;
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct priv *priv = mlx5_secondary_data_setup(rxq_ctrl->priv);
- struct priv *primary_priv;
- unsigned int index;
-
- if (priv == NULL)
- return 0;
- primary_priv =
- mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
- /* Look for queue index in both private structures. */
- for (index = 0; index != priv->rxqs_n; ++index)
- if (((*primary_priv->rxqs)[index] == rxq) ||
- ((*priv->rxqs)[index] == rxq))
- break;
- if (index == priv->rxqs_n)
- return 0;
- rxq = (*priv->rxqs)[index];
- return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
-}
-
-/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
* @param priv
@@ -1296,6 +327,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ assert(!mlx5_is_secondary());
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
priv_rx_intr_vec_disable(priv);
@@ -1307,15 +339,14 @@ priv_rx_intr_vec_enable(struct priv *priv)
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct rxq_ctrl, rxq);
+ /* This rxq's Verbs object must not be released in this function. */
+ struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
int fd;
int flags;
int rc;
/* Skip queues that cannot request interrupts. */
- if (!rxq || !rxq_ctrl->channel) {
+ if (!rxq_ibv || !rxq_ibv->channel) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
@@ -1329,7 +360,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
priv_rx_intr_vec_disable(priv);
return -1;
}
- fd = rxq_ctrl->channel->fd;
+ fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
@@ -1359,14 +390,61 @@ void
priv_rx_intr_vec_disable(struct priv *priv)
{
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ return;
+ if (!intr_handle->intr_vec)
+ goto free;
+ for (i = 0; i != n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq_data;
+
+ if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID)
+ continue;
+ /**
+ * Need to access the queue directly to release the reference
+ * kept in priv_rx_intr_vec_enable().
+ */
+ rxq_data = (*priv->rxqs)[i];
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
+ }
+free:
rte_intr_free_epoll_fd(intr_handle);
- free(intr_handle->intr_vec);
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
}
-#ifdef HAVE_UPDATE_CQ_CI
+/**
+ * MLX5 CQ notification.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ * @param sq_n_rxq
+ * Sequence number per receive queue.
+ */
+static inline void
+mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
+{
+ int sq_n = 0;
+ uint32_t doorbell_hi;
+ uint64_t doorbell;
+ void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
+
+ sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
+ doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
+ doorbell = (uint64_t)doorbell_hi << 32;
+ doorbell |= rxq->cqn;
+ rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
+ rte_wmb();
+ rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
+}
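/*
 * A worked example of the doorbell layout above (values illustrative):
 * with sq_n = 1, cq_ci = 0xA5 and cqn = 0x1234,
 *   doorbell_hi = (1 << MLX5_CQ_SQN_OFFSET) | 0xA5
 *   doorbell    = ((uint64_t)doorbell_hi << 32) | 0x1234
 * i.e. the arm sequence number and consumer index land in the high
 * word and the CQ number in the low word, both written big-endian.
 */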
/**
* DPDK callback for Rx queue interrupt enable.
@@ -1383,16 +461,30 @@ int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- int ret;
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ int ret = 0;
- if (!rxq || !rxq_ctrl->channel) {
+ priv_lock(priv);
+ rxq_data = (*priv->rxqs)[rx_queue_id];
+ if (!rxq_data) {
ret = EINVAL;
- } else {
- ibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);
- ret = ibv_req_notify_cq(rxq_ctrl->cq, 0);
+ goto exit;
+ }
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->irq) {
+ struct mlx5_rxq_ibv *rxq_ibv;
+
+ rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ if (!rxq_ibv) {
+ ret = EINVAL;
+ goto exit;
+ }
+ mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
+ mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
}
+exit:
+ priv_unlock(priv);
if (ret)
WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
return -ret;
@@ -1413,25 +505,920 @@ int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
- int ret;
+ int ret = 0;
- if (!rxq || !rxq_ctrl->channel) {
+ priv_lock(priv);
+ rxq_data = (*priv->rxqs)[rx_queue_id];
+ if (!rxq_data) {
ret = EINVAL;
- } else {
- ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != rxq_ctrl->cq)
- ret = EINVAL;
+ goto exit;
+ }
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (!rxq_ctrl->irq)
+ goto exit;
+ rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ if (!rxq_ibv) {
+ ret = EINVAL;
+ goto exit;
+ }
+ ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_ibv->cq) {
+ ret = EINVAL;
+ goto exit;
}
+ rxq_data->cq_arm_sn++;
+ ibv_ack_cq_events(rxq_ibv->cq, 1);
+exit:
+ if (rxq_ibv)
+ mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
+ priv_unlock(priv);
if (ret)
WARN("unable to disable interrupt on rx queue %d",
rx_queue_id);
- else
- ibv_ack_cq_events(rxq_ctrl->cq, 1);
return -ret;
}
-#endif /* HAVE_UPDATE_CQ_CI */
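/*
 * A sketch of the application-side pairing for the two callbacks above,
 * assuming an epoll loop set up with rte_eth_dev_rx_intr_ctl(): arm the
 * CQ before sleeping, ack after wakeup, then resume polling.
 */
#include <rte_ethdev.h>

static void
example_rx_intr_cycle(uint16_t port_id, uint16_t queue_id)
{
	/* Arm: dispatches to mlx5_rx_intr_enable(). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* ... wait for the epoll event for this queue ... */
	/* Ack the CQ event: dispatches to mlx5_rx_intr_disable(). */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	/* Poll with rte_eth_rx_burst() until the queue is empty again. */
}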
+/**
+ * Create the Rx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The Verbs object initialised if it can be created.
+ */
+struct mlx5_rxq_ibv*
+mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
+ union {
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq;
+ struct ibv_wq_init_attr wq;
+ struct ibv_cq_ex cq_attr;
+ } attr;
+ unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
+ struct mlx5_rxq_ibv *tmpl;
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_rwq rwq;
+ unsigned int i;
+ int ret = 0;
+ struct mlx5dv_obj obj;
+
+ assert(rxq_data);
+ assert(!rxq_ctrl->ibv);
+ tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ ERROR("%p: cannot allocate verbs resources",
+ (void *)rxq_ctrl);
+ goto error;
+ }
+ tmpl->rxq_ctrl = rxq_ctrl;
+ /* Use the entire RX mempool as the memory region. */
+ tmpl->mr = priv_mr_get(priv, rxq_data->mp);
+ if (!tmpl->mr) {
+ tmpl->mr = priv_mr_new(priv, rxq_data->mp);
+ if (!tmpl->mr) {
+ ERROR("%p: MR creation failure", (void *)rxq_ctrl);
+ goto error;
+ }
+ }
+ if (rxq_ctrl->irq) {
+ tmpl->channel = ibv_create_comp_channel(priv->ctx);
+ if (!tmpl->channel) {
+ ERROR("%p: Comp Channel creation failure",
+ (void *)rxq_ctrl);
+ goto error;
+ }
+ }
+ attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = tmpl->channel,
+ .comp_mask = 0,
+ };
+ attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
+ .comp_mask = 0,
+ };
+ if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ attr.cq.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+ /*
+ * For vectorized Rx, it must not be doubled in order to
+ * make cq_ci and rq_ci aligned.
+ */
+ if (rxq_check_vec_support(rxq_data) < 0)
+ attr.cq.ibv.cqe *= 2;
+ } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ DEBUG("Rx CQE compression is disabled for HW timestamp");
+ }
+ tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
+ if (tmpl->cq == NULL) {
+ ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
+ goto error;
+ }
+ DEBUG("priv->device_attr.max_qp_wr is %d",
+ priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("priv->device_attr.max_sge is %d",
+ priv->device_attr.orig_attr.max_sge);
+ attr.wq = (struct ibv_wq_init_attr){
+ .wq_context = NULL, /* Could be useful in the future. */
+ .wq_type = IBV_WQT_RQ,
+ /* Max number of outstanding WRs. */
+ .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
+ /* Max number of scatter/gather elements in a WR. */
+ .max_sge = 1 << rxq_data->sges_n,
+ .pd = priv->pd,
+ .cq = tmpl->cq,
+ .comp_mask =
+ IBV_WQ_FLAGS_CVLAN_STRIPPING |
+ 0,
+ .create_flags = (rxq_data->vlan_strip ?
+ IBV_WQ_FLAGS_CVLAN_STRIPPING :
+ 0),
+ };
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq_data->crc_present) {
+ attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
+ if (priv->hw_padding) {
+ attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+#endif
+ tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
+ if (tmpl->wq == NULL) {
+ ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
+ goto error;
+ }
+ /*
+ * Make sure number of WRs*SGEs match expectations since a queue
+ * cannot allocate more than "desc" buffers.
+ */
+ if (((int)attr.wq.max_wr !=
+ ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
+ ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
+ ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
+ (void *)rxq_ctrl,
+ ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
+ (1 << rxq_data->sges_n),
+ attr.wq.max_wr, attr.wq.max_sge);
+ goto error;
+ }
+ /* Change queue state to ready. */
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ };
+ ret = ibv_modify_wq(tmpl->wq, &mod);
+ if (ret) {
+ ERROR("%p: WQ state to IBV_WQS_RDY failed",
+ (void *)rxq_ctrl);
+ goto error;
+ }
+ obj.cq.in = tmpl->cq;
+ obj.cq.out = &cq_info;
+ obj.rwq.in = tmpl->wq;
+ obj.rwq.out = &rwq;
+ ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
+ if (ret != 0)
+ goto error;
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ goto error;
+ }
+ /* Fill the rings. */
+ rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
+ (uintptr_t)rwq.buf;
+ for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
+ struct rte_mbuf *buf = (*rxq_data->elts)[i];
+ volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
+
+ /* scat->addr must be able to store a pointer. */
+ assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+ *scat = (struct mlx5_wqe_data_seg){
+ .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t)),
+ .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
+ .lkey = tmpl->mr->lkey,
+ };
+ }
+ rxq_data->rq_db = rwq.dbrec;
+ rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ rxq_data->cq_ci = 0;
+ rxq_data->rq_ci = 0;
+ rxq_data->rq_pi = 0;
+ rxq_data->zip = (struct rxq_zip){
+ .ai = 0,
+ };
+ rxq_data->cq_db = cq_info.dbrec;
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+ rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->cqn = cq_info.cqn;
+ rxq_data->cq_arm_sn = 0;
+ /* Update doorbell counter. */
+ rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
+ rte_wmb();
+ *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
+ DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
+ rte_atomic32_inc(&tmpl->refcnt);
+ DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+ return tmpl;
+error:
+ if (tmpl->wq)
+ claim_zero(ibv_destroy_wq(tmpl->wq));
+ if (tmpl->cq)
+ claim_zero(ibv_destroy_cq(tmpl->cq));
+ if (tmpl->channel)
+ claim_zero(ibv_destroy_comp_channel(tmpl->channel));
+ if (tmpl->mr)
+ priv_mr_release(priv, tmpl->mr);
+ return NULL;
+}
+
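The error path above illustrates the unwind idiom used throughout this file: resources (MR, completion channel, CQ, WQ) are acquired in order and released in reverse through a single error label, skipping whatever was never created. A minimal, self-contained sketch of the idiom with hypothetical resource names:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name)
{
	printf("acquire %s\n", name);
	return malloc(1);
}

static void release(const char *name, void *p)
{
	printf("release %s\n", name);
	free(p);
}

static int setup(void)
{
	void *cq = NULL;
	void *wq = NULL;

	cq = acquire("cq");
	if (!cq)
		goto error;
	wq = acquire("wq");
	if (!wq)
		goto error;
	return 0;	/* success: both resources live */
error:
	/* Unwind in reverse acquisition order, skipping what failed. */
	if (wq)
		release("wq", wq);
	if (cq)
		release("cq", cq);
	return -1;
}

int main(void)
{
	return setup();
}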
+/**
+ * Get an Rx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   Queue index in DPDK Rx queue array.
+ *
+ * @return
+ * The Verbs object if it exists.
+ */
+struct mlx5_rxq_ibv*
+mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (idx >= priv->rxqs_n)
+ return NULL;
+ if (!rxq_data)
+ return NULL;
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->ibv) {
+ priv_mr_get(priv, rxq_data->mp);
+ rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
+ DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ (void *)rxq_ctrl->ibv,
+ rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
+ }
+ return rxq_ctrl->ibv;
+}
+
+/**
+ * Release a Verbs Rx queue object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rxq_ibv
+ * Verbs Rx queue object.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+{
+ int ret;
+
+ assert(rxq_ibv);
+ assert(rxq_ibv->wq);
+ assert(rxq_ibv->cq);
+ assert(rxq_ibv->mr);
+ ret = priv_mr_release(priv, rxq_ibv->mr);
+ if (!ret)
+ rxq_ibv->mr = NULL;
+ DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
+ if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
+ rxq_free_elts(rxq_ibv->rxq_ctrl);
+ claim_zero(ibv_destroy_wq(rxq_ibv->wq));
+ claim_zero(ibv_destroy_cq(rxq_ibv->cq));
+ if (rxq_ibv->channel)
+ claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel));
+ LIST_REMOVE(rxq_ibv, next);
+ rte_free(rxq_ibv);
+ return 0;
+ }
+ return EBUSY;
+}
+
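The get/release pair above is the reference-counting scheme every object in this file follows: a getter increments the counter, and release destroys the object only when the count drops to zero, returning EBUSY otherwise. A sketch of the same pattern using C11 atomics as a stand-in for DPDK's rte_atomic32 helpers:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static int obj_release(struct obj *o)
{
	/* Destroy only when the last reference is dropped. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		free(o);
		return 0;
	}
	return EBUSY;	/* Other users still hold references. */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o == NULL)
		return 1;
	atomic_init(&o->refcnt, 1);	/* Creation owns one reference. */
	obj_get(o);
	printf("%d\n", obj_release(o));	/* EBUSY: one reference left. */
	printf("%d\n", obj_release(o));	/* 0: object destroyed. */
	return 0;
}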
+/**
+ * Verify the Verbs Rx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return the number of objects not released.
+ */
+int
+mlx5_priv_rxq_ibv_verify(struct priv *priv)
+{
+ int ret = 0;
+ struct mlx5_rxq_ibv *rxq_ibv;
+
+ LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
+ DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
+ (void *)rxq_ibv);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rxq_ibv
+ * Verbs Rx queue object.
+ */
+int
+mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+{
+ (void)priv;
+ assert(rxq_ibv);
+ return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
+}
+
+/**
+ * Create a DPDK Rx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ * @param desc
+ *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param mp
+ *   Memory pool for buffer allocations.
+ *
+ * @return
+ * A DPDK queue object on success.
+ */
+struct mlx5_rxq_ctrl*
+mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket, struct rte_mempool *mp)
+{
+ struct rte_eth_dev *dev = priv->dev;
+ struct mlx5_rxq_ctrl *tmpl;
+ const uint16_t desc_n =
+ desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+
+ tmpl = rte_calloc_socket("RXQ", 1,
+ sizeof(*tmpl) +
+ desc_n * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ tmpl->socket = socket;
+ if (priv->dev->data->dev_conf.intr_conf.rxq)
+ tmpl->irq = 1;
+	/* Enable scattered packet support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl->rxq.sges_n = 0;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ unsigned int size =
+ RTE_PKTMBUF_HEADROOM +
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int sges_n;
+
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it to the next power of two.
+ */
+ sges_n = log2above((size / mb_len) + !!(size % mb_len));
+ tmpl->rxq.sges_n = sges_n;
+ /* Make sure rxq.sges_n did not overflow. */
+ size = mb_len * (1 << tmpl->rxq.sges_n);
+ size -= RTE_PKTMBUF_HEADROOM;
+ if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ ERROR("%p: too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ (void *)dev,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ goto error;
+ }
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ DEBUG("%p: maximum number of segments per packet: %u",
+ (void *)dev, 1 << tmpl->rxq.sges_n);
+ if (desc % (1 << tmpl->rxq.sges_n)) {
+ ERROR("%p: number of RX queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ (void *)dev,
+ desc,
+ 1 << tmpl->rxq.sges_n);
+ goto error;
+ }
+ /* Toggle RX checksum offload if hardware supports it. */
+ if (priv->hw_csum)
+ tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ if (priv->hw_csum_l2tun)
+ tmpl->rxq.csum_l2tun =
+ !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ tmpl->rxq.hw_timestamp =
+ !!dev->data->dev_conf.rxmode.hw_timestamp;
+ /* Configure VLAN stripping. */
+ tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
+ !!dev->data->dev_conf.rxmode.hw_vlan_strip);
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ tmpl->rxq.crc_present = 0;
+ } else if (priv->hw_fcs_strip) {
+ tmpl->rxq.crc_present = 1;
+ } else {
+ WARN("%p: CRC stripping has been disabled but will still"
+ " be performed by hardware, make sure MLNX_OFED and"
+ " firmware are up to date",
+ (void *)dev);
+ tmpl->rxq.crc_present = 0;
+ }
+ DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ (void *)dev,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
+ /* Save port ID. */
+ tmpl->rxq.rss_hash = priv->rxqs_n > 1;
+ tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->priv = priv;
+ tmpl->rxq.mp = mp;
+ tmpl->rxq.stats.idx = idx;
+ tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ rte_atomic32_inc(&tmpl->refcnt);
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
+}
+
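In the scattered-Rx branch above, sges_n is chosen so that 2^sges_n mbufs cover the largest possible packet. A worked example under assumed values (mb_len = 2048, max_rx_pkt_len = 9000, 128 bytes of headroom) with a local log2above() standing in for DPDK's helper:

#include <stdio.h>

static unsigned int log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	/* Ceiling of log2: count shifts, round up if any low bit set. */
	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int main(void)
{
	const unsigned int headroom = 128;		/* RTE_PKTMBUF_HEADROOM */
	const unsigned int mb_len = 2048;		/* mbuf data room */
	const unsigned int max_rx_pkt_len = 9000;	/* jumbo frame */
	unsigned int size = headroom + max_rx_pkt_len;
	unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));

	/* 9128 bytes need 5 buffers, rounded up to 8 (2^3). */
	printf("sges_n = %u -> %u SGEs per packet\n", sges_n, 1u << sges_n);
	return 0;
}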
+/**
+ * Get an Rx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists.
+ */
+struct mlx5_rxq_ctrl*
+mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+
+ if ((*priv->rxqs)[idx]) {
+ rxq_ctrl = container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl,
+ rxq);
+
+ mlx5_priv_rxq_ibv_get(priv, idx);
+ rte_atomic32_inc(&rxq_ctrl->refcnt);
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ }
+ return rxq_ctrl;
+}
+
+/**
+ * Release an Rx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx])
+ return 0;
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ assert(rxq_ctrl->priv);
+ if (rxq_ctrl->ibv) {
+ int ret;
+
+ ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+ if (!ret)
+ rxq_ctrl->ibv = NULL;
+ }
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+ LIST_REMOVE(rxq_ctrl, next);
+ rte_free(rxq_ctrl);
+ (*priv->rxqs)[idx] = NULL;
+ return 0;
+ }
+ return EBUSY;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ * 1 if the queue can be released.
+ */
+int
+mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx])
+ return -1;
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
+/**
+ * Verify the Rx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return the number of objects not released.
+ */
+int
+mlx5_priv_rxq_verify(struct priv *priv)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ int ret = 0;
+
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
+ (void *)rxq_ctrl);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an indirection table.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * A new indirection table.
+ */
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
+ uint16_t queues_n)
+{
+ struct mlx5_ind_table_ibv *ind_tbl;
+ const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->ind_table_max_size);
+ struct ibv_wq *wq[1 << wq_n];
+ unsigned int i;
+ unsigned int j;
+
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0);
+ if (!ind_tbl)
+ return NULL;
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq =
+ mlx5_priv_rxq_get(priv, queues[i]);
+
+ if (!rxq)
+ goto error;
+ wq[i] = rxq->ibv->wq;
+ ind_tbl->queues[i] = queues[i];
+ }
+ ind_tbl->queues_n = queues_n;
+ /* Finalise indirection table. */
+ for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
+ wq[i] = wq[j];
+ ind_tbl->ind_table = ibv_create_rwq_ind_table(
+ priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = wq_n,
+ .ind_tbl = wq,
+ .comp_mask = 0,
+ });
+ if (!ind_tbl->ind_table)
+ goto error;
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ return ind_tbl;
+error:
+ rte_free(ind_tbl);
+ DEBUG("%p cannot create indirection table", (void *)priv);
+ return NULL;
+}
+
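When queues_n is not a power of two, the second fill loop above pads the table to 2^wq_n entries by replaying the queue list cyclically. A self-contained sketch of that replication with illustrative values:

#include <stdio.h>

int main(void)
{
	const unsigned int queues_n = 3;	/* not a power of two */
	const unsigned int wq_n = 3;		/* table size: 1 << 3 = 8 */
	unsigned int wq[8];
	unsigned int i, j;

	for (i = 0; i != queues_n; ++i)
		wq[i] = i;			/* stand-ins for WQ handles */
	for (j = 0; i != (1u << wq_n); ++i, ++j)
		wq[i] = wq[j];			/* cyclic replication */
	for (i = 0; i != (1u << wq_n); ++i)
		printf("%u ", wq[i]);		/* 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}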
+/**
+ * Get an indirection table.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * An indirection table if found.
+ */
+struct mlx5_ind_table_ibv*
+mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
+ uint16_t queues_n)
+{
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ if ((ind_tbl->queues_n == queues_n) &&
+ (memcmp(ind_tbl->queues, queues,
+ ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
+ == 0))
+ break;
+ }
+ if (ind_tbl) {
+ unsigned int i;
+
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+ }
+ return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param ind_table
+ * Indirection table to release.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_priv_ind_table_ibv_release(struct priv *priv,
+ struct mlx5_ind_table_ibv *ind_tbl)
+{
+ unsigned int i;
+
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+ claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+ if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+ LIST_REMOVE(ind_tbl, next);
+ rte_free(ind_tbl);
+ return 0;
+ }
+ return EBUSY;
+}
+
+/**
+ * Verify the Verbs indirection table list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return the number of objects not released.
+ */
+int
+mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+{
+ struct mlx5_ind_table_ibv *ind_tbl;
+ int ret = 0;
+
+ LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+ DEBUG("%p: Verbs indirection table %p still referenced",
+ (void *)priv, (void *)ind_tbl);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rss_key
+ * RSS key for the Rx hash queue.
+ * @param rss_key_len
+ * RSS key length.
+ * @param hash_fields
+ * Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering the hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ *
+ * @return
+ *   A hash Rx queue on success.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+ uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+
+ queues_n = hash_fields ? queues_n : 1;
+ ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+ if (!ind_tbl)
+ return NULL;
+ qp = ibv_create_qp_ex(
+ priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len,
+ .rx_hash_key = rss_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ });
+ if (!qp)
+ goto error;
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+ if (!hrxq)
+ goto error;
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ hrxq->rss_key_len = rss_key_len;
+ hrxq->hash_fields = hash_fields;
+ memcpy(hrxq->rss_key, rss_key, rss_key_len);
+ rte_atomic32_inc(&hrxq->refcnt);
+ LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ return hrxq;
+error:
+ mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ if (qp)
+ claim_zero(ibv_destroy_qp(qp));
+ return NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering the hash queue. In case of empty hash_fields only the
+ * first queue index will be taken for the indirection table.
+ * @param queues_n
+ * Number of queues.
+ *
+ * @return
+ *   A hash Rx queue on success.
+ */
+struct mlx5_hrxq*
+mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+ uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+ struct mlx5_hrxq *hrxq;
+
+ queues_n = hash_fields ? queues_n : 1;
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ struct mlx5_ind_table_ibv *ind_tbl;
+
+ if (hrxq->rss_key_len != rss_key_len)
+ continue;
+ if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+ continue;
+ if (hrxq->hash_fields != hash_fields)
+ continue;
+ ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ if (!ind_tbl)
+ continue;
+ if (ind_tbl != hrxq->ind_table) {
+ mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ continue;
+ }
+ rte_atomic32_inc(&hrxq->refcnt);
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ return hrxq;
+ }
+ return NULL;
+}
+
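Callers treat mlx5_priv_hrxq_get() and mlx5_priv_hrxq_new() as a get-or-create cache keyed on the RSS parameters: look up first, create only on a miss. A generic sketch of the pattern with a simplified integer key:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
	int refcnt;
};

static struct entry *head;

static struct entry *entry_get(int key)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next) {
		if (e->key == key) {
			++e->refcnt;	/* cache hit: take a reference */
			return e;
		}
	}
	return NULL;
}

static struct entry *entry_new(int key)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (e == NULL)
		return NULL;
	e->key = key;
	e->refcnt = 1;		/* creation owns the first reference */
	e->next = head;
	head = e;
	return e;
}

int main(void)
{
	struct entry *e = entry_get(42);

	if (e == NULL)
		e = entry_new(42);	/* miss: create and cache */
	if (e == NULL)
		return 1;
	printf("key %d refcnt %d\n", e->key, e->refcnt);
	return 0;
}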
+/**
+ * Release the hash Rx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param hrxq
+ * Pointer to Hash Rx queue to release.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+{
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(ibv_destroy_qp(hrxq->qp));
+ mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+ LIST_REMOVE(hrxq, next);
+ rte_free(hrxq);
+ return 0;
+ }
+ claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
+ return EBUSY;
+}
+
+/**
+ * Verify the Verbs hash Rx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return the number of objects not released.
+ */
+int
+mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+{
+ struct mlx5_hrxq *hrxq;
+ int ret = 0;
+
+ LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+ DEBUG("%p: Verbs Hash Rx queue %p still referenced",
+ (void *)priv, (void *)hrxq);
+ ++ret;
+ }
+ return ret;
+}
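Each *_verify() helper above walks a sys/queue.h list and counts objects still referenced at teardown, which the caller reports as leaks. A minimal sketch of that check:

#include <stdio.h>
#include <sys/queue.h>

struct node {
	LIST_ENTRY(node) next;
	const char *name;
};

LIST_HEAD(node_list, node);

int main(void)
{
	struct node_list list = LIST_HEAD_INITIALIZER(list);
	struct node a = { .name = "rxq0" };
	struct node b = { .name = "rxq1" };
	struct node *n;
	int ret = 0;

	LIST_INSERT_HEAD(&list, &a, next);
	LIST_INSERT_HEAD(&list, &b, next);
	LIST_FOREACH(n, &list, next) {
		printf("%s still referenced\n", n->name);
		++ret;
	}
	printf("%d objects not released\n", ret);
	return 0;
}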
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b07bcd11..9658b378 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -42,25 +42,17 @@
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
-#include <infiniband/arch.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -73,11 +65,11 @@ static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
uint16_t cqe_cnt, uint32_t *rss_hash);
static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
@@ -105,6 +97,8 @@ mlx5_set_ptype_table(void)
* bit[6] = tunneled
* bit[7] = outer_l3_type
*/
+ /* L2 */
+ (*p)[0x00] = RTE_PTYPE_L2_ETHER;
/* L3 */
(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_NONFRAG;
@@ -171,29 +165,29 @@ mlx5_set_ptype_table(void)
/* Tunneled - TCP */
(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP;
+ RTE_PTYPE_INNER_L4_TCP;
(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP;
+ RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP;
+ RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP;
+ RTE_PTYPE_INNER_L4_TCP;
/* Tunneled - UDP */
(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP;
+ RTE_PTYPE_INNER_L4_UDP;
(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP;
+ RTE_PTYPE_INNER_L4_UDP;
(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP;
+ RTE_PTYPE_INNER_L4_UDP;
(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP;
+ RTE_PTYPE_INNER_L4_UDP;
}
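The hunk above switches the tunneled entries to RTE_PTYPE_INNER_L4_* flags, since for an encapsulated packet the TCP/UDP header the NIC parses is the inner one. A tiny sketch of such an index-to-ptype table; the flag values are made-up stand-ins for the real RTE_PTYPE_* constants:

#include <stdio.h>
#include <stdint.h>

#define PT_L2_ETHER	0x1
#define PT_L3_IPV4	0x10
#define PT_INNER_L4_TCP	0x1000	/* inner, not outer, L4 type */

int main(void)
{
	uint32_t table[256] = { 0 };
	uint8_t idx = 0x45;	/* bit[6] set: tunneled, per the layout comment */

	table[idx] = PT_L2_ETHER | PT_L3_IPV4 | PT_INNER_L4_TCP;
	printf("ptype(0x%02x) = 0x%x\n", idx, table[idx]);
	return 0;
}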
/**
@@ -208,7 +202,7 @@ mlx5_set_ptype_table(void)
* Size of tailroom.
*/
static inline size_t
-tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
{
size_t tailroom;
tailroom = (uintptr_t)(txq->wqes) +
@@ -266,7 +260,7 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- struct txq *txq = tx_queue;
+ struct mlx5_txq_data *txq = tx_queue;
uint16_t used;
mlx5_tx_complete(txq);
@@ -290,7 +284,7 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
- struct rxq *rxq = rx_queue;
+ struct mlx5_rxq_data *rxq = rx_queue;
struct rxq_zip *zip = &rxq->zip;
volatile struct mlx5_cqe *cqe;
const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -313,7 +307,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
op_own = cqe->op_own;
if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
- n = ntohl(cqe->byte_cnt);
+ n = rte_be_to_cpu_32(cqe->byte_cnt);
else
n = 1;
cq_ci += n;
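This hunk is part of a file-wide change replacing ntohl()/htonl() with rte_be_to_cpu_32()/rte_cpu_to_be_32(): CQE and WQE fields are fixed big-endian, and the rte_* names state the conversion direction explicitly (they compile to no-ops on big-endian hosts). A portable stand-in for the be-to-cpu direction:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t be32_to_cpu(uint32_t be)
{
	const uint8_t *b = (const uint8_t *)&be;

	/* Reassemble from bytes: correct on any host endianness. */
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	const uint8_t wire[4] = { 0x00, 0x00, 0x05, 0x39 };	/* 1337 */
	uint32_t be;

	memcpy(&be, wire, sizeof(be));
	printf("%u\n", be32_to_cpu(be));	/* 1337 on LE and BE hosts */
	return 0;
}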
@@ -342,7 +336,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -413,8 +407,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#ifdef MLX5_PMD_SOFT_COUNTERS
total_length = length;
#endif
- if (length < (MLX5_WQE_DWORD_SIZE + 2))
+ if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
+ txq->stats.oerrors++;
break;
+ }
/* Update element. */
(*txq->elts)[elts_head & elts_m] = buf;
/* Prefetch next buffer data. */
@@ -441,7 +437,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
- uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+ uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+ buf->vlan_tci);
unsigned int len = 2 * ETHER_ADDR_LEN - 2;
addr += 2;
@@ -461,6 +458,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
length -= pkt_inline_sz;
addr += pkt_inline_sz;
}
+ raw += MLX5_WQE_DWORD_SIZE;
if (txq->tso_en) {
tso = buf->ol_flags & PKT_TX_TCP_SEG;
if (tso) {
@@ -479,7 +477,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
tso_header_sz = buf->l2_len + vlan_sz +
buf->l3_len + buf->l4_len;
tso_segsz = buf->tso_segsz;
-
+ if (unlikely(tso_segsz == 0)) {
+ txq->stats.oerrors++;
+ break;
+ }
if (is_tunneled && txq->tunnel_en) {
tso_header_sz += buf->outer_l2_len +
buf->outer_l3_len;
@@ -488,12 +489,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
if (unlikely(tso_header_sz >
- MLX5_MAX_TSO_HEADER))
+ MLX5_MAX_TSO_HEADER)) {
+ txq->stats.oerrors++;
break;
+ }
copy_b = tso_header_sz - pkt_inline_sz;
/* First seg must contain all headers. */
assert(copy_b <= length);
- raw += MLX5_WQE_DWORD_SIZE;
if (copy_b &&
((end - (uintptr_t)raw) > copy_b)) {
uint16_t n = (MLX5_WQE_DS(copy_b) -
@@ -506,19 +508,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
+ /* Include padding for TSO header. */
+ copy_b = MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE;
pkt_inline_sz += copy_b;
- /*
- * Another DWORD will be added
- * in the inline part.
- */
- raw += MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE -
- MLX5_WQE_DWORD_SIZE;
+ raw += copy_b;
} else {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
- htonl(txq->wqe_ci << 8),
- htonl(txq->qp_num_8s | 1),
+ rte_cpu_to_be_32(
+ txq->wqe_ci << 8),
+ rte_cpu_to_be_32(
+ txq->qp_num_8s | 1),
0,
0,
};
@@ -531,19 +532,20 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
/* Inline if enough room. */
if (inline_en || tso) {
+ uint32_t inl;
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) * MLX5_WQE_SIZE);
unsigned int inline_room = max_inline *
RTE_CACHE_LINE_SIZE -
- (pkt_inline_sz - 2);
+ (pkt_inline_sz - 2) -
+ !!tso * sizeof(inl);
uintptr_t addr_end = (addr + inline_room) &
~(RTE_CACHE_LINE_SIZE - 1);
unsigned int copy_b = (addr_end > addr) ?
RTE_MIN((addr_end - addr), length) :
0;
- raw += MLX5_WQE_DWORD_SIZE;
if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
/*
* One Dseg remains in the current WQE. To
@@ -556,12 +558,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
break;
max_wqe -= n;
if (tso) {
- uint32_t inl =
- htonl(copy_b | MLX5_INLINE_SEG);
-
- pkt_inline_sz =
- MLX5_WQE_DS(tso_header_sz) *
- MLX5_WQE_DWORD_SIZE;
+ inl = rte_cpu_to_be_32(copy_b |
+ MLX5_INLINE_SEG);
rte_memcpy((void *)raw,
(void *)&inl, sizeof(inl));
raw += sizeof(inl);
@@ -610,9 +608,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- naddr = htonll(addr);
+ naddr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t){
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
@@ -649,9 +647,9 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
@@ -664,27 +662,33 @@ next_seg:
else
j += sg;
next_pkt:
+ if (ds > MLX5_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
++elts_head;
++pkts;
++i;
/* Initialize known and common part of the WQE structure. */
if (tso) {
wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
- htonl(txq->qp_num_8s | ds),
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
0,
0,
};
wqe->eseg = (rte_v128u32_t){
0,
- cs_flags | (htons(tso_segsz) << 16),
+ cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
0,
- (ehdr << 16) | htons(tso_header_sz),
+ (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
} else {
wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
- htonl(txq->qp_num_8s | ds),
+ rte_cpu_to_be_32((txq->wqe_ci << 8) |
+ MLX5_OPCODE_SEND),
+ rte_cpu_to_be_32(txq->qp_num_8s | ds),
0,
0,
};
@@ -692,7 +696,7 @@ next_pkt:
0,
cs_flags,
0,
- (ehdr << 16) | htons(pkt_inline_sz),
+ (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
}
next_wqe:
@@ -712,7 +716,7 @@ next_wqe:
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
/* Request completion on last WQE. */
- last_wqe->ctrl2 = htonl(8);
+ last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
@@ -739,7 +743,7 @@ next_wqe:
* Packet length.
*/
static inline void
-mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
@@ -751,13 +755,14 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->len = length;
mpw->total_len = 0;
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
mpw->wqe->eseg.rsvd2 = 0;
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -778,7 +783,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
* Pointer to MPW session structure.
*/
static inline void
-mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
unsigned int num = mpw->pkts_n;
@@ -786,7 +791,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
mpw->state = MLX5_MPW_STATE_CLOSED;
if (num < 3)
++txq->wqe_ci;
@@ -812,7 +817,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
uint16_t
mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -850,8 +855,10 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX)
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
break;
+ }
max_elts -= segs_n;
--pkts_n;
/* Should we enable HW CKSUM offload */
@@ -893,9 +900,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
@@ -923,7 +930,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
@@ -953,7 +960,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet length.
*/
static inline void
-mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
+ uint32_t length)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
struct mlx5_wqe_inl_small *inl;
@@ -963,12 +971,12 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->len = length;
mpw->total_len = 0;
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
- mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.cs_flags = 0;
mpw->wqe->eseg.rsvd0 = 0;
@@ -988,7 +996,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
* Pointer to MPW session structure.
*/
static inline void
-mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
unsigned int size;
struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
@@ -999,9 +1007,10 @@ mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(size));
mpw->state = MLX5_MPW_STATE_CLOSED;
- inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+ inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
}
@@ -1022,7 +1031,7 @@ uint16_t
mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -1071,8 +1080,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (max_elts < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX)
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
break;
+ }
max_elts -= segs_n;
--pkts_n;
/*
@@ -1139,9 +1150,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count =
+ rte_cpu_to_be_32(DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
@@ -1213,7 +1225,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
@@ -1245,7 +1257,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* Packet length.
*/
static inline void
-mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
@@ -1253,9 +1265,10 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
mpw->pkts_n = 0;
mpw->total_len = sizeof(struct mlx5_wqe);
mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
- mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_ENHANCED_MPSW);
+ mpw->wqe->ctrl[0] =
+ rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_ENHANCED_MPSW);
mpw->wqe->ctrl[2] = 0;
mpw->wqe->ctrl[3] = 0;
memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1263,9 +1276,9 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
/* Pad the first 2 DWORDs with zero-length inline header. */
- *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+ *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
- htonl(MLX5_INLINE_SEG);
+ rte_cpu_to_be_32(MLX5_INLINE_SEG);
mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
/* Start from the next WQEBB. */
mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1286,14 +1299,15 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
* Number of consumed WQEs.
*/
static inline uint16_t
-mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
uint16_t ret;
/* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+ mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+ MLX5_WQE_DS(mpw->total_len));
mpw->state = MLX5_MPW_STATE_CLOSED;
ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
txq->wqe_ci += ret;
@@ -1316,7 +1330,7 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
uint16_t
mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -1360,8 +1374,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (max_elts - j < segs_n)
break;
/* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX)
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
break;
+ }
/* Should we enable HW CKSUM offload. */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -1446,9 +1462,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
dseg = mpw.data.dseg[mpw.pkts_n];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
- .byte_count = htonl(DATA_LEN(buf)),
+ .byte_count = rte_cpu_to_be_32(
+ DATA_LEN(buf)),
.lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = htonll(addr),
+ .addr = rte_cpu_to_be_64(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
length += DATA_LEN(buf);
@@ -1471,7 +1488,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
assert(length == DATA_LEN(buf));
- inl_hdr = htonl(length | MLX5_INLINE_SEG);
+ inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
addr = rte_pktmbuf_mtod(buf, uintptr_t);
mpw.data.raw = (volatile void *)
((uintptr_t)mpw.data.raw + inl_pad);
@@ -1527,9 +1544,9 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
rte_prefetch2((void *)(addr +
n * RTE_CACHE_LINE_SIZE));
- naddr = htonll(addr);
+ naddr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t) {
- htonl(length),
+ rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
naddr,
naddr >> 32,
@@ -1557,7 +1574,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
@@ -1627,7 +1644,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
* with error.
*/
static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
uint16_t cqe_cnt, uint32_t *rss_hash)
{
struct rxq_zip *zip = &rxq->zip;
@@ -1641,8 +1658,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
(volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
- len = ntohl((*mc)[zip->ai & 7].byte_cnt);
- *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+ len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+ *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
@@ -1690,7 +1707,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
cqe_cnt].pkt_info);
/* Fix endianness. */
- zip->cqe_cnt = ntohl(cqe->byte_cnt);
+ zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
/*
* Current mini array position is the one returned by
* check_cqe64().
@@ -1705,8 +1722,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
--rxq->cq_ci;
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
- len = ntohl((*mc)[0].byte_cnt);
- *rss_hash = ntohl((*mc)[0].rx_hash_result);
+ len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+ *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
zip->ai = 1;
/* Prefetch all the entries to be invalidated */
idx = zip->ca;
@@ -1716,8 +1733,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
++idx;
}
} else {
- len = ntohl(cqe->byte_cnt);
- *rss_hash = ntohl(cqe->rx_hash_res);
+ len = rte_be_to_cpu_32(cqe->byte_cnt);
+ *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1738,10 +1755,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
- uint16_t flags = ntohs(cqe->hdr_type_etc);
+ uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
ol_flags =
TRANSPOSE(flags,
@@ -1777,7 +1794,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct rxq *rxq = dpdk_rxq;
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
const unsigned int sges_n = rxq->sges_n;
@@ -1848,7 +1865,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
pkt->ol_flags |= PKT_RX_FDIR;
if (cqe->sop_drop_qpn !=
- htonl(MLX5_FLOW_MARK_DEFAULT)) {
+ rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
uint32_t mark = cqe->sop_drop_qpn;
pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1860,10 +1877,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
if (rxq->vlan_strip &&
(cqe->hdr_type_etc &
- htons(MLX5_CQE_VLAN_STRIPPED))) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT |
+ rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN |
PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci = ntohs(cqe->vlan_info);
+ pkt->vlan_tci =
+ rte_be_to_cpu_16(cqe->vlan_info);
+ }
+ if (rxq->hw_timestamp) {
+ pkt->timestamp =
+ rte_be_to_cpu_64(cqe->timestamp);
+ pkt->ol_flags |= PKT_RX_TIMESTAMP;
}
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
@@ -1879,7 +1902,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* of the buffers are already known, only the buffer address
* changes.
*/
- wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+ wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
if (len > DATA_LEN(seg)) {
len -= DATA_LEN(seg);
++NB_SEGS(pkt);
@@ -1906,10 +1929,10 @@ skip:
return 0;
/* Update the consumer index. */
rxq->rq_ci = rq_ci >> sges_n;
- rte_wmb();
- *rxq->cq_db = htonl(rxq->cq_ci);
- rte_wmb();
- *rxq->rq_db = htonl(rxq->rq_ci);
+ rte_io_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ rte_io_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment packets counter. */
rxq->stats.ipackets += i;
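The switch from rte_wmb() to rte_io_wmb() above keeps the required ordering, namely that descriptor and CQ writes reach memory before the doorbell stores that publish them, while using a cheaper I/O-scoped barrier. A sketch of the fence-then-doorbell pattern with a C11 release fence as a stand-in:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ring[4];		/* stand-in for WQE/CQE memory */
static volatile uint32_t doorbell;	/* stand-in for the HW register */

int main(void)
{
	ring[0] = 0xabcd;	/* fill descriptor(s) first */
	/* Release fence: ring stores are ordered before the doorbell. */
	atomic_thread_fence(memory_order_release);
	doorbell = 1;		/* publish the new producer index */
	printf("db=%u ring[0]=0x%x\n", doorbell, ring[0]);
	return 0;
}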
@@ -2016,7 +2039,7 @@ priv_check_vec_tx_support(struct priv *priv)
}
int __attribute__((weak))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
(void)rxq;
return -ENOTSUP;
@@ -2028,9 +2051,3 @@ priv_check_vec_rx_support(struct priv *priv)
(void)priv;
return -ENOTSUP;
}
-
-void __attribute__((weak))
-priv_prep_vec_rx_function(struct priv *priv)
-{
- (void)priv;
-}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7de1d108..d34f3cc0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -36,6 +36,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -43,21 +44,16 @@
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <rte_hexdump.h>
+#include <rte_atomic.h>
#include "mlx5_utils.h"
#include "mlx5.h"
@@ -81,19 +77,22 @@ struct mlx5_txq_stats {
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
- uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/* Flow director queue structure. */
-struct fdir_queue {
- struct ibv_qp *qp; /* Associated RX QP. */
- struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
- struct ibv_exp_wq *wq; /* Work queue. */
- struct ibv_cq *cq; /* Completion queue. */
+	uint64_t oerrors; /**< Total number of packets that failed to transmit. */
};
struct priv;
+/* Memory region queue object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
+	rte_atomic32_t refcnt; /**< Reference counter. */
+	uint32_t lkey; /**< rte_cpu_to_be_32(mr->lkey). */
+	uintptr_t start; /* Start address of MR */
+	uintptr_t end; /* End address of MR */
+	struct ibv_mr *mr; /**< Memory Region. */
+	struct rte_mempool *mp; /**< Memory Pool. */
+};
+
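struct mlx5_mr caches the registered address range together with the pre-swapped lkey so the data path can resolve a buffer address without dereferencing the ibv_mr. A sketch of that range lookup using hypothetical minimal types:

#include <stdint.h>
#include <stdio.h>

struct mr {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;	/* already byte-swapped for the HW */
};

static uint32_t mr_lookup(const struct mr *tbl, int n, uintptr_t addr)
{
	int i;

	for (i = 0; i < n; ++i)
		if (addr >= tbl[i].start && addr < tbl[i].end)
			return tbl[i].lkey;
	return (uint32_t)-1;	/* miss: caller would register a new MR */
}

int main(void)
{
	struct mr tbl[] = { { 0x1000, 0x2000, 7 } };

	printf("%u\n", mr_lookup(tbl, 1, 0x1800));	/* 7 */
	return 0;
}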
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
@@ -104,22 +103,22 @@ struct rxq_zip {
};
/* RX queue descriptor. */
-struct rxq {
+struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
unsigned int cqe_n:4; /* Log 2 of CQ elements. */
unsigned int elts_n:4; /* Log 2 of Mbufs. */
- unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
unsigned int pending_err:1; /* CQE error needs to be handled. */
- unsigned int trim_elts:1; /* Whether elts needs clean-up. */
- unsigned int :6; /* Remaining bits. */
+ unsigned int :14; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
+ uint16_t port_id;
uint16_t rq_ci;
uint16_t rq_pi;
uint16_t cq_ci;
@@ -131,122 +130,56 @@ struct rxq {
struct mlx5_rxq_stats stats;
uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
+ void *cq_uar; /* CQ user access region. */
+ uint32_t cqn; /* CQ number. */
+ uint8_t cq_arm_sn; /* CQ arm seq number. */
} __rte_cache_aligned;
-/* RX queue control descriptor. */
-struct rxq_ctrl {
- struct priv *priv; /* Back pointer to private data. */
+/* Verbs Rx queue elements. */
+struct mlx5_rxq_ibv {
+ LIST_ENTRY(mlx5_rxq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_exp_wq *wq; /* Work Queue. */
- struct fdir_queue *fdir_queue; /* Flow director queue. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_wq *wq; /* Work Queue. */
struct ibv_comp_channel *channel;
- unsigned int socket; /* CPU socket ID for allocations. */
- struct rxq rxq; /* Data path structure. */
-};
-
-/* Hash RX queue types. */
-enum hash_rxq_type {
- HASH_RXQ_TCPV4,
- HASH_RXQ_UDPV4,
- HASH_RXQ_IPV4,
- HASH_RXQ_TCPV6,
- HASH_RXQ_UDPV6,
- HASH_RXQ_IPV6,
- HASH_RXQ_ETH,
-};
-
-/* Flow structure with Ethernet specification. It is packed to prevent padding
- * between attr and spec as this layout is expected by libibverbs. */
-struct flow_attr_spec_eth {
- struct ibv_exp_flow_attr attr;
- struct ibv_exp_flow_spec_eth spec;
-} __attribute__((packed));
-
-/* Define a struct flow_attr_spec_eth object as an array of at least
- * "size" bytes. Room after the first index is normally used to store
- * extra flow specifications. */
-#define FLOW_ATTR_SPEC_ETH(name, size) \
- struct flow_attr_spec_eth name \
- [((size) / sizeof(struct flow_attr_spec_eth)) + \
- !!((size) % sizeof(struct flow_attr_spec_eth))]
-
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
- uint64_t hash_fields; /* Fields that participate in the hash. */
- uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
- unsigned int flow_priority; /* Flow priority to use. */
- union {
- struct {
- enum ibv_exp_flow_spec_type type;
- uint16_t size;
- } hdr;
- struct ibv_exp_flow_spec_tcp_udp tcp_udp;
- struct ibv_exp_flow_spec_ipv4 ipv4;
- struct ibv_exp_flow_spec_ipv6 ipv6;
- struct ibv_exp_flow_spec_eth eth;
- } flow_spec; /* Flow specification template. */
- const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
+ struct mlx5_mr *mr; /* Memory Region (for mp). */
};
-/* Initialization data for indirection table. */
-struct ind_table_init {
- unsigned int max_size; /* Maximum number of WQs. */
- /* Hash RX queues using this table. */
- unsigned int hash_types;
- unsigned int hash_types_n;
+/* RX queue control descriptor. */
+struct mlx5_rxq_ctrl {
+ LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct priv *priv; /* Back pointer to private data. */
+ struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+ struct mlx5_rxq_data rxq; /* Data path structure. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ unsigned int irq:1; /* Whether IRQ is enabled. */
};
-/* Initialization data for special flows. */
-struct special_flow_init {
- uint8_t dst_mac_val[6];
- uint8_t dst_mac_mask[6];
- unsigned int hash_types;
- unsigned int per_vlan:1;
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+ LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ uint16_t queues_n; /**< Number of queues in the list. */
+ uint16_t queues[]; /**< Queue list. */
};
-enum hash_rxq_flow_type {
- HASH_RXQ_FLOW_TYPE_PROMISC,
- HASH_RXQ_FLOW_TYPE_ALLMULTI,
- HASH_RXQ_FLOW_TYPE_BROADCAST,
- HASH_RXQ_FLOW_TYPE_IPV6MULTI,
- HASH_RXQ_FLOW_TYPE_MAC,
-};
-
-#ifndef NDEBUG
-static inline const char *
-hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
-{
- switch (flow_type) {
- case HASH_RXQ_FLOW_TYPE_PROMISC:
- return "promiscuous";
- case HASH_RXQ_FLOW_TYPE_ALLMULTI:
- return "allmulticast";
- case HASH_RXQ_FLOW_TYPE_BROADCAST:
- return "broadcast";
- case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
- return "IPv6 multicast";
- case HASH_RXQ_FLOW_TYPE_MAC:
- return "MAC";
- }
- return NULL;
-}
-#endif /* NDEBUG */
-
-struct hash_rxq {
- struct priv *priv; /* Back pointer to private data. */
- struct ibv_qp *qp; /* Hash RX QP. */
- enum hash_rxq_type type; /* Hash RX queue type. */
- /* MAC flow steering rules, one per VLAN ID. */
- struct ibv_exp_flow *mac_flow
- [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
- struct ibv_exp_flow *special_flow
- [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+ LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+ struct ibv_qp *qp; /* Verbs queue pair. */
+ uint64_t hash_fields; /* Verbs Hash fields. */
+ uint8_t rss_key_len; /* Hash key length in bytes. */
+ uint8_t rss_key[]; /* Hash key. */
};
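mlx5_ind_table_ibv and mlx5_hrxq both end in a flexible array member, so each instance is allocated as one block sized for header plus payload, as mlx5_priv_hrxq_new() does with sizeof(*hrxq) + rss_key_len. A self-contained sketch of the idiom:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hkey {
	unsigned char key_len;
	unsigned char key[];	/* flexible array member */
};

int main(void)
{
	const unsigned char rss_key[4] = { 0x2c, 0xc6, 0x81, 0xd1 };
	struct hkey *h = calloc(1, sizeof(*h) + sizeof(rss_key));

	if (h == NULL)
		return 1;
	h->key_len = sizeof(rss_key);
	memcpy(h->key, rss_key, sizeof(rss_key));
	printf("len %u last byte 0x%02x\n", h->key_len, h->key[3]);
	free(h);
	return 0;
}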
/* TX queue descriptor. */
__extension__
-struct txq {
+struct mlx5_txq_data {
uint16_t elts_head; /* Current counter in (*elts)[]. */
uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
@@ -265,6 +198,7 @@ struct txq {
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
+ uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
uint32_t flags; /* Flags for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
@@ -272,61 +206,92 @@ struct txq {
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
- struct {
- uintptr_t start; /* Start address of MR */
- uintptr_t end; /* End address of MR */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* htonl(mr->lkey) */
- } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
- uint16_t mr_cache_idx; /* Index of last hit entry. */
+ struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
-/* TX queue control descriptor. */
-struct txq_ctrl {
- struct priv *priv; /* Back pointer to private data. */
+/* Verbs Tx queue elements. */
+struct mlx5_txq_ibv {
+ LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
+};
+
+/* TX queue control descriptor. */
+struct mlx5_txq_ctrl {
+ LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
+ rte_atomic32_t refcnt; /* Reference counter. */
+ struct priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
- struct txq txq; /* Data path structure. */
+ unsigned int max_inline_data; /* Max inline data. */
+ unsigned int max_tso_header; /* Max TSO header size. */
+ struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+ struct mlx5_txq_data txq; /* Data path structure. */
+ off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
};
/* mlx5_rxq.c */
-extern const struct hash_rxq_init hash_rxq_init[];
-extern const unsigned int hash_rxq_init_n;
-
extern uint8_t rss_hash_default_key[];
extern const size_t rss_hash_default_key_len;
-size_t priv_flow_attr(struct priv *, struct ibv_exp_flow_attr *,
- size_t, enum hash_rxq_type);
-int priv_create_hash_rxqs(struct priv *);
-void priv_destroy_hash_rxqs(struct priv *);
-int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
-int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq_ctrl *);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
-uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
int priv_rx_intr_vec_enable(struct priv *priv);
void priv_rx_intr_vec_disable(struct priv *priv);
-#ifdef HAVE_UPDATE_CQ_CI
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-#endif /* HAVE_UPDATE_CQ_CI */
+struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t);
+struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
+int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
+int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
+int mlx5_priv_rxq_ibv_verify(struct priv *);
+struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
+ uint16_t, unsigned int,
+ struct rte_mempool *);
+struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
+int mlx5_priv_rxq_release(struct priv *, uint16_t);
+int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
+int mlx5_priv_rxq_verify(struct priv *);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
+ uint16_t [],
+ uint16_t);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
+ uint16_t [],
+ uint16_t);
+int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
+int mlx5_priv_ind_table_ibv_verify(struct priv *);
+struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
+ uint64_t, uint16_t [], uint16_t);
+struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
+ uint64_t, uint16_t [], uint16_t);
+int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
+int mlx5_priv_hrxq_ibv_verify(struct priv *);
/* mlx5_txq.c */
-void txq_cleanup(struct txq_ctrl *);
-int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
- unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
-uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
+int priv_tx_uar_remap(struct priv *priv, int fd);
+struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t);
+struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
+int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
+int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
+int mlx5_priv_txq_ibv_verify(struct priv *);
+struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
+ uint16_t, unsigned int,
+ const struct rte_eth_txconf *);
+struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
+int mlx5_priv_txq_release(struct priv *, uint16_t);
+int mlx5_priv_txq_releasable(struct priv *, uint16_t);
+int mlx5_priv_txq_verify(struct priv *);
+void txq_alloc_elts(struct mlx5_txq_ctrl *);
/* mlx5_rxtx.c */
@@ -346,18 +311,19 @@ int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */
int priv_check_raw_vec_tx_support(struct priv *);
int priv_check_vec_tx_support(struct priv *);
-int rxq_check_vec_support(struct rxq *);
+int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
-void priv_prep_vec_rx_function(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
/* mlx5_mr.c */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
-void txq_mp2mr_iter(struct rte_mempool *, void *);
-uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+void mlx5_mp2mr_iter(struct rte_mempool *, void *);
+struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
+ struct rte_mempool *, unsigned int);
+struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
+ unsigned int);
#ifndef NDEBUG
/**
@@ -419,16 +385,24 @@ check_cqe(volatile struct mlx5_cqe *cqe,
if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
- if (!check_cqe_seen(cqe))
+ if (!check_cqe_seen(cqe)) {
ERROR("unexpected CQE error %u (0x%02x)"
" syndrome 0x%02x",
op_code, op_code, syndrome);
+ rte_hexdump(stderr, "MLX5 Error CQE:",
+ (const void *)((uintptr_t)err_cqe),
+ sizeof(*err_cqe));
+ }
return 1;
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
- if (!check_cqe_seen(cqe))
+ if (!check_cqe_seen(cqe)) {
ERROR("unexpected CQE opcode %u (0x%02x)",
op_code, op_code);
+ rte_hexdump(stderr, "MLX5 CQE:",
+ (const void *)((uintptr_t)cqe),
+ sizeof(*cqe));
+ }
return 1;
}
#endif /* NDEBUG */
@@ -447,7 +421,7 @@ check_cqe(volatile struct mlx5_cqe *cqe,
* WQE address.
*/
static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
ci &= ((1 << txq->wqe_n) - 1);
return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
@@ -462,7 +436,7 @@ tx_mlx5_wqe(struct txq *txq, uint16_t ci)
* Pointer to TX queue structure.
*/
static __rte_always_inline void
-mlx5_tx_complete(struct txq *txq)
+mlx5_tx_complete(struct mlx5_txq_data *txq)
{
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -483,13 +457,18 @@ mlx5_tx_complete(struct txq *txq)
#ifndef NDEBUG
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
- if (!check_cqe_seen(cqe))
+ if (!check_cqe_seen(cqe)) {
ERROR("unexpected error CQE, TX stopped");
+ rte_hexdump(stderr, "MLX5 TXQ:",
+ (const void *)((uintptr_t)txq->wqes),
+ ((1 << txq->wqe_n) *
+ MLX5_WQE_SIZE));
+ }
return;
}
#endif /* NDEBUG */
++cq_ci;
- txq->wqe_pi = ntohs(cqe->wqe_counter);
+ txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
ctrl = (volatile struct mlx5_wqe_ctrl *)
tx_mlx5_wqe(txq, txq->wqe_pi);
elts_tail = ctrl->ctrl3;
@@ -526,8 +505,8 @@ mlx5_tx_complete(struct txq *txq)
txq->cq_ci = cq_ci;
txq->elts_tail = elts_tail;
/* Update the consumer index. */
- rte_wmb();
- *txq->cq_db = htonl(cq_ci);
+ rte_compiler_barrier();
+ *txq->cq_db = rte_cpu_to_be_32(cq_ci);
}
/**
@@ -562,51 +541,80 @@ mlx5_tx_mb2mp(struct rte_mbuf *buf)
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
uint16_t i = txq->mr_cache_idx;
uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
+ struct mlx5_mr *mr;
assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
- return txq->mp2mr[i].lkey;
+ if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end >= addr))
+ return txq->mp2mr[i]->lkey;
for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mr == NULL)) {
+ if (unlikely(txq->mp2mr[i]->mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq->mp2mr[i].start <= addr &&
- txq->mp2mr[i].end >= addr) {
- assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(htonl(txq->mp2mr[i].mr->lkey) ==
- txq->mp2mr[i].lkey);
+ if (txq->mp2mr[i]->start <= addr &&
+ txq->mp2mr[i]->end >= addr) {
+ assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
+ assert(rte_cpu_to_be_32(txq->mp2mr[i]->mr->lkey) ==
+ txq->mp2mr[i]->lkey);
txq->mr_cache_idx = i;
- return txq->mp2mr[i].lkey;
+ return txq->mp2mr[i]->lkey;
}
}
txq->mr_cache_idx = 0;
- return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+ mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+ /*
+	 * Take a reference for use by this queue; the original one is
+	 * kept by the control plane.
+ */
+ if (mr) {
+ rte_atomic32_inc(&mr->refcnt);
+ return mr->lkey;
+ }
+ return (uint32_t)-1;
}
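
The cache above keeps the index of the last hit so back-to-back packets from the same mempool resolve in O(1). A worked sketch of the lookup, assuming a trimmed-down entry type (mr_entry and mr_cache_lookup are illustrative, not driver symbols):

#include <stdint.h>

struct mr_entry {
	uintptr_t start; /* First address covered by the MR. */
	uintptr_t end;   /* Last address covered by the MR. */
	uint32_t lkey;   /* Big-endian key, ready to write into a WQE. */
};

static uint32_t
mr_cache_lookup(const struct mr_entry *cache, unsigned int n,
		uint16_t *last_hit, uintptr_t addr)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		if (cache[i].start <= addr && addr <= cache[i].end) {
			*last_hit = i; /* Next lookup starts here. */
			return cache[i].lkey;
		}
	}
	return (uint32_t)-1; /* Miss: the caller registers a new MR. */
}
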
/**
- * Ring TX queue doorbell.
+ * Ring TX queue doorbell and flush the update if requested.
*
* @param txq
* Pointer to TX queue structure.
* @param wqe
* Pointer to the last WQE posted in the NIC.
+ * @param cond
+ * Request for write memory barrier after BlueFlame update.
*/
static __rte_always_inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
+ int cond)
{
uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
volatile uint64_t *src = ((volatile uint64_t *)wqe);
- rte_wmb();
- *txq->qp_db = htonl(txq->wqe_ci);
+ rte_io_wmb();
+ *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
*dst = *src;
+ if (cond)
+ rte_wmb();
+}
+
+/**
+ * Ring TX queue doorbell and flush the update by write memory barrier.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
+{
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
new file mode 100644
index 00000000..ba6c8cef
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -0,0 +1,388 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#if defined RTE_ARCH_X86_64
+#include "mlx5_rxtx_vec_sse.h"
+#elif defined RTE_ARCH_ARM64
+#include "mlx5_rxtx_vec_neon.h"
+#else
+#error "This should not be compiled if SIMD instructions are not supported."
+#endif
+
+/**
+ * Count the number of consecutive single-segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ *   Number of consecutive single-segment packets.
+ */
+static inline unsigned int
+txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+	/* Count the number of consecutive single-segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
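
A hypothetical input makes the contract concrete:

/*
 * With pkts = {1-seg, 1-seg, 3-seg, 1-seg}, txq_check_multiseg(pkts, 4)
 * returns 2: the vectorized path sends pkts[0..1] and txq_scatter_v()
 * takes over at pkts[2].
 */
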
+
+/**
+ * Count the number of packets with the same ol_flags and calculate cs_flags.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ * @param cs_flags
+ *   Pointer to the flags to be returned.
+ *
+ * @return
+ *   Number of packets with the same ol_flags.
+ */
+static inline unsigned int
+txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n, uint8_t *cs_flags)
+{
+ unsigned int pos;
+ const uint64_t ol_mask =
+ PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+
+ if (!pkts_n)
+ return 0;
+	/* Count the number of packets with the same ol_flags. */
+ for (pos = 1; pos < pkts_n; ++pos)
+ if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ break;
+ /* Should open another MPW session for the rest. */
+ if (pkts[0]->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled =
+ pkts[0]->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ *cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ return pos;
+}
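
Likewise, a hypothetical burst shows why the returned count matters:

/*
 * If pkts[0..1] carry PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and pkts[2] has no
 * offload flags, the function returns 2 with *cs_flags set to the L3/L4
 * checksum bits; the caller opens a new MPW session at pkts[2] so every
 * packet within one session shares the same cs_flags.
 */
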
+
+/**
+ * DPDK callback for vectorized TX.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * DPDK callback for vectorized TX with multi-seg packets and offload.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint8_t cs_flags = 0;
+ uint16_t n;
+ uint16_t ret;
+
+ /* Transmit multi-seg packets in the head of pkts list. */
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ NB_SEGS(pkts[nb_tx]) > 1)
+ nb_tx += txq_scatter_v(txq,
+ &pkts[nb_tx],
+ pkts_n - nb_tx);
+ n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ n = txq_check_multiseg(&pkts[nb_tx], n);
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ nb_tx += ret;
+ if (!ret)
+ break;
+ }
+ return nb_tx;
+}
+
+/**
+ * Skip error packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+static uint16_t
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t n = 0;
+ unsigned int i;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t err_bytes = 0;
+#endif
+
+ for (i = 0; i < pkts_n; ++i) {
+ struct rte_mbuf *pkt = pkts[i];
+
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ err_bytes += PKT_LEN(pkt);
+#endif
+ rte_pktmbuf_free_seg(pkt);
+ } else {
+ pkts[n++] = pkt;
+ }
+ }
+ rxq->stats.idropped += (pkts_n - n);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Correct counters of errored completions. */
+ rxq->stats.ipackets -= (pkts_n - n);
+ rxq->stats.ibytes -= err_bytes;
+#endif
+ rxq->pending_err = 0;
+ return n;
+}
+
+/**
+ * DPDK callback for vectorized RX.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ uint16_t nb_rx;
+
+ nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
+ if (unlikely(rxq->pending_err))
+ nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
+ return nb_rx;
+}
+
+/**
+ * Check whether Tx queue flags are set for raw vectorized Tx.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_raw_vec_tx_support(struct priv *priv)
+{
+ uint16_t i;
+
+	/* All the configured queues must support raw vectorized Tx. */
+ for (i = 0; i < priv->txqs_n; ++i) {
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
+
+ if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
+ !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ break;
+ }
+ if (i != priv->txqs_n)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized TX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_tx_support(struct priv *priv)
+{
+ if (!priv->tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->mps != MLX5_MPW_ENHANCED ||
+ priv->tso)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether an RX queue can support vectorized RX.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+{
+ struct mlx5_rxq_ctrl *ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ return -ENOTSUP;
+ return 1;
+}
+
+/**
+ * Check whether a device can support vectorized RX.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+int __attribute__((cold))
+priv_check_vec_rx_support(struct priv *priv)
+{
+ uint16_t i;
+
+ if (!priv->rx_vec_en)
+ return -ENOTSUP;
+	/* All the configured queues must support vectorized Rx. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (!rxq)
+ continue;
+ if (rxq_check_vec_support(rxq) < 0)
+ break;
+ }
+ if (i != priv->rxqs_n)
+ return -ENOTSUP;
+ return 1;
+}
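
Taken together, these checks gate burst-function selection at configuration time. A simplified sketch of how a selector could consume them; the actual selection logic lives elsewhere in the driver and select_tx_function() here is illustrative only:

#include <rte_ethdev.h>

static eth_tx_burst_t
select_tx_function(struct priv *priv)
{
	if (priv_check_vec_tx_support(priv) > 0 &&
	    priv_check_raw_vec_tx_support(priv) > 0)
		return mlx5_tx_burst_raw_vec; /* No offloads on any queue. */
	if (priv_check_vec_tx_support(priv) > 0)
		return mlx5_tx_burst_vec; /* Single-seg fast path w/ offloads. */
	return mlx5_tx_burst; /* Scalar fallback. */
}
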
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
new file mode 100644
index 00000000..1f08ed0b
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -0,0 +1,130 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
+#define RTE_PMD_MLX5_RXTX_VEC_H_
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+
+#include "mlx5_autoconf.h"
+#include "mlx5_prm.h"
+
+/*
+ * Compile time sanity check for vectorized functions.
+ */
+
+#define S_ASSERT_RTE_MBUF(s) \
+ static_assert(s, "A field of struct rte_mbuf is changed")
+#define S_ASSERT_MLX5_CQE(s) \
+ static_assert(s, "A field of struct mlx5_cqe is changed")
+
+/* rxq_cq_decompress_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+/* rxq_cq_to_ptype_oflags_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+
+/* rxq_burst_v() */
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+#if (RTE_CACHE_LINE_SIZE == 128)
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
+#else
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
+#endif
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
+ offsetof(struct mlx5_cqe, pkt_info) + 12);
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
+ offsetof(struct mlx5_cqe, hdr_type_etc));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
+ offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
+ sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
+ offsetof(struct mlx5_cqe, byte_cnt));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
+ RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
+ offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
+
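
A generic, self-contained illustration of the guard pattern used above: if a field the 16B vector loads depend on ever moves, the build fails instead of silently corrupting mbufs (struct hdr is made up for the example):

#include <assert.h>  /* static_assert (C11) */
#include <stddef.h>  /* offsetof */
#include <stdint.h>

struct hdr {
	uint32_t a;
	uint32_t b; /* Vector code loads 'b' at a fixed byte offset. */
};

static_assert(offsetof(struct hdr, b) == 4,
	      "layout change: 'b' is no longer at byte offset 4");
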
+/**
+ * Replenish buffers for RX in bulk.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param n
+ * Number of buffers to be replenished.
+ */
+static inline void
+mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
+{
+ const uint16_t q_n = 1 << rxq->elts_n;
+ const uint16_t q_mask = q_n - 1;
+ uint16_t elts_idx = rxq->rq_ci & q_mask;
+ struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+ unsigned int i;
+
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+	/* Avoid crossing the queue end. */
+ n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
+ if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
+ rxq->stats.rx_nombuf += n;
+ return;
+ }
+ for (i = 0; i < n; ++i)
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ rxq->rq_ci += n;
+ /* Prevent overflowing into consumed mbufs. */
+ elts_idx = rxq->rq_ci & q_mask;
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
+ rte_io_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+}
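
A worked example of the clamp above, with hypothetical numbers and assuming MLX5_VPMD_DESCS_PER_LOOP == 4:

/*
 * With q_n = 512, elts_idx = 500 and n = 64:
 *   n = RTE_MIN(64 - 4, 512 - 500) = 12
 * so the refill stops at the ring end; the rest is replenished on a later
 * call once rq_ci wraps around.
 */
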
+
+#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
new file mode 100644
index 00000000..c721d80e
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -0,0 +1,1039 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
+#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arm_neon.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
+#include "mlx5_autoconf.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+/**
+ * Fill in buffer descriptors in a multi-packet send descriptor.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param dseg
+ * Pointer to buffer descriptor to be written.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param n
+ * Number of packets to be filled.
+ */
+static inline void
+txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
+ struct rte_mbuf **pkts, unsigned int n)
+{
+ unsigned int pos;
+ uintptr_t addr;
+ const uint8x16_t dseg_shuf_m = {
+ 3, 2, 1, 0, /* length, bswap32 */
+ 4, 5, 6, 7, /* lkey */
+ 15, 14, 13, 12, /* addr, bswap64 */
+ 11, 10, 9, 8
+ };
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t tx_byte = 0;
+#endif
+
+ for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
+ uint8x16_t desc;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ addr = rte_pktmbuf_mtod(pkt, uintptr_t);
+ desc = vreinterpretq_u8_u32((uint32x4_t) {
+ DATA_LEN(pkt),
+ mlx5_tx_mb2mr(txq, pkt),
+ addr,
+ addr >> 32 });
+ desc = vqtbl1q_u8(desc, dseg_shuf_m);
+ vst1q_u8(dseg, desc);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ tx_byte += DATA_LEN(pkt);
+#endif
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.obytes += tx_byte;
+#endif
+}
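
The dseg_shuf_m mask above works because vqtbl1q_u8 is a byte-table lookup: the mask lists source bytes in the desired output order, so one instruction performs all the endian conversions at once. A self-contained AArch64 illustration:

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Indices 3,2,1,0 emit the first 32-bit word byte-reversed. */
	const uint8x16_t bswap32_m = {
		3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
	};
	const uint32x4_t v = { 0x11223344, 0x55667788, 0, 0 };
	uint32x4_t r = vreinterpretq_u32_u8(
		vqtbl1q_u8(vreinterpretq_u8_u32(v), bswap32_m));

	printf("0x%08x\n", vgetq_lane_u32(r, 0)); /* Prints 0x44332211. */
	return 0;
}
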
+
+/**
+ * Send multi-segmented packets until a single-segment packet is encountered
+ * in the pkts list.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n;
+ volatile struct mlx5_wqe *wqe = NULL;
+
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ if (unlikely(!pkts_n))
+ return 0;
+ for (n = 0; n < pkts_n; ++n) {
+ struct rte_mbuf *buf = pkts[n];
+ unsigned int segs_n = buf->nb_segs;
+ unsigned int ds = nb_dword_in_hdr;
+ unsigned int len = PKT_LEN(buf);
+ uint16_t wqe_ci = txq->wqe_ci;
+ const uint8x16_t ctrl_shuf_m = {
+ 3, 2, 1, 0, /* bswap32 */
+ 7, 6, 5, 4, /* bswap32 */
+ 11, 10, 9, 8, /* bswap32 */
+ 12, 13, 14, 15
+ };
+ uint8_t cs_flags = 0;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint8x16_t *t_wqe;
+ uint8_t *dseg;
+ uint8x16_t ctrl;
+
+ assert(segs_n);
+ max_elts = elts_n - (elts_head - txq->elts_tail);
+ max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
+ /*
+ * A MPW session consumes 2 WQEs at most to
+		 * An MPW session consumes at most 2 WQEs to
+		 * include MLX5_MPW_DSEG_MAX pointers.
+ if (segs_n == 1 ||
+ max_elts < segs_n || max_wqe < 2)
+ break;
+ wqe = &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[wqe_ci & wq_mask].hdr;
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ const uint64_t is_tunneled =
+ buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ /* Title WQEBB pointer. */
+ t_wqe = (uint8x16_t *)wqe;
+ dseg = (uint8_t *)(wqe + 1);
+ do {
+ if (!(ds++ % nb_dword_per_wqebb)) {
+ dseg = (uint8_t *)
+ &((volatile struct mlx5_wqe64 *)
+ txq->wqes)[++wqe_ci & wq_mask];
+ }
+ txq_wr_dseg_v(txq, dseg, &buf, 1);
+ dseg += MLX5_WQE_DWORD_SIZE;
+ (*txq->elts)[elts_head++ & elts_m] = buf;
+ buf = buf->next;
+ } while (--segs_n);
+ ++wqe_ci;
+ /* Fill CTRL in the header. */
+ ctrl = vreinterpretq_u8_u32((uint32x4_t) {
+ MLX5_OPC_MOD_MPW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
+ txq->qp_num_8s | ds, 0, 0});
+ ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
+ vst1q_u8((void *)t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ vst1q_u16((void *)(t_wqe + 1),
+ (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
+ 0, 0, 0, 0 });
+ txq->wqe_ci = wqe_ci;
+ }
+ if (!n)
+ return 0;
+ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
+ txq->elts_head = elts_head;
+ if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
+ wqe->ctrl[3] = txq->elts_head;
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += n;
+#endif
+ mlx5_tx_dbrec(txq, wqe);
+ return n;
+}
+
+/**
+ * Send a burst of packets with Enhanced MPW. If a multi-seg packet is
+ * encountered, the function returns so it can be processed by txq_scatter_v().
+ * All the packets in the pkts list must be single-segment packets with the
+ * same offload flags.
+ * This must be checked by txq_check_multiseg() and txq_calc_offload().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param pkts
+ * Pointer to array of packets to be sent.
+ * @param pkts_n
+ * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
+ * @param cs_flags
+ * Checksum offload flags to be written in the descriptor.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static inline uint16_t
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint8_t cs_flags)
+{
+ struct rte_mbuf **elts;
+ uint16_t elts_head = txq->elts_head;
+ const uint16_t elts_n = 1 << txq->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ const unsigned int nb_dword_per_wqebb =
+ MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
+ const unsigned int nb_dword_in_hdr =
+ sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
+ unsigned int n = 0;
+ unsigned int pos;
+ uint16_t max_elts;
+ uint16_t max_wqe;
+ uint32_t comp_req = 0;
+ const uint16_t wq_n = 1 << txq->wqe_n;
+ const uint16_t wq_mask = wq_n - 1;
+ uint16_t wq_idx = txq->wqe_ci & wq_mask;
+ volatile struct mlx5_wqe64 *wq =
+ &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
+ const uint8x16_t ctrl_shuf_m = {
+ 3, 2, 1, 0, /* bswap32 */
+ 7, 6, 5, 4, /* bswap32 */
+ 11, 10, 9, 8, /* bswap32 */
+ 12, 13, 14, 15
+ };
+ uint8x16_t *t_wqe;
+ uint8_t *dseg;
+ uint8x16_t ctrl;
+
+ /* Make sure all packets can fit into a single WQE. */
+ assert(elts_n > pkts_n);
+ mlx5_tx_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ if (unlikely(!pkts_n))
+ return 0;
+ elts = &(*txq->elts)[elts_head & elts_m];
+ /* Loop for available tailroom first. */
+ n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
+ for (pos = 0; pos < (n & -2); pos += 2)
+ vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
+ if (n & 1)
+ elts[pos] = pkts[pos];
+ /* Check if it crosses the end of the queue. */
+ if (unlikely(n < pkts_n)) {
+ elts = &(*txq->elts)[0];
+ for (pos = 0; pos < pkts_n - n; ++pos)
+ elts[pos] = pkts[n + pos];
+ }
+ txq->elts_head += pkts_n;
+ /* Save title WQEBB pointer. */
+ t_wqe = (uint8x16_t *)wqe;
+ dseg = (uint8_t *)(wqe + 1);
+ /* Calculate the number of entries to the end. */
+ n = RTE_MIN(
+ (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
+ pkts_n);
+ /* Fill DSEGs. */
+ txq_wr_dseg_v(txq, dseg, pkts, n);
+ /* Check if it crosses the end of the queue. */
+ if (n < pkts_n) {
+ dseg = (uint8_t *)txq->wqes;
+ txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
+ }
+ if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
+ txq->elts_comp += pkts_n;
+ } else {
+ /* Request a completion. */
+ txq->elts_comp = 0;
+ ++txq->cq_pi;
+ comp_req = 8;
+ }
+ /* Fill CTRL in the header. */
+ ctrl = vreinterpretq_u8_u32((uint32x4_t) {
+ MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
+ txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
+ txq->qp_num_8s | (pkts_n + 2),
+ comp_req,
+ txq->elts_head });
+ ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
+ vst1q_u8((void *)t_wqe, ctrl);
+ /* Fill ESEG in the header. */
+ vst1q_u8((void *)(t_wqe + 1),
+ (uint8x16_t) { 0, 0, 0, 0,
+ cs_flags, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 });
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ txq->stats.opackets += pkts_n;
+#endif
+ txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
+ nb_dword_per_wqebb;
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
+ return pkts_n;
+}
+
+/**
+ * Copy mbufs from the RX SW ring to the output array of packets.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param pkts
+ *   Pointer to the output array of packets.
+ * @param n
+ *   Number of packets to copy.
+ */
+static inline void
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
+{
+ const uint16_t q_mask = (1 << rxq->elts_n) - 1;
+ struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
+ unsigned int pos;
+ uint16_t p = n & -2;
+
+ for (pos = 0; pos < p; pos += 2) {
+ uint64x2_t mbp;
+
+ mbp = vld1q_u64((void *)&elts[pos]);
+ vst1q_u64((void *)&pkts[pos], mbp);
+ }
+ if (n & 1)
+ pkts[pos] = elts[pos];
+}
+
+/**
+ * Decompress a compressed completion and fill in mbufs in RX SW ring with data
+ * extracted from the title completion descriptor.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param cq
+ *   Pointer to completion array whose first entry is a compressed completion.
+ * @param elts
+ * Pointer to SW ring to be filled. The first mbuf has to be pre-built from
+ * the title completion descriptor to be copied to the rest of mbufs.
+ */
+static inline void
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+ struct rte_mbuf **elts)
+{
+ volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
+ struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
+ unsigned int pos;
+ unsigned int i;
+ unsigned int inv = 0;
+ /* Mask to shuffle from extracted mini CQE to mbuf. */
+ const uint8x16_t mcqe_shuf_m1 = {
+ -1, -1, -1, -1, /* skip packet_type */
+ 7, 6, -1, -1, /* pkt_len, bswap16 */
+ 7, 6, /* data_len, bswap16 */
+ -1, -1, /* skip vlan_tci */
+ 3, 2, 1, 0 /* hash.rss, bswap32 */
+ };
+ const uint8x16_t mcqe_shuf_m2 = {
+ -1, -1, -1, -1, /* skip packet_type */
+ 15, 14, -1, -1, /* pkt_len, bswap16 */
+ 15, 14, /* data_len, bswap16 */
+ -1, -1, /* skip vlan_tci */
+ 11, 10, 9, 8 /* hash.rss, bswap32 */
+ };
+ /* Restore the compressed count. Must be 16 bits. */
+ const uint16_t mcqe_n = t_pkt->data_len +
+ (rxq->crc_present * ETHER_CRC_LEN);
+ const uint64x2_t rearm =
+ vld1q_u64((void *)&t_pkt->rearm_data);
+ const uint32x4_t rxdf_mask = {
+ 0xffffffff, /* packet_type */
+ 0, /* skip pkt_len */
+ 0xffff0000, /* vlan_tci, skip data_len */
+ 0, /* skip hash.rss */
+ };
+ const uint8x16_t rxdf =
+ vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
+ vreinterpretq_u8_u32(rxdf_mask));
+ const uint16x8_t crc_adj = {
+ 0, 0,
+ rxq->crc_present * ETHER_CRC_LEN, 0,
+ rxq->crc_present * ETHER_CRC_LEN, 0,
+ 0, 0
+ };
+ const uint32_t flow_tag = t_pkt->hash.fdir.hi;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+#endif
+ /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
+ const uint8x8_t len_shuf_m = {
+ 7, 6, /* 1st mCQE */
+ 15, 14, /* 2nd mCQE */
+ 23, 22, /* 3rd mCQE */
+ 31, 30 /* 4th mCQE */
+ };
+
+ /*
+ * A. load mCQEs into a 128bit register.
+ * B. store rearm data to mbuf.
+ * C. combine data from mCQEs with rx_descriptor_fields1.
+ * D. store rx_descriptor_fields1.
+ * E. store flow tag (rte_flow mark).
+ */
+ for (pos = 0; pos < mcqe_n; ) {
+ uint8_t *p = (void *)&mcq[pos % 8];
+ uint8_t *e0 = (void *)&elts[pos]->rearm_data;
+ uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
+ uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
+ uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
+ uint16x4_t byte_cnt;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint16x4_t invalid_mask =
+ vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL << ((mcqe_n - pos) *
+ sizeof(uint16_t) * 8) : 0);
+#endif
+
+ if (!(pos & 0x7) && pos + 8 < mcqe_n)
+ rte_prefetch0((void *)(cq + pos + 8));
+ __asm__ volatile (
+ /* A.1 load mCQEs into a 128bit register. */
+ "ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
+ /* B.1 store rearm data to mbuf. */
+ "st1 {%[rearm].2d}, [%[e0]] \n\t"
+ "add %[e0], %[e0], #16 \n\t"
+ "st1 {%[rearm].2d}, [%[e1]] \n\t"
+ "add %[e1], %[e1], #16 \n\t"
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ "tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
+ "tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
+ "sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
+ "sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
+ "orr v18.16b, v18.16b, %[rxdf].16b \n\t"
+ "orr v19.16b, v19.16b, %[rxdf].16b \n\t"
+ /* D.1 store rx_descriptor_fields1. */
+ "st1 {v18.2d}, [%[e0]] \n\t"
+ "st1 {v19.2d}, [%[e1]] \n\t"
+ /* B.1 store rearm data to mbuf. */
+ "st1 {%[rearm].2d}, [%[e2]] \n\t"
+ "add %[e2], %[e2], #16 \n\t"
+ "st1 {%[rearm].2d}, [%[e3]] \n\t"
+ "add %[e3], %[e3], #16 \n\t"
+ /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ "tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
+ "tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
+ "sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
+ "sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
+ "orr v18.16b, v18.16b, %[rxdf].16b \n\t"
+ "orr v19.16b, v19.16b, %[rxdf].16b \n\t"
+ /* D.1 store rx_descriptor_fields1. */
+ "st1 {v18.2d}, [%[e2]] \n\t"
+ "st1 {v19.2d}, [%[e3]] \n\t"
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ "tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
+#endif
+ :[byte_cnt]"=&w"(byte_cnt)
+ :[mcq]"r"(p),
+ [rxdf]"w"(rxdf),
+ [rearm]"w"(rearm),
+ [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
+ [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
+ [crc_adj]"w"(crc_adj),
+ [len_shuf_m]"w"(len_shuf_m)
+ :"memory", "v16", "v17", "v18", "v19");
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ byte_cnt = vbic_u16(byte_cnt, invalid_mask);
+ rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
+#endif
+ if (rxq->mark) {
+ /* E.1 store flow tag (rte_flow mark). */
+ elts[pos]->hash.fdir.hi = flow_tag;
+ elts[pos + 1]->hash.fdir.hi = flow_tag;
+ elts[pos + 2]->hash.fdir.hi = flow_tag;
+ elts[pos + 3]->hash.fdir.hi = flow_tag;
+ }
+ pos += MLX5_VPMD_DESCS_PER_LOOP;
+ /* Move to next CQE and invalidate consumed CQEs. */
+ if (!(pos & 0x7) && pos < mcqe_n) {
+ mcq = (void *)&(cq + pos)->pkt_info;
+ for (i = 0; i < 8; ++i)
+ cq[inv++].op_own = MLX5_CQE_INVALIDATE;
+ }
+ }
+ /* Invalidate the rest of CQEs. */
+ for (; inv < mcqe_n; ++inv)
+ cq[inv].op_own = MLX5_CQE_INVALIDATE;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += mcqe_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ rxq->cq_ci += mcqe_n;
+}
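
One subtlety above is worth spelling out; this is my understanding of the convention, hedged accordingly:

/*
 * For a compressed session the title CQE's byte_cnt field carries the number
 * of mini-CQEs rather than a packet length. The title mbuf was pre-built by
 * the regular path, which stored that value in data_len after subtracting
 * the CRC, so mcqe_n = t_pkt->data_len + (rxq->crc_present * ETHER_CRC_LEN)
 * undoes the adjustment to recover the count.
 */
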
+
+/**
+ * Calculate packet type and offload flag for mbuf and store it.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param ptype_info
+ *   Array of four 4-byte packet type fields extracted from the original
+ *   completion descriptor.
+ * @param flow_tag
+ *   Array of four 4-byte flow IDs extracted from the original completion
+ * descriptor.
+ * @param op_err
+ * Opcode vector having responder error status. Each field is 4B.
+ * @param pkts
+ * Pointer to array of packets to be filled.
+ */
+static inline void
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
+ uint32x4_t ptype_info, uint32x4_t flow_tag,
+ uint16x4_t op_err, struct rte_mbuf **pkts)
+{
+ uint16x4_t ptype;
+ uint32x4_t pinfo, cv_flags;
+ uint32x4_t ol_flags =
+ vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->hw_timestamp * PKT_RX_TIMESTAMP);
+ const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
+ const uint8x16_t cv_flag_sel = {
+ 0,
+ (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
+ (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+ 0,
+ (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ const uint32x4_t cv_mask =
+ vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
+ const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
+
+ if (rxq->mark) {
+ const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
+ const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
+ const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+
+ /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+ ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
+ vceqzq_u32(flow_tag)));
+ /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
+ ol_flags = vorrq_u32(ol_flags,
+ vbicq_u32(fdir_id_flags,
+ vceqq_u32(flow_tag, ft_def)));
+ }
+ /*
+ * ptype_info has the following:
+ * bit[1] = l3_ok
+ * bit[2] = l4_ok
+ * bit[8] = cv
+ * bit[11:10] = l3_hdr_type
+ * bit[14:12] = l4_hdr_type
+ * bit[15] = ip_frag
+ * bit[16] = tunneled
+ * bit[17] = outer_l3_type
+ */
+ ptype = vshrn_n_u32(ptype_info, 10);
+ /* Errored packets will have RTE_PTYPE_ALL_MASK. */
+ ptype = vorr_u16(ptype, op_err);
+ pkts[0]->packet_type =
+ mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
+ pkts[1]->packet_type =
+ mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
+ pkts[2]->packet_type =
+ mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
+ pkts[3]->packet_type =
+ mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+ /* Fill flags for checksum and VLAN. */
+ pinfo = vandq_u32(ptype_info, ptype_ol_mask);
+ pinfo = vreinterpretq_u32_u8(
+ vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
+ /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
+ cv_flags = vshlq_n_u32(pinfo, 9);
+ cv_flags = vorrq_u32(pinfo, cv_flags);
+ /* Move back flags to start from byte[0]. */
+ cv_flags = vshrq_n_u32(cv_flags, 8);
+ /* Mask out garbage bits. */
+ cv_flags = vandq_u32(cv_flags, cv_mask);
+ /* Merge to ol_flags. */
+ ol_flags = vorrq_u32(ol_flags, cv_flags);
+ /* Merge mbuf_init and ol_flags, and store. */
+ rearm0 = vcombine_u64(mbuf_init,
+ vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
+ ol_flags)), 32));
+ rearm1 = vcombine_u64(mbuf_init,
+ vand_u64(vget_high_u64(vreinterpretq_u64_u32(
+ ol_flags)), r32_mask));
+ rearm2 = vcombine_u64(mbuf_init,
+ vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
+ ol_flags)), 32));
+ rearm3 = vcombine_u64(mbuf_init,
+ vand_u64(vget_low_u64(vreinterpretq_u64_u32(
+ ol_flags)), r32_mask));
+ vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
+ vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
+ vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
+ vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
+}
+
+/**
+ * Receive a burst of packets. An errored completion also consumes an mbuf, but
+ * its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs must be freed
+ * before returning to the application.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets received including errors (<= pkts_n).
+ */
+static inline uint16_t
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ const uint16_t q_n = 1 << rxq->cqe_n;
+ const uint16_t q_mask = q_n - 1;
+ volatile struct mlx5_cqe *cq;
+ struct rte_mbuf **elts;
+ unsigned int pos;
+ uint64_t n;
+ uint16_t repl_n;
+ uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
+ uint16_t nocmp_n = 0;
+ uint16_t rcvd_pkt = 0;
+ unsigned int cq_idx = rxq->cq_ci & q_mask;
+ unsigned int elts_idx;
+ const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
+ const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
+ const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
+ const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
+ const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t rcvd_byte = 0;
+#endif
+ /* Mask to generate 16B length vector. */
+ const uint8x8_t len_shuf_m = {
+ 52, 53, /* 4th CQE */
+ 36, 37, /* 3rd CQE */
+ 20, 21, /* 2nd CQE */
+ 4, 5 /* 1st CQE */
+ };
+ /* Mask to extract 16B data from a 64B CQE. */
+ const uint8x16_t cqe_shuf_m = {
+ 28, 29, /* hdr_type_etc */
+ 0, /* pkt_info */
+ -1, /* null */
+ 47, 46, /* byte_cnt, bswap16 */
+ 31, 30, /* vlan_info, bswap16 */
+ 15, 14, 13, 12, /* rx_hash_res, bswap32 */
+ 57, 58, 59, /* flow_tag */
+ 63 /* op_own */
+ };
+ /* Mask to generate 16B data for mbuf. */
+ const uint8x16_t mb_shuf_m = {
+ 4, 5, -1, -1, /* pkt_len */
+ 4, 5, /* data_len */
+ 6, 7, /* vlan_tci */
+ 8, 9, 10, 11, /* hash.rss */
+ 12, 13, 14, -1 /* hash.fdir.hi */
+ };
+ /* Mask to generate 16B owner vector. */
+ const uint8x8_t owner_shuf_m = {
+ 63, -1, /* 4th CQE */
+ 47, -1, /* 3rd CQE */
+ 31, -1, /* 2nd CQE */
+ 15, -1 /* 1st CQE */
+ };
+ /* Mask to generate a vector having packet_type/ol_flags. */
+ const uint8x16_t ptype_shuf_m = {
+ 48, 49, 50, -1, /* 4th CQE */
+ 32, 33, 34, -1, /* 3rd CQE */
+ 16, 17, 18, -1, /* 2nd CQE */
+ 0, 1, 2, -1 /* 1st CQE */
+ };
+ /* Mask to generate a vector having flow tags. */
+ const uint8x16_t ftag_shuf_m = {
+ 60, 61, 62, -1, /* 4th CQE */
+ 44, 45, 46, -1, /* 3rd CQE */
+ 28, 29, 30, -1, /* 2nd CQE */
+ 12, 13, 14, -1 /* 1st CQE */
+ };
+ const uint16x8_t crc_adj = {
+ 0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0
+ };
+ const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
+
+ assert(rxq->sges_n == 0);
+ assert(rxq->cqe_n == rxq->elts_n);
+ cq = &(*rxq->cqes)[cq_idx];
+ rte_prefetch_non_temporal(cq);
+ rte_prefetch_non_temporal(cq + 1);
+ rte_prefetch_non_temporal(cq + 2);
+ rte_prefetch_non_temporal(cq + 3);
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
+ /*
+ * Order of indexes:
+ * rq_ci >= cq_ci >= rq_pi
+ * Definition of indexes:
+ * rq_ci - cq_ci := # of buffers owned by HW (posted).
+ * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
+ * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
+ */
+ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
+	/* See if there are unreturned mbufs from a compressed CQE. */
+ rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->rq_pi += rcvd_pkt;
+ pkts += rcvd_pkt;
+ }
+ elts_idx = rxq->rq_pi & q_mask;
+ elts = &(*rxq->elts)[elts_idx];
+	/* Avoid overflowing the pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+	/* Avoid crossing the queue end. */
+ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ if (!pkts_n)
+ return rcvd_pkt;
+	/* At this point, there should not be any remaining packets. */
+ assert(rxq->rq_pi == rxq->cq_ci);
+ /*
+ * Note that vectors have reverse order - {v3, v2, v1, v0}, because
+ * there's no instruction to count trailing zeros. __builtin_clzl() is
+ * used instead.
+ *
+	 * A. copy 4 mbuf pointers from elts ring to returning pkts.
+ * B. load 64B CQE and extract necessary fields
+	 *    Final 16-byte cqes[] extracted from the original 64-byte CQE has the
+ * following structure:
+ * struct {
+ * uint16_t hdr_type_etc;
+ * uint8_t pkt_info;
+ * uint8_t rsvd;
+ * uint16_t byte_cnt;
+ * uint16_t vlan_info;
+	 *         uint32_t rx_hash_res;
+ * uint8_t flow_tag[3];
+ * uint8_t op_own;
+ * } c;
+ * C. fill in mbuf.
+ * D. get valid CQEs.
+ * E. find compressed CQE.
+ */
+ for (pos = 0;
+ pos < pkts_n;
+ pos += MLX5_VPMD_DESCS_PER_LOOP) {
+ uint16x4_t op_own;
+ uint16x4_t opcode, owner_mask, invalid_mask;
+ uint16x4_t comp_mask;
+ uint16x4_t mask;
+ uint16x4_t byte_cnt;
+ uint32x4_t ptype_info, flow_tag;
+ uint8_t *p0, *p1, *p2, *p3;
+ uint8_t *e0 = (void *)&elts[pos]->pkt_len;
+ uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
+ uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
+ uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
+ void *elts_p = (void *)&elts[pos];
+ void *pkts_p = (void *)&pkts[pos];
+
+ /* A.0 do not cross the end of CQ. */
+ mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> ((pkts_n - pos) *
+ sizeof(uint16_t) * 8) : 0);
+ p0 = (void *)&cq[pos].pkt_info;
+ p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
+ p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
+ p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
+ /* Prefetch next 4 CQEs. */
+ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
+ unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
+ rte_prefetch_non_temporal(&cq[next]);
+ rte_prefetch_non_temporal(&cq[next + 1]);
+ rte_prefetch_non_temporal(&cq[next + 2]);
+ rte_prefetch_non_temporal(&cq[next + 3]);
+ }
+ __asm__ volatile (
+ /* B.1 (CQE 3) load a block having op_own. */
+ "ld1 {v19.16b}, [%[p3]] \n\t"
+ "sub %[p3], %[p3], #48 \n\t"
+ /* B.2 (CQE 3) load the rest blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
+ /* B.3 (CQE 3) extract 16B fields. */
+ "tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.4 (CQE 3) adjust CRC length. */
+ "sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
+ /* B.1 (CQE 2) load a block having op_own. */
+ "ld1 {v19.16b}, [%[p2]] \n\t"
+ "sub %[p2], %[p2], #48 \n\t"
+ /* C.1 (CQE 3) generate final structure for mbuf. */
+ "tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 2) load the rest blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
+ /* B.3 (CQE 2) extract 16B fields. */
+ "tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.4 (CQE 2) adjust CRC length. */
+ "sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
+ /* B.1 (CQE 1) load a block having op_own. */
+ "ld1 {v19.16b}, [%[p1]] \n\t"
+ "sub %[p1], %[p1], #48 \n\t"
+ /* C.1 (CQE 2) generate final structure for mbuf. */
+ "tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 1) load the rest blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
+ /* B.3 (CQE 1) extract 16B fields. */
+ "tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.4 (CQE 1) adjust CRC length. */
+ "sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
+ /* B.1 (CQE 0) load a block having op_own. */
+ "ld1 {v19.16b}, [%[p0]] \n\t"
+ "sub %[p0], %[p0], #48 \n\t"
+ /* C.1 (CQE 1) generate final structure for mbuf. */
+ "tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
+ /* B.2 (CQE 0) load the rest blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
+ /* B.3 (CQE 0) extract 16B fields. */
+ "tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.4 (CQE 0) adjust CRC length. */
+ "sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
+ /* A.1 load mbuf pointers. */
+ "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
+ /* D.1 extract op_own byte. */
+ "tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
+ /* C.2 (CQE 3) adjust flow mark. */
+ "add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v15.2d}, [%[e3]] \n\t"
+ /* C.2 (CQE 2) adjust flow mark. */
+ "add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v14.2d}, [%[e2]] \n\t"
+ /* C.1 (CQE 0) generate final structure for mbuf. */
+ "tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
+ /* C.2 (CQE 1) adjust flow mark. */
+ "add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v13.2d}, [%[e1]] \n\t"
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Extract byte_cnt. */
+ "tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
+#endif
+ /* Extract ptype_info. */
+ "tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
+ /* Extract flow_tag. */
+ "tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
+ /* A.2 copy mbuf pointers. */
+ "st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
+ /* C.2 (CQE 0) adjust flow mark. */
+ "add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
+ /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ "st1 {v12.2d}, [%[e0]] \n\t"
+ :[op_own]"=&w"(op_own),
+ [byte_cnt]"=&w"(byte_cnt),
+ [ptype_info]"=&w"(ptype_info),
+ [flow_tag]"=&w"(flow_tag)
+ :[p3]"r"(p3 + 48), [p2]"r"(p2 + 48),
+ [p1]"r"(p1 + 48), [p0]"r"(p0 + 48),
+ [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [elts_p]"r"(elts_p),
+ [pkts_p]"r"(pkts_p),
+ [cqe_shuf_m]"w"(cqe_shuf_m),
+ [mb_shuf_m]"w"(mb_shuf_m),
+ [owner_shuf_m]"w"(owner_shuf_m),
+ [len_shuf_m]"w"(len_shuf_m),
+ [ptype_shuf_m]"w"(ptype_shuf_m),
+ [ftag_shuf_m]"w"(ftag_shuf_m),
+ [crc_adj]"w"(crc_adj),
+ [flow_mark_adj]"w"(flow_mark_adj)
+ :"memory",
+ "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19",
+ "v20", "v21", "v22", "v23",
+ "v24", "v25");
+ /* D.2 flip owner bit to mark CQEs from last round. */
+ owner_mask = vand_u16(op_own, owner_check);
+ owner_mask = vceq_u16(owner_mask, ownership);
+ /* D.3 get mask for invalidated CQEs. */
+ opcode = vand_u16(op_own, opcode_check);
+ invalid_mask = vceq_u16(opcode_check, opcode);
+ /* E.1 find compressed CQE format. */
+ comp_mask = vand_u16(op_own, format_check);
+ comp_mask = vceq_u16(comp_mask, format_check);
+ /* D.4 mask out beyond boundary. */
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.5 merge invalid_mask with invalid owner. */
+ invalid_mask = vorr_u16(invalid_mask, owner_mask);
+ /* E.2 mask out invalid entries. */
+ comp_mask = vbic_u16(comp_mask, invalid_mask);
+ /* E.3 get the first compressed CQE. */
+ comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
+ comp_mask), 0)) /
+ (sizeof(uint16_t) * 8);
+ /* D.6 mask out entries after the compressed CQE. */
+ mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
+ 0);
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.7 count non-compressed valid CQEs. */
+ n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
+ invalid_mask), 0)) / (sizeof(uint16_t) * 8);
+ nocmp_n += n;
+ /* D.2 get the final invalid mask. */
+ mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
+ -1UL >> (n * sizeof(uint16_t) * 8) : 0);
+ invalid_mask = vorr_u16(invalid_mask, mask);
+ /* D.3 check error in opcode. */
+ opcode = vceq_u16(resp_err_check, opcode);
+ opcode = vbic_u16(opcode, invalid_mask);
+ /* D.4 mark if any error is set */
+ rxq->pending_err |=
+ !!vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
+ /* C.4 fill in mbuf - rearm_data and packet_type. */
+ rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
+ opcode, &elts[pos]);
+ if (rxq->hw_timestamp) {
+ elts[pos]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p0, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 1]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p1, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 2]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p2, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ elts[pos + 3]->timestamp =
+ rte_be_to_cpu_64(
+ container_of(p3, struct mlx5_cqe,
+ pkt_info)->timestamp);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Add up received bytes count. */
+ byte_cnt = vbic_u16(byte_cnt, invalid_mask);
+ rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
+#endif
+ /*
+		 * Break the loop unless more valid CQEs are expected, or if
+		 * there is a compressed CQE.
+ */
+ if (n != MLX5_VPMD_DESCS_PER_LOOP)
+ break;
+ }
+	/* If no new CQEs were seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
+ return rcvd_pkt;
+ /* Update the consumer indexes for non-compressed CQEs. */
+ assert(nocmp_n <= pkts_n);
+ rxq->cq_ci += nocmp_n;
+ rxq->rq_pi += nocmp_n;
+ rcvd_pkt += nocmp_n;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ rxq->stats.ipackets += nocmp_n;
+ rxq->stats.ibytes += rcvd_byte;
+#endif
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
+ assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->cq_ci - rxq->rq_pi;
+
+ n = RTE_MIN(n, pkts_n - nocmp_n);
+ rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
+ rxq->rq_pi += n;
+ rcvd_pkt += n;
+ }
+ }
+ rte_compiler_barrier();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ return rcvd_pkt;
+}
+
+#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */
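
The D.6/E.3 steps above turn a NEON comparison mask into an index with a leading-zero count: each of the four CQEs owns one 16-bit lane, all ones when the predicate holds, so dividing the leading-zero count of the 64-bit view by 16 gives the number of lanes before the first hit, counted from the most significant lane. A minimal standalone sketch of the trick (not driver code; the driver arranges its masks around the all-zero case, which the sketch guards explicitly since __builtin_clzll(0) is undefined):

#include <stdint.h>
#include <stdio.h>

#define DESCS_PER_LOOP 4 /* stands in for MLX5_VPMD_DESCS_PER_LOOP */

/*
 * Index of the first all-ones 16-bit lane, counted from the most
 * significant lane of the 64-bit mask; DESCS_PER_LOOP when no lane
 * is set, since __builtin_clzll(0) is undefined.
 */
static unsigned int
first_lane(uint64_t mask64)
{
	if (!mask64)
		return DESCS_PER_LOOP;
	return (unsigned int)(__builtin_clzll(mask64) /
			      (sizeof(uint16_t) * 8));
}

int
main(void)
{
	printf("%u\n", first_lane(0xffffULL << 32)); /* 1: 2nd lane from top */
	printf("%u\n", first_lane(0));               /* 4: no lane set */
	return 0;
}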
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 8560f745..2b9f1601 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -31,38 +31,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
+#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
+
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
-#include <infiniband/arch.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
+#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
@@ -77,14 +62,14 @@
* @param txq
* Pointer to TX queue structure.
* @param dseg
- * Pointer to buffer descriptor to be writen.
+ * Pointer to buffer descriptor to be written.
* @param pkts
* Pointer to array of packets to be sent.
* @param n
* Number of packets to be filled.
*/
static inline void
-txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
struct rte_mbuf **pkts, unsigned int n)
{
unsigned int pos;
@@ -119,85 +104,6 @@ txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
}
/**
- * Count the number of continuous single segment packets. The first packet must
- * be a single segment packet.
- *
- * @param pkts
- * Pointer to array of packets.
- * @param pkts_n
- * Number of packets.
- *
- * @return
- * Number of continuous single segment packets.
- */
-static inline unsigned int
-txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- unsigned int pos;
-
- if (!pkts_n)
- return 0;
- assert(NB_SEGS(pkts[0]) == 1);
- /* Count the number of continuous single segment packets. */
- for (pos = 1; pos < pkts_n; ++pos)
- if (NB_SEGS(pkts[pos]) > 1)
- break;
- return pos;
-}
-
-/**
- * Count the number of packets having same ol_flags and calculate cs_flags.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets.
- * @param pkts_n
- * Number of packets.
- * @param cs_flags
- * Pointer of flags to be returned.
- *
- * @return
- * Number of packets having same ol_flags.
- */
-static inline unsigned int
-txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t *cs_flags)
-{
- unsigned int pos;
- const uint64_t ol_mask =
- PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
-
- if (!pkts_n)
- return 0;
- /* Count the number of packets having same ol_flags. */
- for (pos = 1; pos < pkts_n; ++pos)
- if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
- break;
- /* Should open another MPW session for the rest. */
- if (pkts[0]->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled =
- pkts[0]->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- if (is_tunneled && txq->tunnel_en) {
- *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- *cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
- return pos;
-}
-
-/**
* Send multi-segmented packets until it encounters a single segment packet in
* the pkts list.
*
@@ -212,7 +118,8 @@ txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
* Number of packets successfully transmitted (<= pkts_n).
*/
static uint16_t
-txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
{
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
@@ -257,13 +164,17 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (segs_n == 1 ||
max_elts < segs_n || max_wqe < 2)
break;
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled = buf->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
+ const uint64_t is_tunneled =
+ buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
if (is_tunneled && txq->tunnel_en) {
cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
@@ -298,7 +209,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Fill ESEG in the header. */
_mm_store_si128(t_wqe + 1,
_mm_set_epi16(0, 0, 0, 0,
- htons(len), cs_flags,
+ rte_cpu_to_be_16(len), cs_flags,
0, 0));
txq->wqe_ci = wqe_ci;
}
@@ -307,7 +218,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
++txq->cq_pi;
@@ -338,7 +249,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
-txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
uint8_t cs_flags)
{
struct rte_mbuf **elts;
@@ -374,6 +285,7 @@ txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
if (unlikely(!pkts_n))
return 0;
elts = &(*txq->elts)[elts_head & elts_m];
@@ -432,87 +344,11 @@ txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
nb_dword_per_wqebb;
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, wqe);
+ mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
return pkts_n;
}
/**
- * DPDK callback for vectorized TX.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct txq *txq = (struct txq *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint16_t n;
- uint16_t ret;
-
- n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
- nb_tx += ret;
- if (!ret)
- break;
- }
- return nb_tx;
-}
-
-/**
- * DPDK callback for vectorized TX with multi-seg packets and offload.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct txq *txq = (struct txq *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint8_t cs_flags = 0;
- uint16_t n;
- uint16_t ret;
-
- /* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
- NB_SEGS(pkts[nb_tx]) > 1)
- nb_tx += txq_scatter_v(txq,
- &pkts[nb_tx],
- pkts_n - nb_tx);
- n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
- n = txq_check_multiseg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
- nb_tx += ret;
- if (!ret)
- break;
- }
- return nb_tx;
-}
-
-/**
* Store free buffers to RX SW ring.
*
* @param rxq
@@ -523,7 +359,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets to be stored.
*/
static inline void
-rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
const uint16_t q_mask = (1 << rxq->elts_n) - 1;
struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
@@ -541,41 +377,6 @@ rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
}
/**
- * Replenish buffers for RX in bulk.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param n
- * Number of buffers to be replenished.
- */
-static inline void
-rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
-{
- const uint16_t q_n = 1 << rxq->elts_n;
- const uint16_t q_mask = q_n - 1;
- const uint16_t elts_idx = rxq->rq_ci & q_mask;
- struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
- volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
- unsigned int i;
-
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
- assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
- /* Not to cross queue end. */
- n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
- if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
- rxq->stats.rx_nombuf += n;
- return;
- }
- for (i = 0; i < n; ++i)
- wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
- RTE_PKTMBUF_HEADROOM);
- rxq->rq_ci += n;
- rte_wmb();
- *rxq->rq_db = htonl(rxq->rq_ci);
-}
-
-/**
* Decompress a compressed completion and fill in mbufs in RX SW ring with data
* extracted from the title completion descriptor.
*
@@ -588,8 +389,7 @@ rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
* the title completion descriptor to be copied to the rest of mbufs.
*/
static inline void
-rxq_cq_decompress_v(struct rxq *rxq,
- volatile struct mlx5_cqe *cq,
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
struct rte_mbuf **elts)
{
volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
@@ -636,13 +436,6 @@ rxq_cq_decompress_v(struct rxq *rxq,
10, 11, 2, 3);
#endif
- /* Compile time sanity check for this function. */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
@@ -747,12 +540,13 @@ rxq_cq_decompress_v(struct rxq *rxq,
* Pointer to array of packets to be filled.
*/
static inline void
-rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
- struct rte_mbuf **pkts)
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
+ __m128i op_err, struct rte_mbuf **pkts)
{
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
- __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
+ __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
+ rxq->hw_timestamp * PKT_RX_TIMESTAMP);
__m128i cv_flags;
const __m128i zero = _mm_setzero_si128();
const __m128i ptype_mask =
@@ -769,17 +563,17 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
0,
(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
- (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
+ (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
0);
const __m128i cv_mask =
_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
const __m128i mbuf_init =
_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
@@ -853,15 +647,11 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
/* Merge to ol_flags. */
ol_flags = _mm_or_si128(ol_flags, cv_flags);
/* Merge mbuf_init and ol_flags. */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
- offsetof(struct rte_mbuf, rearm_data) + 8);
rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
/* Write 8B rearm_data and 8B ol_flags. */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
- RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
@@ -869,51 +659,6 @@ rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
}
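
The ol_flags seed above (rxq->rss_hash * PKT_RX_RSS_HASH | rxq->hw_timestamp * PKT_RX_TIMESTAMP) is a branchless select: multiplying a flag constant by a 0/1 configuration bit either keeps or clears the flag without a conditional. A tiny standalone sketch, with the two flag values inlined only so it builds without the DPDK headers:

#include <assert.h>
#include <stdint.h>

/* Values copied from this release's rte_mbuf.h, inlined for a
 * standalone build. */
#define PKT_RX_RSS_HASH  (1ULL << 1)
#define PKT_RX_TIMESTAMP (1ULL << 17)

int
main(void)
{
	unsigned int rss_hash = 1;     /* 0/1 queue configuration bits */
	unsigned int hw_timestamp = 0;
	uint64_t ol_flags = rss_hash * PKT_RX_RSS_HASH |
			    hw_timestamp * PKT_RX_TIMESTAMP;

	/* The multiply keeps each flag iff its condition bit is 1. */
	assert(ol_flags == PKT_RX_RSS_HASH);
	return 0;
}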
/**
- * Skip error packets.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-static uint16_t
-rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- uint16_t n = 0;
- unsigned int i;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- uint32_t err_bytes = 0;
-#endif
-
- for (i = 0; i < pkts_n; ++i) {
- struct rte_mbuf *pkt = pkts[i];
-
- if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- err_bytes += PKT_LEN(pkt);
-#endif
- rte_pktmbuf_free_seg(pkt);
- } else {
- pkts[n++] = pkt;
- }
- }
- rxq->stats.idropped += (pkts_n - n);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Correct counters of errored completions. */
- rxq->stats.ipackets -= (pkts_n - n);
- rxq->stats.ibytes -= err_bytes;
-#endif
- rxq->pending_err = 0;
- return n;
-}
-
-/**
* Receive burst of packets. An errored completion also consumes a mbuf, but the
* packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
* before returning to application.
@@ -929,7 +674,7 @@ rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
-rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
@@ -984,26 +729,6 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rxq->crc_present * ETHER_CRC_LEN);
const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
- /* Compile time sanity check for this function. */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, pkt_info) != 0);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rx_hash_res) !=
- offsetof(struct mlx5_cqe, pkt_info) + 12);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd1) +
- sizeof(((struct mlx5_cqe *)0)->rsvd1) !=
- offsetof(struct mlx5_cqe, hdr_type_etc));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, vlan_info) !=
- offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd2) +
- sizeof(((struct mlx5_cqe *)0)->rsvd2) !=
- offsetof(struct mlx5_cqe, byte_cnt));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, sop_drop_qpn) !=
- RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, op_own) !=
- offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
assert(rxq->sges_n == 0);
assert(rxq->cqe_n == rxq->elts_n);
cq = &(*rxq->cqes)[cq_idx];
@@ -1022,7 +747,7 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
- rxq_replenish_bulk_mbuf(rxq, repl_n);
+ mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
if (rcvd_pkt > 0) {
@@ -1214,6 +939,16 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+ if (rxq->hw_timestamp) {
+ pkts[pos]->timestamp =
+ rte_be_to_cpu_64(cq[pos].timestamp);
+ pkts[pos + 1]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p1].timestamp);
+ pkts[pos + 2]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p2].timestamp);
+ pkts[pos + 3]->timestamp =
+ rte_be_to_cpu_64(cq[pos + p3].timestamp);
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
@@ -1254,164 +989,9 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rcvd_pkt += n;
}
}
- rte_wmb();
- *rxq->cq_db = htonl(rxq->cq_ci);
+ rte_compiler_barrier();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
return rcvd_pkt;
}
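
Two ordering and byte-order changes land in this hunk: htonl() becomes rte_cpu_to_be_32(), which produces the big-endian layout the HCA expects without pulling in the socket headers, and the full rte_wmb() before the CQ doorbell is relaxed to a compiler barrier, which appears sufficient here because the store only publishes a consumer index and x86 does not reorder stores. A small illustrative sketch of the conversion helper:

#include <stdint.h>
#include <stdio.h>

#include <rte_byteorder.h>

int
main(void)
{
	uint32_t cq_ci = 0x01020304;

	/* Same result as htonl() on a little-endian host, without the
	 * socket headers: prints 04030201. */
	printf("%08x\n", (unsigned int)rte_cpu_to_be_32(cq_ci));
	return 0;
}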
-/**
- * DPDK callback for vectorized RX.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct rxq *rxq = dpdk_rxq;
- uint16_t nb_rx;
-
- nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
- if (unlikely(rxq->pending_err))
- nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
- return nb_rx;
-}
-
-/**
- * Check Tx queue flags are set for raw vectorized Tx.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
-{
- uint16_t i;
-
- /* All the configured queues should support. */
- for (i = 0; i < priv->txqs_n; ++i) {
- struct txq *txq = (*priv->txqs)[i];
-
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
- !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- break;
- }
- if (i != priv->txqs_n)
- return -ENOTSUP;
- return 1;
-}
-
-/**
- * Check a device can support vectorized TX.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
-{
- if (!priv->tx_vec_en ||
- priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
- return -ENOTSUP;
- return 1;
-}
-
-/**
- * Check a RX queue can support vectorized RX.
- *
- * @param rxq
- * Pointer to RX queue.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-rxq_check_vec_support(struct rxq *rxq)
-{
- struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
- return -ENOTSUP;
- return 1;
-}
-
-/**
- * Check a device can support vectorized RX.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-int __attribute__((cold))
-priv_check_vec_rx_support(struct priv *priv)
-{
- uint16_t i;
-
- if (!priv->rx_vec_en)
- return -ENOTSUP;
- /* All the configured queues should support. */
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
-
- if (rxq_check_vec_support(rxq) < 0)
- break;
- }
- if (i != priv->rxqs_n)
- return -ENOTSUP;
- return 1;
-}
-
-/**
- * Prepare for vectorized RX.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-priv_prep_vec_rx_function(struct priv *priv)
-{
- uint16_t i;
-
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
- const uint16_t desc = 1 << rxq->elts_n;
- int j;
-
- assert(rxq->elts_n == rxq->cqe_n);
- /* Initialize default rearm_data for vPMD. */
- mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(mbuf_init, 1);
- mbuf_init->nb_segs = 1;
- mbuf_init->port = rxq->port_id;
- /*
- * prevent compiler reordering:
- * rearm_data covers previous fields.
- */
- rte_compiler_barrier();
- rxq->mbuf_initializer =
- *(uint64_t *)&mbuf_init->rearm_data;
- /* Padding with a fake mbuf for vectorized Rx. */
- for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
- (*rxq->elts)[desc + j] = &rxq->fake_mbuf;
- /* Mark that it need to be cleaned up for rxq_alloc_elts(). */
- rxq->trim_elts = 1;
- }
-}
+#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */
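
The priv_prep_vec_rx_function() body removed above (its logic is relocated in this version rather than dropped) builds rxq->mbuf_initializer: the fields covered by the 8-byte rearm_data marker are filled once and snapshotted, letting the vectorized Rx path rearm each mbuf with a single 64-bit store. A hedged sketch of the idea, assuming only the public rte_mbuf layout:

#include <string.h>

#include <rte_atomic.h>
#include <rte_mbuf.h>

/* Fill the fields that rearm_data covers once, then snapshot them as
 * a single 64-bit word the Rx path can store per received mbuf. */
static uint64_t
make_mbuf_initializer(uint16_t port_id)
{
	struct rte_mbuf mb;

	memset(&mb, 0, sizeof(mb));
	mb.data_off = RTE_PKTMBUF_HEADROOM;
	rte_mbuf_refcnt_set(&mb, 1);
	mb.nb_segs = 1;
	mb.port = port_id;
	/* rearm_data covers the fields set above; keep the stores from
	 * being reordered past the read. */
	rte_compiler_barrier();
	return *(uint64_t *)&mb.rearm_data;
}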
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
new file mode 100644
index 00000000..5cd1ab80
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -0,0 +1,294 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/**
+ * Initialise the socket used to communicate with the secondary process.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+priv_socket_init(struct priv *priv)
+{
+ struct sockaddr_un sun = {
+ .sun_family = AF_UNIX,
+ };
+ int ret;
+ int flags;
+ struct stat file_stat;
+
+ /*
+ * Initialise the socket to communicate with the secondary
+ * process.
+ */
+ ret = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (ret < 0) {
+ WARN("secondary process not supported: %s", strerror(errno));
+ return ret;
+ }
+ priv->primary_socket = ret;
+ flags = fcntl(priv->primary_socket, F_GETFL, 0);
+ if (flags == -1)
+ goto out;
+ ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
+ if (ret < 0)
+ goto out;
+ snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
+ MLX5_DRIVER_NAME, priv->primary_socket);
+ ret = stat(sun.sun_path, &file_stat);
+ if (!ret)
+ claim_zero(remove(sun.sun_path));
+ ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
+ sizeof(sun));
+ if (ret < 0) {
+ WARN("cannot bind socket, secondary process not supported: %s",
+ strerror(errno));
+ goto close;
+ }
+ ret = listen(priv->primary_socket, 0);
+ if (ret < 0) {
+ WARN("Secondary process not supported: %s", strerror(errno));
+ goto close;
+ }
+ return ret;
+close:
+ remove(sun.sun_path);
+out:
+ claim_zero(close(priv->primary_socket));
+ priv->primary_socket = 0;
+ return -(ret);
+}
+
+/**
+ * Uninitialise the socket used to communicate with the secondary process.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+int
+priv_socket_uninit(struct priv *priv)
+{
+ MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
+ claim_zero(close(priv->primary_socket));
+ priv->primary_socket = 0;
+ claim_zero(remove(path));
+ return 0;
+}
+
+/**
+ * Handle socket interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_socket_handle(struct priv *priv)
+{
+ int conn_sock;
+ int ret = 0;
+ struct cmsghdr *cmsg = NULL;
+ struct ucred *cred = NULL;
+ char buf[CMSG_SPACE(sizeof(struct ucred))] = { 0 };
+ char vbuf[1024] = { 0 };
+ struct iovec io = {
+ .iov_base = vbuf,
+		.iov_len = sizeof(vbuf),
+ };
+ struct msghdr msg = {
+ .msg_iov = &io,
+ .msg_iovlen = 1,
+ .msg_control = buf,
+ .msg_controllen = sizeof(buf),
+ };
+ int *fd;
+
+ /* Accept the connection from the client. */
+ conn_sock = accept(priv->primary_socket, NULL, NULL);
+ if (conn_sock < 0) {
+ WARN("connection failed: %s", strerror(errno));
+ return;
+ }
+ ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
+ sizeof(int));
+ if (ret < 0) {
+ WARN("cannot change socket options");
+ goto out;
+ }
+ ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
+ if (ret < 0) {
+		WARN("failed to receive message: %s", strerror(errno));
+ goto out;
+ }
+ /* Expect to receive credentials only. */
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ WARN("no message");
+ goto out;
+ }
+ if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
+ (cmsg->cmsg_len >= sizeof(*cred))) {
+ cred = (struct ucred *)CMSG_DATA(cmsg);
+ assert(cred != NULL);
+ }
+ cmsg = CMSG_NXTHDR(&msg, cmsg);
+ if (cmsg != NULL) {
+ WARN("Message wrongly formatted");
+ goto out;
+ }
+ /* Make sure all the ancillary data was received and valid. */
+ if ((cred == NULL) || (cred->uid != getuid()) ||
+ (cred->gid != getgid())) {
+ WARN("wrong credentials");
+ goto out;
+ }
+ /* Set-up the ancillary data. */
+ cmsg = CMSG_FIRSTHDR(&msg);
+ assert(cmsg != NULL);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(priv->ctx->cmd_fd));
+ fd = (int *)CMSG_DATA(cmsg);
+ *fd = priv->ctx->cmd_fd;
+ ret = sendmsg(conn_sock, &msg, 0);
+ if (ret < 0)
+ WARN("cannot send response");
+out:
+ close(conn_sock);
+}
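
priv_socket_handle() answers a credential-checked request by attaching the Verbs command file descriptor as SCM_RIGHTS ancillary data; the kernel then duplicates that descriptor into the receiving process, which is what lets the secondary process remap the device resources. For reference, a minimal standalone sender using the same mechanism (generic POSIX code, not driver API):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/*
 * Minimal SCM_RIGHTS sender: attach one file descriptor as ancillary
 * data on a connected AF_UNIX stream socket. The kernel duplicates
 * the descriptor into the peer process.
 */
static int
send_fd(int sock, int fd)
{
	char data = 0; /* at least one byte of payload is required */
	struct iovec io = { .iov_base = &data, .iov_len = 1 };
	char buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msg = {
		.msg_iov = &io,
		.msg_iovlen = 1,
		.msg_control = buf,
		.msg_controllen = sizeof(buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}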
+
+/**
+ * Connect to the primary process.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ *
+ * @return
+ * fd on success, negative errno value on failure.
+ */
+int
+priv_socket_connect(struct priv *priv)
+{
+ struct sockaddr_un sun = {
+ .sun_family = AF_UNIX,
+ };
+ int socket_fd;
+ int *fd = NULL;
+ int ret;
+ struct ucred *cred;
+ char buf[CMSG_SPACE(sizeof(*cred))] = { 0 };
+ char vbuf[1024] = { 0 };
+ struct iovec io = {
+ .iov_base = vbuf,
+		.iov_len = sizeof(vbuf),
+ };
+ struct msghdr msg = {
+ .msg_control = buf,
+ .msg_controllen = sizeof(buf),
+ .msg_iov = &io,
+ .msg_iovlen = 1,
+ };
+ struct cmsghdr *cmsg;
+
+ ret = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (ret < 0) {
+ WARN("cannot connect to primary");
+ return ret;
+ }
+ socket_fd = ret;
+ snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
+ MLX5_DRIVER_NAME, priv->primary_socket);
+ ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
+ if (ret < 0) {
+ WARN("cannot connect to primary");
+ goto out;
+ }
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ DEBUG("cannot get first message");
+ goto out;
+ }
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_CREDENTIALS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
+ cred = (struct ucred *)CMSG_DATA(cmsg);
+ if (cred == NULL) {
+ DEBUG("no credentials received");
+ goto out;
+ }
+ cred->pid = getpid();
+ cred->uid = getuid();
+ cred->gid = getgid();
+ ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
+ if (ret < 0) {
+ WARN("cannot send credentials to primary: %s",
+ strerror(errno));
+ goto out;
+ }
+ ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
+ if (ret <= 0) {
+ WARN("no message from primary: %s", strerror(errno));
+ goto out;
+ }
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (cmsg == NULL) {
+ WARN("No file descriptor received");
+ goto out;
+ }
+ fd = (int *)CMSG_DATA(cmsg);
+ if (*fd <= 0) {
+ WARN("no file descriptor received: %s", strerror(errno));
+ ret = *fd;
+ goto out;
+ }
+ ret = *fd;
+out:
+ close(socket_fd);
+ return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 703f48c3..5e225d37 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -34,16 +34,9 @@
#include <linux/sockios.h>
#include <linux/ethtool.h>
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_malloc.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
@@ -325,7 +318,7 @@ priv_xstats_reset(struct priv *priv)
* @param[out] stats
* Stats structure output buffer.
*/
-void
+int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = mlx5_get_priv(dev);
@@ -336,7 +329,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
if (rxq == NULL)
continue;
@@ -357,7 +350,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
tmp.rx_nombuf += rxq->stats.rx_nombuf;
}
for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
if (txq == NULL)
continue;
@@ -367,19 +360,20 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
tmp.q_opackets[idx] += txq->stats.opackets;
tmp.q_obytes[idx] += txq->stats.obytes;
#endif
- tmp.q_errors[idx] += txq->stats.odropped;
+ tmp.q_errors[idx] += txq->stats.oerrors;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
tmp.opackets += txq->stats.opackets;
tmp.obytes += txq->stats.obytes;
#endif
- tmp.oerrors += txq->stats.odropped;
+ tmp.oerrors += txq->stats.oerrors;
}
#ifndef MLX5_PMD_SOFT_COUNTERS
/* FIXME: retrieve and add hardware counters. */
#endif
*stats = tmp;
priv_unlock(priv);
+ return 0;
}
/**
@@ -442,8 +436,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev,
priv_lock(priv);
stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0)
+ if (stats_n < 0) {
+ priv_unlock(priv);
return -1;
+ }
if (xstats_ctrl->stats_n != stats_n)
priv_xstats_init(priv);
ret = priv_xstats_get(priv, stats);
@@ -468,10 +464,11 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
priv_lock(priv);
stats_n = priv_ethtool_get_stats_n(priv);
if (stats_n < 0)
- return;
+ goto unlock;
if (xstats_ctrl->stats_n != stats_n)
priv_xstats_init(priv);
priv_xstats_reset(priv);
+unlock:
priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 595a9e06..5de2d026 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -30,23 +30,90 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <unistd.h>
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+static void
+priv_txq_stop(struct priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i != priv->txqs_n; ++i)
+ mlx5_priv_txq_release(priv, i);
+}
+
+static int
+priv_txq_start(struct priv *priv)
+{
+ unsigned int i;
+ int ret = 0;
+
+ /* Add memory regions to Tx queues. */
+ for (i = 0; i != priv->txqs_n; ++i) {
+ unsigned int idx = 0;
+ struct mlx5_mr *mr;
+ struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
+
+ if (!txq_ctrl)
+ continue;
+ LIST_FOREACH(mr, &priv->mr, next)
+ priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
+ txq_alloc_elts(txq_ctrl);
+ txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
+ if (!txq_ctrl->ibv) {
+ ret = ENOMEM;
+ goto error;
+ }
+ }
+ return -ret;
+error:
+ priv_txq_stop(priv);
+ return -ret;
+}
+
+static void
+priv_rxq_stop(struct priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i)
+ mlx5_priv_rxq_release(priv, i);
+}
+
+static int
+priv_rxq_start(struct priv *priv)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
+
+ if (!rxq_ctrl)
+ continue;
+ ret = rxq_alloc_elts(rxq_ctrl);
+ if (ret)
+ goto error;
+ rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
+ if (!rxq_ctrl->ibv) {
+ ret = ENOMEM;
+ goto error;
+ }
+ }
+ return -ret;
+error:
+ priv_rxq_stop(priv);
+ return -ret;
+}
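
Both start helpers above use the same unwind idiom: acquire queue objects one by one, and on the first failure release what was taken and hand the caller a negative errno (the driver simply calls the matching *_stop() helper for every index, which is safe because releases are reference counted). A toy, self-contained version of the pattern with illustrative names:

#include <stdio.h>

struct ctx { int started[4]; };

static int
start_one(struct ctx *c, unsigned int i)
{
	if (i == 2)
		return 12; /* simulate ENOMEM on the third queue */
	c->started[i] = 1;
	return 0;
}

static void
stop_one(struct ctx *c, unsigned int i)
{
	c->started[i] = 0;
}

static int
start_all(struct ctx *c, unsigned int n)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i != n; ++i) {
		ret = start_one(c, i);
		if (ret)
			goto error;
	}
	return 0;
error:
	while (i--)
		stop_one(c, i);
	return -ret;
}

int
main(void)
{
	struct ctx c = { { 0 } };

	printf("%d\n", start_all(&c, 4)); /* -12 */
	return 0;
}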
+
/**
* DPDK callback to start the device.
*
@@ -62,36 +129,47 @@ int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr = NULL;
int err;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
+ dev->data->dev_started = 1;
priv_lock(priv);
- if (priv->started) {
- priv_unlock(priv);
- return 0;
+ err = priv_flow_create_drop_queue(priv);
+ if (err) {
+ ERROR("%p: Drop queue allocation failed: %s",
+ (void *)dev, strerror(err));
+ goto error;
}
- /* Update Rx/Tx callback. */
- priv_select_tx_function(priv);
- priv_select_rx_function(priv);
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
- err = priv_create_hash_rxqs(priv);
- if (!err)
- err = priv_rehash_flows(priv);
- if (!err)
- priv->started = 1;
- else {
- ERROR("%p: an error occurred while configuring hash RX queues:"
+ rte_mempool_walk(mlx5_mp2mr_iter, priv);
+ err = priv_txq_start(priv);
+ if (err) {
+ ERROR("%p: TXQ allocation failed: %s",
+ (void *)dev, strerror(err));
+ goto error;
+ }
+ /* Update send callback. */
+ priv_dev_select_tx_function(priv, dev);
+ err = priv_rxq_start(priv);
+ if (err) {
+ ERROR("%p: RXQ allocation failed: %s",
+ (void *)dev, strerror(err));
+ goto error;
+ }
+ /* Update receive callback. */
+ priv_dev_select_rx_function(priv, dev);
+ err = priv_dev_traffic_enable(priv, dev);
+ if (err) {
+ ERROR("%p: an error occurred while configuring control flows:"
" %s",
(void *)priv, strerror(err));
goto error;
}
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
- priv_fdir_enable(priv);
- err = priv_flow_start(priv);
+ err = priv_flow_start(priv, &priv->flows);
if (err) {
- priv->started = 0;
ERROR("%p: an error occurred while configuring flows:"
" %s",
(void *)priv, strerror(err));
@@ -109,10 +187,14 @@ mlx5_dev_start(struct rte_eth_dev *dev)
return 0;
error:
/* Rollback. */
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
- priv_flow_stop(priv);
+ dev->data->dev_started = 0;
+ for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
+ priv_mr_release(priv, mr);
+ priv_flow_stop(priv, &priv->flows);
+ priv_dev_traffic_disable(priv, dev);
+ priv_txq_stop(priv);
+ priv_rxq_stop(priv);
+ priv_flow_delete_drop_queue(priv);
priv_unlock(priv);
return -err;
}
@@ -129,23 +211,215 @@ void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr;
if (mlx5_is_secondary())
return;
priv_lock(priv);
- if (!priv->started) {
- priv_unlock(priv);
- return;
- }
+ dev->data->dev_started = 0;
+ /* Prevent crashes when queues are still in use. */
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+ rte_wmb();
+ usleep(1000 * priv->rxqs_n);
DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
- priv_fdir_disable(priv);
- priv_flow_stop(priv);
+ priv_flow_stop(priv, &priv->flows);
+ priv_dev_traffic_disable(priv, dev);
priv_rx_intr_vec_disable(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
- priv->started = 0;
+ priv_txq_stop(priv);
+ priv_rxq_stop(priv);
+ for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
+ priv_mr_release(priv, mr);
+ priv_flow_delete_drop_queue(priv);
+ priv_unlock(priv);
+}
+
+/**
+ * Enable traffic flows configured by the control plane.
+ *
+ * @param priv
+ * Pointer to Ethernet device private data.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, rte_errno value on failure.
+ */
+int
+priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
+{
+ struct rte_flow_item_eth bcast = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ struct rte_flow_item_eth ipv6_multi_spec = {
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth ipv6_multi_mask = {
+ .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth unicast = {
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ const unsigned int vlan_filter_n = priv->vlan_filter_n;
+ const struct ether_addr cmp = {
+ .addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ };
+ unsigned int i;
+ unsigned int j;
+ int ret;
+
+ if (priv->isolated)
+ return 0;
+ if (dev->data->promiscuous) {
+ struct rte_flow_item_eth promisc = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
+
+ claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
+ return 0;
+ }
+ if (dev->data->all_multicast) {
+ struct rte_flow_item_eth multicast = {
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
+
+ claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
+ } else {
+ /* Add broadcast/multicast flows. */
+ for (i = 0; i != vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vlan),
+ };
+ struct rte_flow_item_vlan vlan_mask = {
+ .tci = 0xffff,
+ };
+
+ ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
+ &vlan_spec, &vlan_mask);
+ if (ret)
+ goto error;
+ ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
+ &ipv6_multi_mask,
+ &vlan_spec, &vlan_mask);
+ if (ret)
+ goto error;
+ }
+ if (!vlan_filter_n) {
+ ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
+ if (ret)
+ goto error;
+ ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
+ &ipv6_multi_mask);
+ if (ret)
+ goto error;
+ }
+ }
+ /* Add MAC address flows. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (!memcmp(mac, &cmp, sizeof(*mac)))
+ continue;
+ memcpy(&unicast.dst.addr_bytes,
+ mac->addr_bytes,
+ ETHER_ADDR_LEN);
+ for (j = 0; j != vlan_filter_n; ++j) {
+ uint16_t vlan = priv->vlan_filter[j];
+
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vlan),
+ };
+ struct rte_flow_item_vlan vlan_mask = {
+ .tci = 0xffff,
+ };
+
+ ret = mlx5_ctrl_flow_vlan(dev, &unicast,
+ &unicast_mask,
+ &vlan_spec,
+ &vlan_mask);
+ if (ret)
+ goto error;
+ }
+ if (!vlan_filter_n) {
+ ret = mlx5_ctrl_flow(dev, &unicast,
+ &unicast_mask);
+ if (ret)
+ goto error;
+ }
+ }
+ return 0;
+error:
+ return rte_errno;
+}
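
mlx5_ctrl_flow() and mlx5_ctrl_flow_vlan() take spec/mask pairs with rte_flow semantics: the mask selects which spec bytes participate in the match, so dst 33:33:00:00:00:00 under mask ff:ff:00:00:00:00 matches every IPv6 multicast MAC. A sketch of how such a pair would look in a generic rte_flow pattern, assuming these helpers build an equivalent item list internally:

#include <rte_flow.h>

/* Spec/mask pair matching any IPv6 multicast destination MAC: only
 * the bytes selected by the mask are compared, so the trailing four
 * bytes of the spec are don't-care. */
static const struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
};
static const struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
};
static const struct rte_flow_item pattern[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth_spec,
		.mask = &eth_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};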
+
+/**
+ * Disable traffic flows configured by the control plane.
+ *
+ * @param priv
+ * Pointer to Ethernet device private data.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
+{
+ (void)dev;
+ priv_flow_flush(priv, &priv->ctrl_flows);
+ return 0;
+}
+
+/**
+ * Restart traffic flows configured by control plane
+ *
+ * @param priv
+ * Pointer to Ethernet device private data.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_started) {
+ priv_dev_traffic_disable(priv, dev);
+ priv_dev_traffic_enable(priv, dev);
+ }
+ return 0;
+}
+
+/**
+ * Restart traffic flows configured by the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success.
+ */
+int
+mlx5_traffic_restart(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ priv_lock(priv);
+ priv_dev_traffic_restart(priv, dev);
priv_unlock(priv);
+ return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 98aaa7ca..9c5860ff 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -36,6 +36,8 @@
#include <errno.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -47,17 +49,10 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5_utils.h"
#include "mlx5_defs.h"
@@ -70,23 +65,15 @@
*
* @param txq_ctrl
* Pointer to TX queue structure.
- * @param elts_n
- * Number of elements to allocate.
*/
-static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+void
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
+ const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
unsigned int i;
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
- volatile struct mlx5_wqe64 *wqe =
- (volatile struct mlx5_wqe64 *)
- txq_ctrl->txq.wqes + i;
-
- memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
- }
DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
@@ -100,7 +87,7 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
* Pointer to TX queue structure.
*/
static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -129,155 +116,231 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
}
/**
- * Clean up a TX queue.
+ * DPDK callback to configure a TX queue.
*
- * Destroy objects, free allocated memory and reset the structure for reuse.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
*
- * @param txq_ctrl
- * Pointer to TX queue structure.
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret = 0;
+
+ if (mlx5_is_secondary())
+ return -E_RTE_SECONDARY;
+
+ priv_lock(priv);
+ if (desc <= MLX5_TX_COMP_THRESH) {
+ WARN("%p: number of descriptors requested for TX queue %u"
+ " must be higher than MLX5_TX_COMP_THRESH, using"
+ " %u instead of %u",
+ (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ desc = MLX5_TX_COMP_THRESH + 1;
+ }
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ WARN("%p: increased number of descriptors in TX queue %u"
+ " to the next power of two (%d)",
+ (void *)dev, idx, desc);
+ }
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= priv->txqs_n) {
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, priv->txqs_n);
+ priv_unlock(priv);
+ return -EOVERFLOW;
+ }
+ if (!mlx5_priv_txq_releasable(priv, idx)) {
+ ret = EBUSY;
+ ERROR("%p: unable to release queue index %u",
+ (void *)dev, idx);
+ goto out;
+ }
+ mlx5_priv_txq_release(priv, idx);
+ txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ if (!txq_ctrl) {
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ ret = ENOMEM;
+ goto out;
+ }
+ DEBUG("%p: adding TX queue %p to list",
+ (void *)dev, (void *)txq_ctrl);
+ (*priv->txqs)[idx] = &txq_ctrl->txq;
+out:
+ priv_unlock(priv);
+ return -ret;
+}
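
desc is rounded up to a power of two with log2above(), since the ring indexes are masked rather than range-checked. A standalone sketch of that rounding; a local copy of the helper is included only so the sketch builds on its own (the driver uses its own version from mlx5_utils.h):

#include <stdio.h>

/* log2above() yields ceil(log2(v)), so 1 << log2above(desc) is the
 * next power of two at or above desc. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	printf("%u\n", 1u << log2above(1000)); /* 1024 */
	printf("%u\n", 1u << log2above(512));  /* 512: already a power of 2 */
	return 0;
}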
+
+/**
+ * DPDK callback to release a TX queue.
+ *
+ * @param dpdk_txq
+ * Generic TX queue pointer.
*/
void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+mlx5_tx_queue_release(void *dpdk_txq)
{
- size_t i;
-
- DEBUG("cleaning up %p", (void *)txq_ctrl);
- txq_free_elts(txq_ctrl);
- if (txq_ctrl->qp != NULL)
- claim_zero(ibv_destroy_qp(txq_ctrl->qp));
- if (txq_ctrl->cq != NULL)
- claim_zero(ibv_destroy_cq(txq_ctrl->cq));
- for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (txq_ctrl->txq.mp2mr[i].mr == NULL)
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ struct priv *priv;
+ unsigned int i;
+
+ if (mlx5_is_secondary())
+ return;
+
+ if (txq == NULL)
+ return;
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ priv = txq_ctrl->priv;
+ priv_lock(priv);
+ for (i = 0; (i != priv->txqs_n); ++i)
+ if ((*priv->txqs)[i] == txq) {
+ DEBUG("%p: removing TX queue %p from list",
+ (void *)priv->dev, (void *)txq_ctrl);
+ mlx5_priv_txq_release(priv, i);
break;
- claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
- }
- memset(txq_ctrl, 0, sizeof(*txq_ctrl));
+ }
+ priv_unlock(priv);
}
+
/**
- * Initialize TX queue.
+ * Locally map the UAR used by Tx queues for the BlueFlame doorbell.
*
- * @param tmpl
- * Pointer to TX queue control template.
- * @param txq_ctrl
- * Pointer to TX queue control.
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param fd
+ * Verbs file descriptor to map UAR pages.
*
* @return
* 0 on success, errno value on failure.
*/
-static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+int
+priv_tx_uar_remap(struct priv *priv, int fd)
{
- struct mlx5_qp *qp = to_mqp(tmpl->qp);
- struct ibv_cq *ibcq = tmpl->cq;
- struct ibv_mlx5_cq_info cq_info;
+ unsigned int i, j;
+ uintptr_t pages[priv->txqs_n];
+ unsigned int pages_n = 0;
+ uintptr_t uar_va;
+ void *addr;
+ struct mlx5_txq_data *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
+ int already_mapped;
+ size_t page_size = sysconf(_SC_PAGESIZE);
- if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
- ERROR("Unable to query CQ info. check your OFED.");
- return ENOTSUP;
- }
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
- return EINVAL;
+ memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
+ /*
+ * As rdma-core, UARs are mapped in size of OS page size.
+	 * As in rdma-core, UARs are mapped at OS page-size granularity.
+	 * Use the page-aligned address to avoid duplicate mmaps.
+	 * See the libmlx5 function mlx5_init_context().
+ for (i = 0; i != priv->txqs_n; ++i) {
+ txq = (*priv->txqs)[i];
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+ already_mapped = 0;
+ for (j = 0; j != pages_n; ++j) {
+ if (pages[j] == uar_va) {
+ already_mapped = 1;
+ break;
+ }
+ }
+ if (already_mapped)
+ continue;
+ pages[pages_n++] = uar_va;
+ addr = mmap((void *)uar_va, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (addr != (void *)uar_va) {
+ ERROR("call to mmap failed on UAR for txq %d\n", i);
+ return -1;
+ }
}
- tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
- tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
- tmpl->txq.wqes = qp->gen_data.sqstart;
- tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
- tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
- tmpl->txq.bf_reg = qp->gen_data.bf->reg;
- tmpl->txq.cq_db = cq_info.dbrec;
- tmpl->txq.cqes =
- (volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq_info.buf;
- tmpl->txq.elts =
- (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
- ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
return 0;
}
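
priv_tx_uar_remap() floors each UAR register address to its page before comparing, so queues whose doorbell registers share one page trigger a single mmap(). A small standalone sketch of the dedup test (4096 stands in for the value sysconf(_SC_PAGESIZE) returns at run time; the addresses are made up):

#include <stdint.h>
#include <stdio.h>

/* Floor v to a multiple of a (a must be a power of two), as
 * RTE_ALIGN_FLOOR() does in the loop above. */
#define ALIGN_FLOOR(v, a) ((v) & ~((uintptr_t)(a) - 1))

int
main(void)
{
	uintptr_t reg0 = 0x7f0000001800; /* two doorbell registers ... */
	uintptr_t reg1 = 0x7f0000001a00; /* ... in the same page */

	/* Prints 1: both floor to 0x7f0000001000, so one mmap suffices. */
	printf("%d\n", ALIGN_FLOOR(reg0, 4096) == ALIGN_FLOOR(reg1, 4096));
	return 0;
}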
/**
- * Configure a TX queue.
+ * Create the Tx queue Verbs object.
*
- * @param dev
- * Pointer to Ethernet device structure.
- * @param txq_ctrl
- * Pointer to TX queue structure.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   Queue index in DPDK Tx queue array.
*
* @return
- * 0 on success, errno value on failure.
+ *   The Verbs object initialised on success, NULL otherwise.
*/
-int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_txconf *conf)
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
{
- struct priv *priv = mlx5_get_priv(dev);
- struct txq_ctrl tmpl = {
- .priv = priv,
- .socket = socket,
- };
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ struct mlx5_txq_ibv tmpl;
+ struct mlx5_txq_ibv *txq_ibv;
union {
- struct ibv_exp_qp_init_attr init;
- struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_qp_attr mod;
- struct ibv_exp_cq_attr cq_attr;
+ struct ibv_qp_init_attr_ex init;
+ struct ibv_cq_init_attr_ex cq;
+ struct ibv_qp_attr mod;
+ struct ibv_cq_ex cq_attr;
} attr;
unsigned int cqe_n;
- const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
- (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE);
+ struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_obj obj;
+ const int desc = 1 << txq_data->elts_n;
int ret = 0;
+ assert(txq_data);
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ret = ENOTSUP;
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
goto error;
}
- tmpl.txq.flags = conf->txq_flags;
- assert(desc > MLX5_TX_COMP_THRESH);
- tmpl.txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+ memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
/* MRs will be registered in mp2mr[] later. */
- attr.cq = (struct ibv_exp_cq_init_attr){
+ attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
if (priv->mps == MLX5_MPW_ENHANCED)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_exp_create_cq(priv->ctx,
- cqe_n,
- NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ret = ENOMEM;
- ERROR("%p: CQ creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: CQ creation failure", (void *)txq_ctrl);
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- attr.init = (struct ibv_exp_qp_init_attr){
+ attr.init = (struct ibv_qp_init_attr_ex){
/* CQ to be associated with the send queue. */
.send_cq = tmpl.cq,
/* CQ to be associated with the receive queue. */
.recv_cq = tmpl.cq,
.cap = {
/* Max number of outstanding WRs. */
- .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
+ .max_send_wr =
+ ((priv->device_attr.orig_attr.max_qp_wr <
+ desc) ?
+ priv->device_attr.orig_attr.max_qp_wr :
+ desc),
/*
* Max number of scatter/gather elements in a WR,
* must be 1 to prevent libmlx5 from trying to affect
@@ -288,124 +351,204 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
.max_send_sge = 1,
},
.qp_type = IBV_QPT_RAW_PACKET,
- /* Do *NOT* enable this, completions events are managed per
- * TX burst. */
+ /*
+ * Do *NOT* enable this, completions events are managed per
+ * Tx burst.
+ */
.sq_sig_all = 0,
.pd = priv->pd,
- .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+ .comp_mask = IBV_QP_INIT_ATTR_PD,
};
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
- tmpl.txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE);
- tmpl.txq.inline_en = 1;
- /* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
- tmpl.txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
- /* To minimize the size of data set, avoid requesting
- * too large WQ.
- */
- attr.init.cap.max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
- (RTE_CACHE_LINE_SIZE - 1)) /
- RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
- int inline_diff = tmpl.txq.max_inline - max_tso_inline;
-
- /*
- * Adjust inline value as Verbs aggregates
- * tso_inline and txq_inline fields.
- */
- attr.init.cap.max_inline_data = inline_diff > 0 ?
- inline_diff *
- RTE_CACHE_LINE_SIZE :
- 0;
- } else {
- attr.init.cap.max_inline_data =
- tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
- }
+ if (txq_data->inline_en)
+ attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
+ if (txq_data->tso_en) {
+ attr.init.max_tso_header = txq_ctrl->max_tso_header;
+ attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- if (priv->tso) {
- attr.init.max_tso_header =
- max_tso_inline * RTE_CACHE_LINE_SIZE;
- attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
- tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
- max_tso_inline);
- tmpl.txq.tso_en = 1;
- }
- if (priv->tunnel_en)
- tmpl.txq.tunnel_en = 1;
- tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
+ tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ret = (errno ? errno : EINVAL);
- ERROR("%p: QP creation failure: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP creation failure", (void *)txq_ctrl);
goto error;
}
- DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
- " max_inline_data=%u",
- attr.init.cap.max_send_wr,
- attr.init.cap.max_send_sge,
- attr.init.cap.max_inline_data);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
- (IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
goto error;
}
- ret = txq_setup(&tmpl, txq_ctrl);
- if (ret) {
- ERROR("%p: cannot initialize TX queue structure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
- txq_alloc_elts(&tmpl, desc);
- attr.mod = (struct ibv_exp_qp_attr){
+ attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
+ ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
goto error;
}
- /* Clean up txq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
- txq_cleanup(txq_ctrl);
- *txq_ctrl = tmpl;
- DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
- /* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
- assert(ret == 0);
- return 0;
+ txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
+ txq_ctrl->socket);
+ if (!txq_ibv) {
+ ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ goto error;
+ }
+ obj.cq.in = tmpl.cq;
+ obj.cq.out = &cq_info;
+ obj.qp.in = tmpl.qp;
+ obj.qp.out = &qp;
+ ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ if (ret != 0)
+ goto error;
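+	/*
+	 * The CQE size is controlled by the MLX5_CQE_SIZE environment
+	 * variable; the data path assumes one cache line per CQE.
+	 */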
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ goto error;
+ }
+ txq_data->cqe_n = log2above(cq_info.cqe_cnt);
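+	/* Keep the QP number pre-shifted, ready for the WQE control segment. */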
+ txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
+ txq_data->wqes = qp.sq.buf;
+ txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
+ txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
+ txq_data->bf_reg = qp.bf.reg;
+ txq_data->cq_db = cq_info.dbrec;
+ txq_data->cqes =
+ (volatile struct mlx5_cqe (*)[])
+ (uintptr_t)cq_info.buf;
+ txq_data->cq_ci = 0;
+ txq_data->cq_pi = 0;
+ txq_data->wqe_ci = 0;
+ txq_data->wqe_pi = 0;
+ txq_ibv->qp = tmpl.qp;
+ txq_ibv->cq = tmpl.cq;
+ rte_atomic32_inc(&txq_ibv->refcnt);
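+	/*
+	 * The UAR mmap offset is needed to remap the doorbell register
+	 * (e.g. in secondary processes); bail out if libmlx5 is too old
+	 * to report it.
+	 */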
+ if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
+ txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+ } else {
+ ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ goto error;
+ }
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ return txq_ibv;
error:
- txq_cleanup(&tmpl);
- assert(ret > 0);
+ if (tmpl.cq)
+ claim_zero(ibv_destroy_cq(tmpl.cq));
+ if (tmpl.qp)
+ claim_zero(ibv_destroy_qp(tmpl.qp));
+ return NULL;
+}
+
+/**
+ * Get a Tx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ *   Queue index in the DPDK Tx queue array.
+ *
+ * @return
+ * The Verbs object if it exists.
+ */
+struct mlx5_txq_ibv*
+mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_txq_ctrl *txq_ctrl;
+
+ if (idx >= priv->txqs_n)
+ return NULL;
+ if (!(*priv->txqs)[idx])
+ return NULL;
+ txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (txq_ctrl->ibv) {
+ rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ctrl->ibv,
+ rte_atomic32_read(&txq_ctrl->ibv->refcnt));
+ }
+ return txq_ctrl->ibv;
+}
+
+/**
+ * Release a Tx queue Verbs object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ *
+ * @return
+ * 0 on success, errno on failure.
+ */
+int
+mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+{
+ (void)priv;
+ assert(txq_ibv);
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
+ claim_zero(ibv_destroy_qp(txq_ibv->qp));
+ claim_zero(ibv_destroy_cq(txq_ibv->cq));
+ LIST_REMOVE(txq_ibv, next);
+ rte_free(txq_ibv);
+ return 0;
+ }
+ return EBUSY;
+}
+
+/**
+ * Return true if a single reference exists on the object.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param txq_ibv
+ * Verbs Tx queue object.
+ */
+int
+mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+{
+ (void)priv;
+ assert(txq_ibv);
+ return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
+}
+
+/**
+ * Verify the Verbs Tx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_ibv_verify(struct priv *priv)
+{
+ int ret = 0;
+ struct mlx5_txq_ibv *txq_ibv;
+
+ LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
+ DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
+ (void *)txq_ibv);
+ ++ret;
+ }
return ret;
}
/**
- * DPDK callback to configure a TX queue.
+ * Create a DPDK Tx queue.
*
- * @param dev
- * Pointer to Ethernet device structure.
+ * @param priv
+ * Pointer to private structure.
* @param idx
* TX queue index.
* @param desc
@@ -413,164 +556,236 @@ error:
* @param socket
* NUMA socket on which memory must be allocated.
* @param[in] conf
- * Thresholds parameters.
+ * Thresholds parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * A DPDK queue object on success.
*/
-int
-mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
- struct txq *txq = (*priv->txqs)[idx];
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- int ret;
+ const unsigned int max_tso_inline =
+ ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ struct mlx5_txq_ctrl *tmpl;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.flags = conf->txq_flags;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
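+	/* The ring size is stored as log2; the data path expects it that way. */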
+ tmpl->txq.elts_n = log2above(desc);
+ if (priv->mps == MLX5_MPW_ENHANCED)
+ tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+ /* MRs will be registered in mp2mr[] later. */
+	DEBUG("priv->device_attr.orig_attr.max_qp_wr is %d",
+	      priv->device_attr.orig_attr.max_qp_wr);
+	DEBUG("priv->device_attr.orig_attr.max_sge is %d",
+	      priv->device_attr.orig_attr.max_sge);
+ if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ unsigned int ds_cnt;
- priv_lock(priv);
- if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("%p: number of descriptors requested for TX queue %u"
- " must be higher than MLX5_TX_COMP_THRESH, using"
- " %u instead of %u",
- (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
- desc = MLX5_TX_COMP_THRESH + 1;
- }
- if (!rte_is_power_of_2(desc)) {
- desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in TX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
- }
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
- }
- if (txq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)txq);
- if (priv->started) {
- priv_unlock(priv);
- return -EEXIST;
- }
- (*priv->txqs)[idx] = NULL;
- txq_cleanup(txq_ctrl);
- /* Resize if txq size is changed. */
- if (txq_ctrl->txq.elts_n != log2above(desc)) {
- txq_ctrl = rte_realloc(txq_ctrl,
- sizeof(*txq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
- RTE_CACHE_LINE_SIZE);
- if (!txq_ctrl) {
- ERROR("%p: unable to reallocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
- }
+ tmpl->txq.max_inline =
+ ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
+ tmpl->txq.inline_en = 1;
+ /* TSO and MPS can't be enabled concurrently. */
+ assert(!priv->tso || !priv->mps);
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ tmpl->txq.inline_max_packet_sz =
+ priv->inline_max_packet_sz;
+			/* To minimize the size of the data set, avoid
+			 * requesting too large a WQ.
+ */
+ tmpl->max_inline_data =
+ ((RTE_MIN(priv->txq_inline,
+ priv->inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else if (priv->tso) {
+ int inline_diff = tmpl->txq.max_inline - max_tso_inline;
+
+ /*
+ * Adjust inline value as Verbs aggregates
+ * tso_inline and txq_inline fields.
+ */
+ tmpl->max_inline_data = inline_diff > 0 ?
+ inline_diff *
+ RTE_CACHE_LINE_SIZE :
+ 0;
+ } else {
+ tmpl->max_inline_data =
+ tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
}
- } else {
- txq_ctrl =
- rte_calloc_socket("TXQ", 1,
- sizeof(*txq_ctrl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
- if (txq_ctrl == NULL) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- priv_unlock(priv);
- return -ENOMEM;
+ /*
+		 * Check whether the inline size is so large that it would
+		 * make the WQE DS count overflow.
+		 * The calculation accounts for:
+ * WQE CTRL (1 DS)
+ * WQE ETH (1 DS)
+ * Inline part (N DS)
+ */
+ ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ if (ds_cnt > MLX5_DSEG_MAX) {
+ unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+ MLX5_WQE_DWORD_SIZE;
+
+ max_inline = max_inline - (max_inline %
+ RTE_CACHE_LINE_SIZE);
+			WARN("txq inline is too large (%d), setting it to "
+ "the maximum possible: %d\n",
+ priv->txq_inline, max_inline);
+ tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
- ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
- if (ret)
- rte_free(txq_ctrl);
- else {
- txq_ctrl->txq.stats.idx = idx;
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq_ctrl);
- (*priv->txqs)[idx] = &txq_ctrl->txq;
+ if (priv->tso) {
+ tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
+ max_tso_inline);
+ tmpl->txq.tso_en = 1;
}
- priv_unlock(priv);
- return -ret;
+ if (priv->tunnel_en)
+ tmpl->txq.tunnel_en = 1;
+ tmpl->txq.elts =
+ (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+ tmpl->txq.stats.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
+ return tmpl;
}
/**
- * DPDK callback to release a TX queue.
+ * Get a Tx queue.
*
- * @param dpdk_txq
- * Generic TX queue pointer.
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists.
*/
-void
-mlx5_tx_queue_release(void *dpdk_txq)
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct txq_ctrl *txq_ctrl;
- struct priv *priv;
- unsigned int i;
+ struct mlx5_txq_ctrl *ctrl = NULL;
- if (mlx5_is_secondary())
- return;
+ if ((*priv->txqs)[idx]) {
+ ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
+ txq);
+ unsigned int i;
- if (txq == NULL)
- return;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- priv = txq_ctrl->priv;
- priv_lock(priv);
- for (i = 0; (i != priv->txqs_n); ++i)
- if ((*priv->txqs)[i] == txq) {
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq_ctrl);
- (*priv->txqs)[i] = NULL;
- break;
+ mlx5_priv_txq_ibv_get(priv, idx);
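+		/*
+		 * Take a reference on every memory region cached in the
+		 * queue so it stays registered while the queue is shared.
+		 */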
+ for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+ struct mlx5_mr *mr = NULL;
+
+ (void)mr;
+ if (ctrl->txq.mp2mr[i]) {
+ mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
+ assert(mr);
+ }
}
- txq_cleanup(txq_ctrl);
- rte_free(txq_ctrl);
- priv_unlock(priv);
+ rte_atomic32_inc(&ctrl->refcnt);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
+ }
+ return ctrl;
}
/**
- * DPDK callback for TX in secondary processes.
- *
- * This function configures all queues from primary process information
- * if necessary before reverting to the normal TX burst callback.
+ * Release a Tx queue.
*
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
*
* @return
- * Number of packets successfully transmitted (<= pkts_n).
+ * 0 on success, errno on failure.
*/
-uint16_t
-mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
+int
+mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
{
- struct txq *txq = dpdk_txq;
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
- struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
- struct priv *primary_priv;
- unsigned int index;
+ unsigned int i;
+ struct mlx5_txq_ctrl *txq;
- if (priv == NULL)
+ if (!(*priv->txqs)[idx])
return 0;
- primary_priv =
- mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
- /* Look for queue index in both private structures. */
- for (index = 0; index != priv->txqs_n; ++index)
- if (((*primary_priv->txqs)[index] == txq) ||
- ((*priv->txqs)[index] == txq))
- break;
- if (index == priv->txqs_n)
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ (void *)txq, rte_atomic32_read(&txq->refcnt));
+ if (txq->ibv) {
+ int ret;
+
+ ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
+ if (!ret)
+ txq->ibv = NULL;
+ }
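+	/* Drop the references taken on the cached memory regions. */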
+ for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
+ if (txq->txq.mp2mr[i]) {
+ priv_mr_release(priv, txq->txq.mp2mr[i]);
+ txq->txq.mp2mr[i] = NULL;
+ }
+ }
+ if (rte_atomic32_dec_and_test(&txq->refcnt)) {
+ txq_free_elts(txq);
+ LIST_REMOVE(txq, next);
+ rte_free(txq);
+ (*priv->txqs)[idx] = NULL;
return 0;
- txq = (*priv->txqs)[index];
- return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
+ }
+ return EBUSY;
+}
+
+/**
+ * Verify if the queue can be released.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ *
+ * @return
+ * 1 if the queue can be released.
+ */
+int
+mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+{
+ struct mlx5_txq_ctrl *txq;
+
+ if (!(*priv->txqs)[idx])
+ return -1;
+ txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ return (rte_atomic32_read(&txq->refcnt) == 1);
+}
+
+/**
+ * Verify the Tx queue list is empty.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_txq_verify(struct priv *priv)
+{
+ struct mlx5_txq_ctrl *txq;
+ int ret = 0;
+
+ LIST_FOREACH(txq, &priv->txqsctrl, next) {
+ DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
+ (void *)txq);
+ ++ret;
+ }
+ return ret;
}
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index a824787f..218ae831 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -128,11 +128,13 @@ pmd_drv_log_basename(const char *s)
#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#else /* NDEBUG */
#define DEBUG(...) (void)0
#define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
#endif /* NDEBUG */
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 1b0fa40a..6fc315ef 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -36,22 +36,15 @@
#include <assert.h>
#include <stdint.h>
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ethdev.h>
#include <rte_common.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
/**
- * Configure a VLAN filter.
+ * DPDK callback to configure a VLAN filter.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -61,14 +54,16 @@
* Toggle filter.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
-static int
-vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+int
+mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
+ int ret = 0;
+ priv_lock(priv);
DEBUG("%p: %s VLAN filter ID %" PRIu16,
(void *)dev, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
@@ -76,13 +71,15 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (priv->vlan_filter[i] == vlan_id)
break;
/* Check if there's room for another VLAN filter. */
- if (i == RTE_DIM(priv->vlan_filter))
- return ENOMEM;
+ if (i == RTE_DIM(priv->vlan_filter)) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (i < priv->vlan_filter_n) {
assert(priv->vlan_filter_n != 0);
/* Enabling an existing VLAN filter has no effect. */
if (on)
- return 0;
+ goto out;
/* Remove VLAN filter from list. */
--priv->vlan_filter_n;
memmove(&priv->vlan_filter[i],
@@ -94,41 +91,16 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
assert(i == priv->vlan_filter_n);
/* Disabling an unknown VLAN filter has no effect. */
if (!on)
- return 0;
+ goto out;
/* Add new VLAN filter. */
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
- /* Rehash flows in all hash RX queues. */
- priv_mac_addrs_disable(priv);
- priv_special_flow_disable_all(priv);
- return priv_rehash_flows(priv);
-}
-
-/**
- * DPDK callback to configure a VLAN filter.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param vlan_id
- * VLAN ID to filter.
- * @param on
- * Toggle filter.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
- struct priv *priv = dev->data->dev_private;
- int ret;
-
- priv_lock(priv);
- ret = vlan_filter_set(dev, vlan_id, on);
+ if (dev->data->dev_started)
+ priv_dev_traffic_restart(priv, dev);
+out:
priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ return ret;
}
/**
@@ -144,22 +116,24 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
static void
priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
{
- struct rxq *rxq = (*priv->rxqs)[idx];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
- struct ibv_exp_wq_attr mod;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
uint16_t vlan_offloads =
- (on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |
+ (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
0;
int err;
DEBUG("set VLAN offloads 0x%x for port %d queue %d",
vlan_offloads, rxq->port_id, idx);
- mod = (struct ibv_exp_wq_attr){
- .attr_mask = IBV_EXP_WQ_ATTR_VLAN_OFFLOADS,
- .vlan_offloads = vlan_offloads,
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_FLAGS,
+ .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
+ .flags = vlan_offloads,
};
- err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
+ err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
if (err) {
 		ERROR("%p: failed to modify stripping mode: %s",
(void *)priv, strerror(err));
@@ -210,7 +184,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
* @param mask
* VLAN offload bit mask.
*/
-void
+int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct priv *priv = dev->data->dev_private;
@@ -221,7 +195,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (!priv->hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
- return;
+ return 0;
}
/* Run on every RX queue and set/reset VLAN stripping. */
@@ -230,4 +204,6 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
priv_unlock(priv);
}
+
+ return 0;
}
diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile
new file mode 100644
index 00000000..815c3bae
--- /dev/null
+++ b/drivers/net/mrvl/Makefile
@@ -0,0 +1,68 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
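+# MUSDK is an external dependency; its location must be supplied via
+# LIBMUSDK_PATH for all targets except "clean" and "config".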
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mrvl.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mrvl_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_ARCH_DMA_ADDR_T_64BIT
+CFLAGS += -DCONF_PP2_BPOOL_COOKIE_SIZE=32
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_qos.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
new file mode 100644
index 00000000..29361652
--- /dev/null
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -0,0 +1,2294 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Semihalf nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK;
+ * keep only one definition.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include <drivers/mv_pp2.h>
+#include <drivers/mv_pp2_bpool.h>
+#include <drivers/mv_pp2_hif.h>
+
+#include <fcntl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+
+/* bitmask with reserved hifs */
+#define MRVL_MUSDK_HIFS_RESERVED 0x0F
+/* bitmask with reserved bpools */
+#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
+/* bitmask with reserved kernel RSS tables */
+#define MRVL_MUSDK_RSS_RESERVED 0x01
+/* maximum number of available hifs */
+#define MRVL_MUSDK_HIFS_MAX 9
+
+/* prefetch shift */
+#define MRVL_MUSDK_PREFETCH_SHIFT 2
+
+/* TCAM has 25 entries reserved for uc/mc filtering */
+#define MRVL_MAC_ADDRS_MAX 25
+#define MRVL_MATCH_LEN 16
+#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
+/* Maximum allowable packet size */
+#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
+
+#define MRVL_IFACE_NAME_ARG "iface"
+#define MRVL_CFG_ARG "cfg"
+
+#define MRVL_BURST_SIZE 64
+
+#define MRVL_ARP_LENGTH 28
+
+#define MRVL_COOKIE_ADDR_INVALID ~0ULL
+
+#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
+#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
+
+/* Memory size (in bytes) for MUSDK dma buffers */
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+static const char * const valid_args[] = {
+ MRVL_IFACE_NAME_ARG,
+ MRVL_CFG_ARG,
+ NULL
+};
+
+static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+static struct pp2_hif *hifs[RTE_MAX_LCORE];
+static int used_bpools[PP2_NUM_PKT_PROC] = {
+ MRVL_MUSDK_BPOOLS_RESERVED,
+ MRVL_MUSDK_BPOOLS_RESERVED
+};
+
+struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
+int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
+uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+
+/*
+ * A shadow queue structure was introduced for buffer bookkeeping, to
+ * enable buffer harvesting based on the loopback port.
+ *
+ * Before a packet is sent, its buffer information (pp2_buff_inf) is
+ * stored in the shadow queue. Once the packet has been transmitted and
+ * the buffer is no longer needed, it is released back to its original
+ * hardware pool, provided it originated from an interface.
+ * If it was generated by the application itself (i.e. the mbuf->port
+ * field is 0xff), it is released to the software mempool instead.
+ */
+struct mrvl_shadow_txq {
+ int head; /* write index - used when sending buffers */
+ int tail; /* read index - used when releasing buffers */
+ u16 size; /* queue occupied size */
+ u16 num_to_release; /* number of buffers sent, that can be released */
+ struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
+};
+
+struct mrvl_rxq {
+ struct mrvl_priv *priv;
+ struct rte_mempool *mp;
+ int queue_id;
+ int port_id;
+ int cksum_enabled;
+ uint64_t bytes_recv;
+ uint64_t drop_mac;
+};
+
+struct mrvl_txq {
+ struct mrvl_priv *priv;
+ int queue_id;
+ int port_id;
+ uint64_t bytes_sent;
+};
+
+/*
+ * Every Tx queue should have a dedicated shadow Tx queue.
+ *
+ * Port IDs assigned by DPDK might not start at zero or be contiguous,
+ * so as a workaround define shadow queues for every possible port to
+ * guarantee each one has a slot.
+ */
+struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
+
+/** Number of ports configured. */
+int mrvl_ports_nb;
+static int mrvl_lcore_first;
+static int mrvl_lcore_last;
+
+static inline int
+mrvl_get_bpool_size(int pp2_id, int pool_id)
+{
+ int i;
+ int size = 0;
+
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
+ size += mrvl_port_bpool_size[pp2_id][pool_id][i];
+
+ return size;
+}
+
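+/* Reserve and return the next free bit: the one just above the highest set bit. */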
+static inline int
+mrvl_reserve_bit(int *bitmap, int max)
+{
+ int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
+
+ if (n >= max)
+ return -1;
+
+ *bitmap |= 1 << n;
+
+ return n;
+}
+
+/**
+ * Configure RSS based on the DPDK RSS configuration.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
+{
+ if (rss_conf->rss_key)
+ RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
+
+ if (rss_conf->rss_hf == 0) {
+ priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
+ } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_2_TUPLE;
+ } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_5_TUPLE;
+ priv->rss_hf_tcp = 1;
+ } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ priv->ppio_params.inqs_params.hash_type =
+ PP2_PPIO_HASH_T_5_TUPLE;
+ priv->rss_hf_tcp = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Ethernet device configuration.
+ *
+ * Prepare the driver for a given number of TX and RX queues and
+ * configure RSS.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_configure(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
+ dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
+ dev->data->dev_conf.rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ RTE_LOG(INFO, PMD,
+ "L2 CRC stripping is always enabled in hw\n");
+ dev->data->dev_conf.rxmode.hw_strip_crc = 1;
+ }
+
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
+ RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.split_hdr_size) {
+ RTE_LOG(INFO, PMD, "Split headers not supported\n");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_scatter) {
+ RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_conf.rxmode.enable_lro) {
+ RTE_LOG(INFO, PMD, "LRO not supported\n");
+ return -EINVAL;
+ }
+
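+	/* For jumbo frames, derive the MTU from the maximum Rx packet length. */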
+ if (dev->data->dev_conf.rxmode.jumbo_frame)
+ dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ ret = mrvl_configure_rxqs(priv, dev->data->port_id,
+ dev->data->nb_rx_queues);
+ if (ret < 0)
+ return ret;
+
+ priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
+ priv->ppio_params.maintain_stats = 1;
+ priv->nb_rx_queues = dev->data->nb_rx_queues;
+
+ if (dev->data->nb_rx_queues == 1 &&
+ dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
+ priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
+
+ return 0;
+ }
+
+ return mrvl_configure_rss(priv,
+ &dev->data->dev_conf.rx_adv_conf.rss_conf);
+}
+
+/**
+ * DPDK callback to change the MTU.
+ *
+ * Setting the MTU affects hardware MRU (packets larger than the MRU
+ * will be dropped).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mtu
+ * New MTU.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ /* extra MV_MH_SIZE bytes are required for Marvell tag */
+ uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ int ret;
+
+ if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
+ return -EINVAL;
+
+ ret = pp2_ppio_set_mru(priv->ppio, mru);
+ if (ret)
+ return ret;
+
+ return pp2_ppio_set_mtu(priv->ppio, mtu);
+}
+
+/**
+ * DPDK callback to bring the link up.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_enable(priv->ppio);
+ if (ret)
+ return ret;
+
+ /*
+	 * The MTU/MRU can only be updated after pp2_ppio_enable() has been
+	 * called at least once, since pp2_ppio_enable() changes port->t_mode
+	 * from the default 0 to PP2_TRAFFIC_INGRESS_EGRESS.
+	 *
+	 * Set the MTU to the default DPDK value here.
+ */
+ ret = mrvl_mtu_set(dev, dev->data->mtu);
+ if (ret)
+ pp2_ppio_disable(priv->ppio);
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return ret;
+}
+
+/**
+ * DPDK callback to bring the link down.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_disable(priv->ppio);
+ if (ret)
+ return ret;
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ return ret;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mrvl_dev_start(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char match[MRVL_MATCH_LEN];
+ int ret;
+
+ snprintf(match, sizeof(match), "ppio-%d:%d",
+ priv->pp_id, priv->ppio_id);
+ priv->ppio_params.match = match;
+
+ /*
+	 * Set the maximum bpool size for the refill feature to 1.5 times the
+	 * configured size. If the bpool size exceeds this value, superfluous
+	 * buffers are removed.
+ */
+ priv->bpool_max_size = priv->bpool_init_size +
+ (priv->bpool_init_size >> 1);
+ /*
+ * Calculate the minimum bpool size for refill feature as follows:
+	 * two default burst sizes multiplied by the number of Rx queues.
+	 * If the bpool size drops below this value, new buffers are
+	 * added to the pool.
+ */
+ priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
+
+ ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
+ if (ret)
+ return ret;
+
+ /*
+	 * In case there are some stale uc/mc MAC addresses, flush them
+ * here. It cannot be done during mrvl_dev_close() as port information
+ * is already gone at that point (due to pp2_ppio_deinit() in
+ * mrvl_dev_stop()).
+ */
+ if (!priv->uc_mc_flushed) {
+ ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Failed to flush uc/mc filter list\n");
+ goto out;
+ }
+ priv->uc_mc_flushed = 1;
+ }
+
+ if (!priv->vlan_flushed) {
+ ret = pp2_ppio_flush_vlan(priv->ppio);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+ /*
+ * TODO
+			 * once pp2_ppio_flush_vlan() is supported, jump to out:
+ * goto out;
+ */
+ }
+ priv->vlan_flushed = 1;
+ }
+
+ /* For default QoS config, don't start classifier. */
+ if (mrvl_qos_cfg) {
+ ret = mrvl_start_qos_mapping(priv);
+ if (ret) {
+ pp2_ppio_deinit(priv->ppio);
+ return ret;
+ }
+ }
+
+ ret = mrvl_dev_set_link_up(dev);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ pp2_ppio_deinit(priv->ppio);
+ return ret;
+}
+
+/**
+ * Flush receive queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_rx_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ int ret, num;
+
+ do {
+ struct mrvl_rxq *q = dev->data->rx_queues[i];
+ struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
+
+ num = MRVL_PP2_RXD_MAX;
+ ret = pp2_ppio_recv(q->priv->ppio,
+ q->priv->rxq_map[q->queue_id].tc,
+ q->priv->rxq_map[q->queue_id].inq,
+ descs, (uint16_t *)&num);
+ } while (ret == 0 && num);
+ }
+}
+
+/**
+ * Flush transmit shadow queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
+{
+ int i;
+
+ RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ struct mrvl_shadow_txq *sq =
+ &shadow_txqs[dev->data->port_id][i];
+
+ while (sq->tail != sq->head) {
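+			/* Rebuild the full mbuf address from the 32-bit cookie. */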
+ uint64_t addr = cookie_addr_high |
+ sq->ent[sq->tail].buff.cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+
+ memset(sq, 0, sizeof(*sq));
+ }
+}
+
+/**
+ * Flush hardware bpool (buffer-pool).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_bpool(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ uint32_t num;
+ int ret;
+
+ ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
+ return;
+ }
+
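+	/* Drain the pool one buffer at a time, freeing the backing mbufs. */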
+ while (num--) {
+ struct pp2_buff_inf inf;
+ uint64_t addr;
+
+ ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
+ &inf);
+ if (ret)
+ break;
+
+ addr = cookie_addr_high | inf.cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+}
+
+/**
+ * DPDK callback to stop the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_dev_stop(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ mrvl_dev_set_link_down(dev);
+ mrvl_flush_rx_queues(dev);
+ mrvl_flush_tx_shadow_queues(dev);
+ if (priv->qos_tbl)
+ pp2_cls_qos_tbl_deinit(priv->qos_tbl);
+ pp2_ppio_deinit(priv->ppio);
+ priv->ppio = NULL;
+}
+
+/**
+ * DPDK callback to close the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_dev_close(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ size_t i;
+
+ for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
+ struct pp2_ppio_tc_params *tc_params =
+ &priv->ppio_params.inqs_params.tcs_params[i];
+
+ if (tc_params->inqs_params) {
+ rte_free(tc_params->inqs_params);
+ tc_params->inqs_params = NULL;
+ }
+ }
+
+ mrvl_flush_bpool(dev);
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ /*
+ * TODO
+	 * once MUSDK provides the necessary API, use it here.
+ */
+ struct ethtool_cmd edata;
+ struct ifreq req;
+ int ret, fd;
+
+ edata.cmd = ETHTOOL_GSET;
+
+ strcpy(req.ifr_name, dev->data->name);
+ req.ifr_data = (void *)&edata;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd == -1)
+ return -EFAULT;
+
+ ret = ioctl(fd, SIOCETHTOOL, &req);
+ if (ret == -1) {
+ close(fd);
+ return -EFAULT;
+ }
+
+ close(fd);
+
+ switch (ethtool_cmd_speed(&edata)) {
+ case SPEED_10:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case SPEED_100:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case SPEED_1000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case SPEED_10000:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ default:
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ }
+
+ dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+ ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+ ETH_LINK_FIXED;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+}
+
+/**
+ * DPDK callback to enable allmulti mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
+ if (ret)
+		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+}
+
+/**
+ * DPDK callback to disable allmulticast mode.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+static void
+mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ ret = pp2_ppio_remove_mac_addr(priv->ppio,
+ dev->data->mac_addrs[index].addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf),
+ &dev->data->mac_addrs[index]);
+ RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ char buf[ETHER_ADDR_FMT_SIZE];
+ int ret;
+
+ if (index == 0)
+		/* For setting index 0, mrvl_mac_addr_set() should be used. */
+ return -1;
+
+ /*
+	 * The maximum number of uc addresses can be tuned via the mvpp2x
+	 * kernel module parameter uc_filter_max. The maximum number of mc
+	 * addresses is then MRVL_MAC_ADDRS_MAX - uc_filter_max. They
+	 * currently default to 4 and 21 respectively.
+	 *
+	 * If more than uc_filter_max uc addresses are added to the filter
+	 * list, the NIC switches to promiscuous mode automatically.
+	 *
+	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are
+	 * added to the filter list, the NIC switches to all-multicast mode
+	 * automatically.
+ */
+ ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ */
+static void
+mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ /*
+ * TODO
+ * Port stops sending packets if pp2_ppio_set_mac_addr()
+	 * is called after pp2_ppio_enable(). As a quick fix,
+	 * enable the port once again.
+ */
+ pp2_ppio_enable(priv->ppio);
+}
+
+/**
+ * DPDK callback to get device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Stats structure output buffer.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ uint64_t drop_mac = 0;
+ unsigned int i, idx, ret;
+
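+	/*
+	 * Packets dropped by MAC filtering are counted per Rx queue and
+	 * subtracted from the port-level Rx packet counter below.
+	 */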
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+ struct pp2_ppio_inq_statistics rx_stats;
+
+ if (!rxq)
+ continue;
+
+ idx = rxq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ RTE_LOG(ERR, PMD,
+ "rx queue %d stats out of range (0 - %d)\n",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ continue;
+ }
+
+ ret = pp2_ppio_inq_get_statistics(priv->ppio,
+ priv->rxq_map[idx].tc,
+ priv->rxq_map[idx].inq,
+ &rx_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD,
+ "Failed to update rx queue %d stats\n", idx);
+ break;
+ }
+
+ stats->q_ibytes[idx] = rxq->bytes_recv;
+ stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
+ stats->q_errors[idx] = rx_stats.drop_early +
+ rx_stats.drop_fullq +
+ rx_stats.drop_bm +
+ rxq->drop_mac;
+ stats->ibytes += rxq->bytes_recv;
+ drop_mac += rxq->drop_mac;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+ struct pp2_ppio_outq_statistics tx_stats;
+
+ if (!txq)
+ continue;
+
+ idx = txq->queue_id;
+ if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+ RTE_LOG(ERR, PMD,
+ "tx queue %d stats out of range (0 - %d)\n",
+ idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+ }
+
+ ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
+ &tx_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD,
+ "Failed to update tx queue %d stats\n", idx);
+ break;
+ }
+
+ stats->q_opackets[idx] = tx_stats.deq_desc;
+ stats->q_obytes[idx] = txq->bytes_sent;
+ stats->obytes += txq->bytes_sent;
+ }
+
+ ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ if (unlikely(ret)) {
+ RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ return ret;
+ }
+
+ stats->ipackets += ppio_stats.rx_packets - drop_mac;
+ stats->opackets += ppio_stats.tx_packets;
+ stats->imissed += ppio_stats.rx_fullq_dropped +
+ ppio_stats.rx_bm_dropped +
+ ppio_stats.rx_early_dropped +
+ ppio_stats.rx_fifo_dropped +
+ ppio_stats.rx_cls_dropped;
+ stats->ierrors = drop_mac;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_stats_reset(struct rte_eth_dev *dev)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+
+ pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
+ priv->rxq_map[i].inq, NULL, 1);
+ rxq->bytes_recv = 0;
+ rxq->drop_mac = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
+ txq->bytes_sent = 0;
+ }
+
+ pp2_ppio_get_statistics(priv->ppio, NULL, 1);
+}
+
+/**
+ * DPDK callback to get information about the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ * @param info
+ * Info structure output buffer.
+ */
+static void
+mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *info)
+{
+ info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+
+ info->max_rx_queues = MRVL_PP2_RXQ_MAX;
+ info->max_tx_queues = MRVL_PP2_TXQ_MAX;
+ info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
+
+ info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
+ info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
+ info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
+
+ info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
+ info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
+ info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
+
+ info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP;
+
+ /* By default packets are dropped if no descriptors are available */
+ info->default_rxconf.rx_drop_en = 1;
+
+ info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
+}
+
+/**
+ * Return supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure (unused).
+ *
+ * @return
+ * Const pointer to the table with supported packet types.
+ */
+static const uint32_t *
+mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP
+ };
+
+ return ptypes;
+}
+
+/**
+ * DPDK callback to get information about specific receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * Receive queue index.
+ * @param qinfo
+ * Receive queue information structure.
+ */
+static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int inq = priv->rxq_map[rx_queue_id].inq;
+ int tc = priv->rxq_map[rx_queue_id].tc;
+ struct pp2_ppio_tc_params *tc_params =
+ &priv->ppio_params.inqs_params.tcs_params[tc];
+
+ qinfo->mp = q->mp;
+ qinfo->nb_desc = tc_params->inqs_params[inq].size;
+}
+
+/**
+ * DPDK callback to get information about specific transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param tx_queue_id
+ * Transmit queue index.
+ * @param qinfo
+ * Transmit queue information structure.
+ */
+static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ qinfo->nb_desc =
+ priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+}
+
+/**
+ * DPDK callback to configure a VLAN filter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param vlan_id
+ * VLAN ID to filter.
+ * @param on
+ * Toggle filter.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
+ pp2_ppio_remove_vlan(priv->ppio, vlan_id);
+}
+
+/**
+ * Release buffers to hardware bpool (buffer-pool).
+ *
+ * @param rxq
+ * Receive queue pointer.
+ * @param num
+ * Number of buffers to release to bpool.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
+{
+ struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
+ struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
+ int i, ret;
+ unsigned int core_id = rte_lcore_id();
+ struct pp2_hif *hif = hifs[core_id];
+ struct pp2_bpool *bpool = rxq->priv->bpool;
+
+ ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
+ if (ret)
+ return ret;
+
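+	/*
+	 * The 32-bit bpool cookie holds only the low half of the mbuf
+	 * virtual address; the high half must be common to all mbufs and
+	 * is remembered once here.
+	 */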
+ if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
+ cookie_addr_high =
+ (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
+
+ for (i = 0; i < num; i++) {
+ if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
+ != cookie_addr_high) {
+ RTE_LOG(ERR, PMD,
+ "mbuf virtual addr high 0x%lx out of range\n",
+ (uint64_t)mbufs[i] >> 32);
+ goto out;
+ }
+
+ entries[i].buff.addr =
+ rte_mbuf_data_iova_default(mbufs[i]);
+ entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
+ entries[i].bpool = bpool;
+ }
+
+ pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
+ mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
+
+ if (i != num)
+ goto out;
+
+ return 0;
+out:
+ for (; i < num; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ return -1;
+}
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ *   Thresholds parameters (unused).
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_rxconf *conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_rxq *rxq;
+ uint32_t min_size,
+ max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ int ret, tc, inq;
+
+ if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
+ /*
+		 * Unknown TC mapping; this queue has no valid TC/inq assignment.
+ */
+ RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+ idx, priv->ppio_id);
+ return -EFAULT;
+ }
+
+ min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
+ MRVL_PKT_EFFEC_OFFS;
+ if (min_size < max_rx_pkt_len) {
+ RTE_LOG(ERR, PMD,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+ max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
+ MRVL_PKT_EFFEC_OFFS,
+ max_rx_pkt_len);
+ return -EINVAL;
+ }
+
+ if (dev->data->rx_queues[idx]) {
+ rte_free(dev->data->rx_queues[idx]);
+ dev->data->rx_queues[idx] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+ if (!rxq)
+ return -ENOMEM;
+
+ rxq->priv = priv;
+ rxq->mp = mp;
+ rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->queue_id = idx;
+ rxq->port_id = dev->data->port_id;
+ mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
+
+	tc = priv->rxq_map[rxq->queue_id].tc;
+ inq = priv->rxq_map[rxq->queue_id].inq;
+ priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
+ desc;
+
+ ret = mrvl_fill_bpool(rxq, desc);
+ if (ret) {
+ rte_free(rxq);
+ return ret;
+ }
+
+ priv->bpool_init_size += desc;
+
+ dev->data->rx_queues[idx] = rxq;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the receive queue.
+ *
+ * @param rxq
+ * Generic receive queue pointer.
+ */
+static void
+mrvl_rx_queue_release(void *rxq)
+{
+ struct mrvl_rxq *q = rxq;
+ struct pp2_ppio_tc_params *tc_params;
+ int i, num, tc, inq;
+
+ if (!q)
+ return;
+
+ tc = q->priv->rxq_map[q->queue_id].tc;
+ inq = q->priv->rxq_map[q->queue_id].inq;
+ tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
+ num = tc_params->inqs_params[inq].size;
+ for (i = 0; i < num; i++) {
+ struct pp2_buff_inf inf;
+ uint64_t addr;
+
+ pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
+ addr = cookie_addr_high | inf.cookie;
+ rte_pktmbuf_free((struct rte_mbuf *)addr);
+ }
+
+ rte_free(q);
+}
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Transmit queue index.
+ * @param desc
+ * Number of descriptors to configure in the queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param conf
+ * Thresholds parameters (unused).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf __rte_unused)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq;
+
+ if (dev->data->tx_queues[idx]) {
+ rte_free(dev->data->tx_queues[idx]);
+ dev->data->tx_queues[idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->priv = priv;
+ txq->queue_id = idx;
+ txq->port_id = dev->data->port_id;
+ dev->data->tx_queues[idx] = txq;
+
+ priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
+ priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param txq
+ * Generic transmit queue pointer.
+ */
+static void
+mrvl_tx_queue_release(void *txq)
+{
+ struct mrvl_txq *q = txq;
+
+ if (!q)
+ return;
+
+ rte_free(q);
+}
+
+/**
+ * Update the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ return mrvl_configure_rss(priv, rss_conf);
+}
+
+/**
+ * DPDK callback to get RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rss_conf
+ * Pointer to RSS configuration.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ enum pp2_ppio_hash_type hash_type =
+ priv->ppio_params.inqs_params.hash_type;
+
+ rss_conf->rss_key = NULL;
+
+ if (hash_type == PP2_PPIO_HASH_T_NONE)
+ rss_conf->rss_hf = 0;
+ else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
+ rss_conf->rss_hf = ETH_RSS_IPV4;
+ else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
+ rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+ else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
+ rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+
+ return 0;
+}
+
+static const struct eth_dev_ops mrvl_ops = {
+ .dev_configure = mrvl_dev_configure,
+ .dev_start = mrvl_dev_start,
+ .dev_stop = mrvl_dev_stop,
+ .dev_set_link_up = mrvl_dev_set_link_up,
+ .dev_set_link_down = mrvl_dev_set_link_down,
+ .dev_close = mrvl_dev_close,
+ .link_update = mrvl_link_update,
+ .promiscuous_enable = mrvl_promiscuous_enable,
+ .allmulticast_enable = mrvl_allmulticast_enable,
+ .promiscuous_disable = mrvl_promiscuous_disable,
+ .allmulticast_disable = mrvl_allmulticast_disable,
+ .mac_addr_remove = mrvl_mac_addr_remove,
+ .mac_addr_add = mrvl_mac_addr_add,
+ .mac_addr_set = mrvl_mac_addr_set,
+ .mtu_set = mrvl_mtu_set,
+ .stats_get = mrvl_stats_get,
+ .stats_reset = mrvl_stats_reset,
+ .dev_infos_get = mrvl_dev_infos_get,
+ .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
+ .rxq_info_get = mrvl_rxq_info_get,
+ .txq_info_get = mrvl_txq_info_get,
+ .vlan_filter_set = mrvl_vlan_filter_set,
+ .rx_queue_setup = mrvl_rx_queue_setup,
+ .rx_queue_release = mrvl_rx_queue_release,
+ .tx_queue_setup = mrvl_tx_queue_setup,
+ .tx_queue_release = mrvl_tx_queue_release,
+ .rss_hash_update = mrvl_rss_hash_update,
+ .rss_hash_conf_get = mrvl_rss_hash_conf_get,
+};
+
+/**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ * @param l3_offset
+ * l3 packet offset.
+ * @param l4_offset
+ * l4 packet offset.
+ *
+ * @return
+ * Packet type information.
+ */
+static inline uint64_t
+mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
+ uint8_t *l3_offset, uint8_t *l4_offset)
+{
+ enum pp2_inq_l3_type l3_type;
+ enum pp2_inq_l4_type l4_type;
+ uint64_t packet_type;
+
+ pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+ pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+ packet_type = RTE_PTYPE_L2_ETHER;
+
+ switch (l3_type) {
+ case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ break;
+ case PP2_INQ_L3_TYPE_IPV4_OK:
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ break;
+ case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ break;
+ case PP2_INQ_L3_TYPE_IPV6_EXT:
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case PP2_INQ_L3_TYPE_ARP:
+ packet_type |= RTE_PTYPE_L2_ETHER_ARP;
+		/*
+		 * In case of ARP, l4_offset is set to a wrong value. Set it
+		 * to a proper one so that mbuf->l3_len can later be
+		 * calculated by subtracting l3_offset from l4_offset.
+		 */
+ *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
+ break;
+ default:
+ RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+ break;
+ }
+
+ switch (l4_type) {
+ case PP2_INQ_L4_TYPE_TCP:
+ packet_type |= RTE_PTYPE_L4_TCP;
+ break;
+ case PP2_INQ_L4_TYPE_UDP:
+ packet_type |= RTE_PTYPE_L4_UDP;
+ break;
+ default:
+ RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+ break;
+ }
+
+ return packet_type;
+}
+
+/**
+ * Get offload information from the received packet descriptor.
+ *
+ * @param desc
+ * Pointer to the received packet descriptor.
+ *
+ * @return
+ * Mbuf offload flags.
+ */
+static inline uint64_t
+mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
+{
+ uint64_t flags;
+ enum pp2_inq_desc_status status;
+
+ status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags = PKT_RX_IP_CKSUM_BAD;
+ else
+ flags = PKT_RX_IP_CKSUM_GOOD;
+
+ status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ return flags;
+}
+
+/**
+ * DPDK callback for receive.
+ *
+ * @param rxq
+ * Generic pointer to the receive queue.
+ * @param rx_pkts
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received.
+ */
+static uint16_t
+mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_rxq *q = rxq;
+ struct pp2_ppio_desc descs[nb_pkts];
+ struct pp2_bpool *bpool;
+ int i, ret, rx_done = 0;
+ int num;
+ unsigned int core_id = rte_lcore_id();
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ bpool = q->priv->bpool;
+
+ ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
+ q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
+ if (unlikely(ret < 0)) {
+ RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ return 0;
+ }
+ mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf;
+ uint8_t l3_offset, l4_offset;
+ enum pp2_inq_desc_status status;
+ uint64_t addr;
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct pp2_ppio_desc *pref_desc;
+ u64 pref_addr;
+
+ pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ pref_addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(pref_desc);
+ rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
+ rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
+ }
+
+ addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(&descs[i]);
+ mbuf = (struct rte_mbuf *)addr;
+ rte_pktmbuf_reset(mbuf);
+
+ /* drop packet in case of mac, overrun or resource error */
+ status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
+ if (unlikely(status != PP2_DESC_ERR_OK)) {
+ struct pp2_buff_inf binf = {
+ .addr = rte_mbuf_data_iova_default(mbuf),
+ .cookie = (pp2_cookie_t)(uint64_t)mbuf,
+ };
+
+ pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id]++;
+ q->drop_mac++;
+ continue;
+ }
+
+ mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
+ mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = q->port_id;
+ mbuf->packet_type =
+ mrvl_desc_to_packet_type_and_offset(&descs[i],
+ &l3_offset,
+ &l4_offset);
+ mbuf->l2_len = l3_offset;
+ mbuf->l3_len = l4_offset - l3_offset;
+
+ if (likely(q->cksum_enabled))
+ mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
+
+ rx_pkts[rx_done++] = mbuf;
+ q->bytes_recv += mbuf->pkt_len;
+ }
+
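+	/*
+	 * Bpool size hysteresis, with illustrative numbers only (the actual
+	 * min/init/max thresholds are computed during queue setup): with
+	 * bpool_min_size 16, bpool_init_size 128 and bpool_max_size 160,
+	 * the pool is refilled by MRVL_BURST_SIZE buffers once it drops to
+	 * 16 or below and drained back to 128 once it grows past 160.
+	 */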
+ if (rte_spinlock_trylock(&q->priv->lock) == 1) {
+ num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
+
+ if (unlikely(num <= q->priv->bpool_min_size ||
+ (!rx_done && num < q->priv->bpool_init_size))) {
+ ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ } else if (unlikely(num > q->priv->bpool_max_size)) {
+ int i;
+ int pkt_to_remove = num - q->priv->bpool_init_size;
+ struct rte_mbuf *mbuf;
+ struct pp2_buff_inf buff;
+
+ RTE_LOG(DEBUG, PMD,
+ "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ bpool->pp2_id, q->priv->ppio->port_id,
+ bpool->id, pkt_to_remove, num,
+ q->priv->bpool_init_size);
+
+ for (i = 0; i < pkt_to_remove; i++) {
+ pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ }
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id] -=
+ pkt_to_remove;
+ }
+ rte_spinlock_unlock(&q->priv->lock);
+ }
+
+ return rx_done;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ * Offload flags.
+ * @param packet_type
+ * Packet type bitfield.
+ * @param l3_type
+ * Pointer to the pp2_outq_l3_type enum variable.
+ * @param l4_type
+ * Pointer to the pp2_outq_l4_type enum variable.
+ * @param gen_l3_cksum
+ * Will be set to 1 in case l3 checksum is computed.
+ * @param gen_l4_cksum
+ * Will be set to 1 in case l4 checksum is computed.
+ *
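+ * Example (illustrative): a TCP/IPv4 mbuf whose packet_type includes
+ * RTE_PTYPE_L4_TCP and whose ol_flags carry PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_TCP_CKSUM yields PP2_OUTQ_L3_TYPE_IPV4 / PP2_OUTQ_L4_TYPE_TCP with
+ * both checksum generation flags set to 1.
+ *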
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static inline int
+mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+ enum pp2_outq_l3_type *l3_type,
+ enum pp2_outq_l4_type *l4_type,
+ int *gen_l3_cksum,
+ int *gen_l4_cksum)
+{
+	/*
+	 * Based on ol_flags, prepare the information for
+	 * pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
+	 * offloads.
+	 */
+ if (ol_flags & PKT_TX_IPV4) {
+ *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
+ *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
+ /* no checksum for ipv6 header */
+ *gen_l3_cksum = 0;
+ } else {
+		/* if it is neither IPv4 nor IPv6, stop processing */
+ return -1;
+ }
+
+ ol_flags &= PKT_TX_L4_MASK;
+ if ((packet_type & RTE_PTYPE_L4_TCP) &&
+ ol_flags == PKT_TX_TCP_CKSUM) {
+ *l4_type = PP2_OUTQ_L4_TYPE_TCP;
+ *gen_l4_cksum = 1;
+ } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+ ol_flags == PKT_TX_UDP_CKSUM) {
+ *l4_type = PP2_OUTQ_L4_TYPE_UDP;
+ *gen_l4_cksum = 1;
+ } else {
+ *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
+		/* no checksum for other types */
+ *gen_l4_cksum = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Release already sent buffers to bpool (buffer-pool).
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param hif
+ * Pointer to the MUSDK hardware interface.
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ * @param force
+ * Force releasing packets.
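+ *
+ * Wrap-around example (illustrative): with MRVL_PP2_TX_SHADOWQ_SIZE == 2048
+ * and sq->tail == 2046, releasing 4 buffers takes two bursts of
+ * pp2_bpool_put_buffs(): entries 2046-2047 first, then entries 0-1 once the
+ * tail index wraps.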
+ */
+static inline void
+mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
+ struct mrvl_shadow_txq *sq, int qid, int force)
+{
+ struct buff_release_entry *entry;
+ uint16_t nb_done = 0, num = 0, skip_bufs = 0;
+ int i, core_id = rte_lcore_id();
+
+ pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
+
+ sq->num_to_release += nb_done;
+
+ if (likely(!force &&
+ sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
+ return;
+
+ nb_done = sq->num_to_release;
+ sq->num_to_release = 0;
+
+ for (i = 0; i < nb_done; i++) {
+ entry = &sq->ent[sq->tail + num];
+ if (unlikely(!entry->buff.addr)) {
+ RTE_LOG(ERR, PMD,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ sq->tail, (u64)entry->buff.cookie,
+ (u64)entry->buff.addr);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ if (unlikely(!entry->bpool)) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | entry->buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ mrvl_port_bpool_size
+ [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
+ num++;
+ if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
+ goto skip;
+ continue;
+skip:
+ if (likely(num))
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ num += skip_bufs;
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ num = 0;
+ }
+
+ if (likely(num)) {
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ }
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ * Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_txq *q = txq;
+ struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
+ struct pp2_hif *hif = hifs[rte_lcore_id()];
+ struct pp2_ppio_desc descs[nb_pkts];
+ int i, ret, bytes_sent = 0;
+ uint16_t num, sq_free_size;
+ uint64_t addr;
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);
+
+ sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
+ if (unlikely(nb_pkts > sq_free_size)) {
+ RTE_LOG(DEBUG, PMD,
+ "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ nb_pkts, sq_free_size);
+ nb_pkts = sq_free_size;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+ int gen_l3_cksum, gen_l4_cksum;
+ enum pp2_outq_l3_type l3_type;
+ enum pp2_outq_l4_type l4_type;
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct rte_mbuf *pref_pkt_hdr;
+
+ pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ rte_mbuf_prefetch_part1(pref_pkt_hdr);
+ rte_mbuf_prefetch_part2(pref_pkt_hdr);
+ }
+
+ sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
+ sq->ent[sq->head].buff.addr =
+ rte_mbuf_data_iova_default(mbuf);
+ sq->ent[sq->head].bpool =
+ (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
+ NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+ sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size++;
+
+ pp2_ppio_outq_desc_reset(&descs[i]);
+ pp2_ppio_outq_desc_set_phys_addr(&descs[i],
+ rte_pktmbuf_iova(mbuf));
+ pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
+ pp2_ppio_outq_desc_set_pkt_len(&descs[i],
+ rte_pktmbuf_pkt_len(mbuf));
+
+ bytes_sent += rte_pktmbuf_pkt_len(mbuf);
+ /*
+ * in case unsupported ol_flags were passed
+ * do not update descriptor offload information
+ */
+ ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
+ &l3_type, &l4_type, &gen_l3_cksum,
+ &gen_l4_cksum);
+ if (unlikely(ret))
+ continue;
+
+ pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+ mbuf->l2_len,
+ mbuf->l2_len + mbuf->l3_len,
+ gen_l3_cksum, gen_l4_cksum);
+ }
+
+ num = nb_pkts;
+ pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
+	/* roll back shadow queue entries for packets that were not sent */
+ if (unlikely(num > nb_pkts)) {
+ for (i = nb_pkts; i < num; i++) {
+ sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
+ bytes_sent -=
+ rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
+ }
+ sq->size -= num - nb_pkts;
+ }
+
+ q->bytes_sent += bytes_sent;
+
+ return nb_pkts;
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_init_pp2(void)
+{
+ struct pp2_init_params init_params;
+
+ memset(&init_params, 0, sizeof(init_params));
+ init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
+ init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
+ init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
+
+ return pp2_init(&init_params);
+}
+
+/**
+ * Deinitialize packet processor.
+ */
+static void
+mrvl_deinit_pp2(void)
+{
+ pp2_deinit();
+}
+
+/**
+ * Create private device structure.
+ *
+ * @param dev_name
+ * Pointer to the port name passed in the initialization parameters.
+ *
+ * @return
+ * Pointer to the newly allocated private device structure.
+ */
+static struct mrvl_priv *
+mrvl_priv_create(const char *dev_name)
+{
+ struct pp2_bpool_params bpool_params;
+ char match[MRVL_MATCH_LEN];
+ struct mrvl_priv *priv;
+ int ret, bpool_bit;
+
+ priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
+ if (!priv)
+ return NULL;
+
+ ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
+ &priv->pp_id, &priv->ppio_id);
+ if (ret)
+ goto out_free_priv;
+
+ bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
+ PP2_BPOOL_NUM_POOLS);
+ if (bpool_bit < 0)
+ goto out_free_priv;
+ priv->bpool_bit = bpool_bit;
+
+ snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
+ priv->bpool_bit);
+ memset(&bpool_params, 0, sizeof(bpool_params));
+ bpool_params.match = match;
+ bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
+ ret = pp2_bpool_init(&bpool_params, &priv->bpool);
+ if (ret)
+ goto out_clear_bpool_bit;
+
+ priv->ppio_params.type = PP2_PPIO_T_NIC;
+ rte_spinlock_init(&priv->lock);
+
+ return priv;
+out_clear_bpool_bit:
+ used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
+out_free_priv:
+ rte_free(priv);
+ return NULL;
+}
+
+/**
+ * Create device representing Ethernet port.
+ *
+ * @param vdev
+ * Pointer to the virtual device backing the port.
+ * @param name
+ * Pointer to the port's name.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
+{
+ int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
+ struct rte_eth_dev *eth_dev;
+ struct mrvl_priv *priv;
+ struct ifreq req;
+
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ priv = mrvl_priv_create(name);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ eth_dev->data->mac_addrs =
+ rte_zmalloc("mac_addrs",
+ ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
+ if (!eth_dev->data->mac_addrs) {
+ RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ memset(&req, 0, sizeof(req));
+ strcpy(req.ifr_name, name);
+ ret = ioctl(fd, SIOCGIFHWADDR, &req);
+ if (ret)
+ goto out_free_mac;
+
+ memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+ req.ifr_addr.sa_data, ETHER_ADDR_LEN);
+
+ eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
+ eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
+ eth_dev->data->dev_private = priv;
+ eth_dev->device = &vdev->device;
+ eth_dev->dev_ops = &mrvl_ops;
+
+ return 0;
+out_free_mac:
+	rte_free(eth_dev->data->mac_addrs);
+out_free_priv:
+	rte_free(priv);
+out_free_dev:
+	rte_eth_dev_release_port(eth_dev);
+
+ return ret;
+}
+
+/**
+ * Cleanup previously created device representing Ethernet port.
+ *
+ * @param name
+ * Pointer to the port name.
+ */
+static void
+mrvl_eth_dev_destroy(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+ struct mrvl_priv *priv;
+
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return;
+
+ priv = eth_dev->data->dev_private;
+ pp2_bpool_deinit(priv->bpool);
+ rte_free(priv);
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+}
+
+/**
+ * Callback used by rte_kvargs_process() during argument parsing.
+ *
+ * @param key
+ * Pointer to the parsed key (unused).
+ * @param value
+ * Pointer to the parsed value.
+ * @param extra_args
+ * Pointer to the extra arguments which contains address of the
+ * table of pointers to parsed interface names.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+mrvl_get_ifnames(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ const char **ifnames = extra_args;
+
+ ifnames[mrvl_ports_nb++] = value;
+
+ return 0;
+}
+
+/**
+ * Initialize per-lcore MUSDK hardware interfaces (hifs).
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_init_hifs(void)
+{
+ struct pp2_hif_params params;
+ char match[MRVL_MATCH_LEN];
+ int i, ret;
+
+ RTE_LCORE_FOREACH(i) {
+ ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
+ if (ret < 0)
+ return ret;
+
+ snprintf(match, sizeof(match), "hif-%d", ret);
+ memset(&params, 0, sizeof(params));
+ params.match = match;
+ params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
+ ret = pp2_hif_init(&params, &hifs[i]);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
+ */
+static void
+mrvl_deinit_hifs(void)
+{
+ int i;
+
+ RTE_LCORE_FOREACH(i) {
+ if (hifs[i])
+ pp2_hif_deinit(hifs[i]);
+ }
+}
+
+static void mrvl_set_first_last_cores(int core_id)
+{
+ if (core_id < mrvl_lcore_first)
+ mrvl_lcore_first = core_id;
+
+ if (core_id > mrvl_lcore_last)
+ mrvl_lcore_last = core_id;
+}
+
+/**
+ * DPDK callback to register the virtual device.
+ *
+ * @param vdev
+ * Pointer to the virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_kvargs *kvlist;
+ const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ int ret = -EINVAL;
+ uint32_t i, ifnum, cfgnum, core_id;
+ const char *params;
+
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ kvlist = rte_kvargs_parse(params, valid_args);
+ if (!kvlist)
+ return -EINVAL;
+
+ ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
+ if (ifnum > RTE_DIM(ifnames))
+ goto out_free_kvlist;
+
+ rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
+ mrvl_get_ifnames, &ifnames);
+
+ cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+ if (cfgnum > 1) {
+ RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ goto out_free_kvlist;
+ } else if (cfgnum == 1) {
+ rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+ mrvl_get_qoscfg, &mrvl_qos_cfg);
+ }
+
+ /*
+ * ret == -EEXIST is correct, it means DMA
+ * has been already initialized (by another PMD).
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ goto out_free_kvlist;
+ else
+ RTE_LOG(INFO, PMD,
+ "DMA memory has been already initialized by a different driver.\n");
+ }
+
+ ret = mrvl_init_pp2();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+ goto out_deinit_dma;
+ }
+
+ ret = mrvl_init_hifs();
+ if (ret)
+ goto out_deinit_hifs;
+
+ for (i = 0; i < ifnum; i++) {
+ RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
+ ret = mrvl_eth_dev_create(vdev, ifnames[i]);
+ if (ret)
+ goto out_cleanup;
+ }
+
+ rte_kvargs_free(kvlist);
+
+ memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+
+ mrvl_lcore_first = RTE_MAX_LCORE;
+ mrvl_lcore_last = 0;
+
+ RTE_LCORE_FOREACH(core_id) {
+ mrvl_set_first_last_cores(core_id);
+ }
+
+ return 0;
+out_cleanup:
+	for (; i > 0; i--)
+		mrvl_eth_dev_destroy(ifnames[i - 1]);
+out_deinit_hifs:
+ mrvl_deinit_hifs();
+ mrvl_deinit_pp2();
+out_deinit_dma:
+ mv_sys_dma_mem_destroy();
+out_free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/**
+ * DPDK callback to remove virtual device.
+ *
+ * @param vdev
+ * Pointer to the removed virtual device.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
+{
+ int i;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (!name)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Removing %s\n", name);
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ char ifname[RTE_ETH_NAME_MAX_LEN];
+
+ rte_eth_dev_get_name_by_port(i, ifname);
+ mrvl_eth_dev_destroy(ifname);
+ }
+
+ mrvl_deinit_hifs();
+ mrvl_deinit_pp2();
+ mv_sys_dma_mem_destroy();
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_mrvl_drv = {
+ .probe = rte_pmd_mrvl_probe,
+ .remove = rte_pmd_mrvl_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
+RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
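+
+/*
+ * Illustrative probe invocation, a sketch only, assuming the kvargs keys
+ * behind MRVL_IFACE_NAME_ARG and MRVL_CFG_ARG (defined earlier in this
+ * file) are "iface" and "cfg":
+ *
+ *	testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/tmp/mrvl_qos.conf
+ */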
diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mrvl/mrvl_ethdev.h
new file mode 100644
index 00000000..2a4ab5ab
--- /dev/null
+++ b/drivers/net/mrvl/mrvl_ethdev.h
@@ -0,0 +1,116 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MRVL_ETHDEV_H_
+#define _MRVL_ETHDEV_H_
+
+#include <rte_spinlock.h>
+#include <drivers/mv_pp2_cls.h>
+#include <drivers/mv_pp2_ppio.h>
+
+/** Maximum number of rx queues per port */
+#define MRVL_PP2_RXQ_MAX 32
+
+/** Maximum number of tx queues per port */
+#define MRVL_PP2_TXQ_MAX 8
+
+/** Minimum number of descriptors in tx queue */
+#define MRVL_PP2_TXD_MIN 16
+
+/** Maximum number of descriptors in tx queue */
+#define MRVL_PP2_TXD_MAX 2048
+
+/** Tx queue descriptors alignment */
+#define MRVL_PP2_TXD_ALIGN 16
+
+/** Minimum number of descriptors in rx queue */
+#define MRVL_PP2_RXD_MIN 16
+
+/** Maximum number of descriptors in rx queue */
+#define MRVL_PP2_RXD_MAX 2048
+
+/** Rx queue descriptors alignment */
+#define MRVL_PP2_RXD_ALIGN 16
+
+/** Maximum number of descriptors in tx aggregated queue */
+#define MRVL_PP2_AGGR_TXQD_MAX 2048
+
+/** Maximum number of Traffic Classes. */
+#define MRVL_PP2_TC_MAX 8
+
+/** Packet offset inside RX buffer. */
+#define MRVL_PKT_OFFS 64
+
+/** Maximum number of descriptors in shadow queue. Must be power of 2 */
+#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX
+
+/** Shadow queue size mask (since shadow queue size is power of 2) */
+#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1)
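+
+/*
+ * Illustrative arithmetic: with MRVL_PP2_TX_SHADOWQ_SIZE == 2048 the mask is
+ * 0x7ff, so indices advance as (2047 + 1) & MRVL_PP2_TX_SHADOWQ_MASK == 0,
+ * i.e. head/tail wrap around without a division.
+ */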
+
+/** Minimum number of sent buffers to release from shadow queue to BM */
+#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64
+
+struct mrvl_priv {
+ /* Hot fields, used in fast path. */
+ struct pp2_bpool *bpool; /**< BPool pointer */
+ struct pp2_ppio *ppio; /**< Port handler pointer */
+ rte_spinlock_t lock; /**< Spinlock for checking bpool status */
+ uint16_t bpool_max_size; /**< BPool maximum size */
+ uint16_t bpool_min_size; /**< BPool minimum size */
+ uint16_t bpool_init_size; /**< Configured BPool size */
+
+ /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */
+ struct {
+ uint8_t tc; /**< Traffic Class */
+ uint8_t inq; /**< Relative in-queue number */
+ } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned;
+
+ /* Configuration data, used sporadically. */
+ uint8_t pp_id;
+ uint8_t ppio_id;
+ uint8_t bpool_bit;
+ uint8_t rss_hf_tcp;
+ uint8_t uc_mc_flushed;
+ uint8_t vlan_flushed;
+
+ struct pp2_ppio_params ppio_params;
+ struct pp2_cls_qos_tbl_params qos_tbl_params;
+ struct pp2_cls_tbl *qos_tbl;
+ uint16_t nb_rx_queues;
+};
+
+/** Number of ports configured. */
+extern int mrvl_ports_nb;
+
+#endif /* _MRVL_ETHDEV_H_ */
diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mrvl/mrvl_qos.c
new file mode 100644
index 00000000..7c9943aa
--- /dev/null
+++ b/drivers/net/mrvl/mrvl_qos.c
@@ -0,0 +1,636 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cfgfile.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK,
+ * so we keep only one definition.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_qos.h"
+
+/* Parsing tokens. Defined conveniently, so that any correction is easy. */
+#define MRVL_TOK_DEFAULT "default"
+#define MRVL_TOK_DEFAULT_TC "default_tc"
+#define MRVL_TOK_DSCP "dscp"
+#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority"
+#define MRVL_TOK_IP "ip"
+#define MRVL_TOK_IP_VLAN "ip/vlan"
+#define MRVL_TOK_PCP "pcp"
+#define MRVL_TOK_PORT "port"
+#define MRVL_TOK_RXQ "rxq"
+#define MRVL_TOK_SP "SP"
+#define MRVL_TOK_TC "tc"
+#define MRVL_TOK_TXQ "txq"
+#define MRVL_TOK_VLAN "vlan"
+#define MRVL_TOK_VLAN_IP "vlan/ip"
+#define MRVL_TOK_WEIGHT "weight"
+
+/** Number of tokens in range a-b = 2. */
+#define MAX_RNG_TOKENS 2
+
+/** Maximum possible value of PCP. */
+#define MAX_PCP 7
+
+/** Maximum possible value of DSCP. */
+#define MAX_DSCP 63
+
+/** Global QoS configuration. */
+struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Convert string to uint32_t with extra checks for result correctness.
+ *
+ * @param string String to convert.
+ * @param val Conversion result.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_val_securely(const char *string, uint32_t *val)
+{
+ char *endptr;
+ size_t len = strlen(string);
+
+ if (len == 0)
+ return -1;
+
+ errno = 0;
+ *val = strtoul(string, &endptr, 0);
+ if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len)
+ return -2;
+
+ return 0;
+}
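+
+/*
+ * Illustrative results (strtoul with base 0; the whole string must be
+ * consumed for the conversion to count as valid):
+ *	get_val_securely("42", &v)   -> 0, v == 42
+ *	get_val_securely("0x2a", &v) -> 0, v == 42
+ *	get_val_securely("42q", &v)  -> -2 (trailing garbage)
+ *	get_val_securely("", &v)     -> -1 (empty string)
+ */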
+
+/**
+ * Read out-queue configuration from file.
+ *
+ * @param file Path to the configuration file.
+ * @param port Port number.
+ * @param outq Out queue number.
+ * @param cfg Pointer to the Marvell QoS configuration structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
+ struct mrvl_qos_cfg *cfg)
+{
+ char sec_name[32];
+ const char *entry;
+ uint32_t val;
+
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+ MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq);
+
+ /* Skip non-existing */
+ if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+ return 0;
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_WEIGHT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].weight = (uint8_t)val;
+ }
+
+ return 0;
+}
+
+/**
+ * Gets multiple-entry values and places them in table.
+ *
+ * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to
+ * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}.
+ * As all result table elements are always 1 byte long, we do not
+ * overcomplicate the function; the API stays generic, the element size is
+ * verified, and extending to other sizes remains simple.
+ *
+ * This function is purely a utility: it does not print any errors, it only
+ * returns distinct error codes.
+ *
+ * @param entry[in] Values string to parse.
+ * @param tab[out] Results table.
+ * @param elem_sz[in] Element size (in bytes).
+ * @param max_elems[in] Number of results table elements available.
+ * @param max_val[in] Maximum value allowed.
+ * @returns Number of correctly parsed elements in case of success.
+ * @retval -1 Wrong element size.
+ * @retval -2 More tokens than result table allows.
+ * @retval -3 Wrong range syntax.
+ * @retval -4 Wrong range values.
+ * @retval -5 Maximum value exceeded.
+ */
+static int
+get_entry_values(const char *entry, uint8_t *tab,
+ size_t elem_sz, uint8_t max_elems, uint8_t max_val)
+{
+ /* There should not be more tokens than max elements.
+ * Add 1 for error trap.
+ */
+ char *tokens[max_elems + 1];
+
+ /* Begin, End + error trap = 3. */
+ char *rng_tokens[MAX_RNG_TOKENS + 1];
+ long beg, end;
+ uint32_t token_val;
+ int nb_tokens, nb_rng_tokens;
+ int i;
+ int values = 0;
+ char val;
+ char entry_cpy[CFG_VALUE_LEN];
+
+ if (elem_sz != 1)
+ return -1;
+
+ /* Copy the entry to safely use rte_strsplit(). */
+ snprintf(entry_cpy, RTE_DIM(entry_cpy), "%s", entry);
+
+ /*
+ * If there are more tokens than array size, rte_strsplit will
+ * not return error, just array size.
+ */
+ nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy),
+ tokens, max_elems + 1, ' ');
+
+ /* Quick check, will be refined later. */
+ if (nb_tokens > max_elems)
+ return -2;
+
+ for (i = 0; i < nb_tokens; ++i) {
+ if (strchr(tokens[i], '-') != NULL) {
+ /*
+ * Split to begin and end tokens.
+ * We want to catch error cases too, thus we leave
+ * option for number of tokens to be more than 2.
+ */
+ nb_rng_tokens = rte_strsplit(tokens[i],
+ strlen(tokens[i]), rng_tokens,
+ RTE_DIM(rng_tokens), '-');
+ if (nb_rng_tokens != 2)
+ return -3;
+
+			/* Range and sanity checks. */
+			if (get_val_securely(rng_tokens[0], &token_val) < 0)
+				return -4;
+			beg = token_val;
+			if (get_val_securely(rng_tokens[1], &token_val) < 0)
+				return -4;
+			end = token_val;
+			if (beg < 0 || beg > UCHAR_MAX ||
+			    end < 0 || end > UCHAR_MAX || end < beg)
+				return -4;
+			if (end > max_val)
+				return -5;
+
+			for (; beg <= end; ++beg) {
+				/* Check for room before the store. */
+				if (values >= max_elems)
+					return -2;
+
+				*tab = (uint8_t)beg;
+				tab = RTE_PTR_ADD(tab, elem_sz);
+				++values;
+			}
+		} else {
+			/* Single values. */
+			if (get_val_securely(tokens[i], &token_val) < 0)
+				return -5;
+			if (token_val > max_val)
+				return -5;
+			val = (char)token_val;
+
+			if (values >= max_elems)
+				return -2;
+
+			*tab = val;
+			tab = RTE_PTR_ADD(tab, elem_sz);
+			++values;
+		}
+ }
+
+ return values;
+}
+
+/**
+ * Parse Traffic Class mapping configuration.
+ *
+ * @param file Config file handle.
+ * @param port Which port to look for.
+ * @param tc Which Traffic Class to look for.
+ * @param cfg[out] Parsing results.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
+ struct mrvl_qos_cfg *cfg)
+{
+ char sec_name[32];
+ const char *entry;
+ int n;
+
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+ MRVL_TOK_PORT, port, MRVL_TOK_TC, tc);
+
+ /* Skip non-existing */
+ if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+ return 0;
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].inq,
+ sizeof(cfg->port[port].tc[tc].inq[0]),
+ RTE_DIM(cfg->port[port].tc[tc].inq),
+				MRVL_PP2_RXQ_MAX - 1);
+ if (n < 0) {
+ RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].inqs = n;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].pcp,
+ sizeof(cfg->port[port].tc[tc].pcp[0]),
+ RTE_DIM(cfg->port[port].tc[tc].pcp),
+ MAX_PCP);
+ if (n < 0) {
+ RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].pcps = n;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP);
+ if (entry) {
+ n = get_entry_values(entry,
+ cfg->port[port].tc[tc].dscp,
+ sizeof(cfg->port[port].tc[tc].dscp[0]),
+ RTE_DIM(cfg->port[port].tc[tc].dscp),
+ MAX_DSCP);
+ if (n < 0) {
+ RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ n, entry);
+ return n;
+ }
+ cfg->port[port].tc[tc].dscps = n;
+ }
+ return 0;
+}
+
+/**
+ * Parse QoS configuration - rte_kvargs_process handler.
+ *
+ * Opens configuration file and parses its content.
+ *
+ * @param key Unused.
+ * @param path Path to config file.
+ * @param extra_args Pointer to configuration structure.
+ * @returns 0 in case of success, negative value otherwise; exits on
+ * unrecoverable configuration errors.
+ */
+int
+mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
+ void *extra_args)
+{
+ struct mrvl_qos_cfg **cfg = extra_args;
+ struct rte_cfgfile *file = rte_cfgfile_load(path, 0);
+ uint32_t val;
+ int n, i, ret;
+ const char *entry;
+ char sec_name[32];
+
+ if (file == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path);
+
+ /* Create configuration. This is never accessed on the fast path,
+ * so we can ignore socket.
+ */
+ *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0);
+ if (*cfg == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n",
+ path);
+
+ n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT,
+ sizeof(MRVL_TOK_PORT) - 1);
+
+ if (n == 0) {
+ /* This is weird, but not bad. */
+ RTE_LOG(WARNING, PMD, "Empty configuration file?\n");
+ return 0;
+ }
+
+ /* Use the number of ports given as vdev parameters. */
+ for (n = 0; n < mrvl_ports_nb; ++n) {
+ snprintf(sec_name, sizeof(sec_name), "%s %d %s",
+ MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT);
+
+ /* Skip ports non-existing in configuration. */
+ if (rte_cfgfile_num_sections(file, sec_name,
+ strlen(sec_name)) <= 0) {
+ (*cfg)->port[n].use_global_defaults = 1;
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ continue;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_DEFAULT_TC);
+ if (entry) {
+			if (get_val_securely(entry, &val) < 0 ||
+			    val > UCHAR_MAX)
+ return -1;
+ (*cfg)->port[n].default_tc = (uint8_t)val;
+ } else {
+ RTE_LOG(ERR, PMD,
+ "Default Traffic Class required in custom configuration!\n");
+ return -1;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_MAPPING_PRIORITY);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_VLAN_IP,
+ sizeof(MRVL_TOK_VLAN_IP)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ else if (!strncmp(entry, MRVL_TOK_IP_VLAN,
+ sizeof(MRVL_TOK_IP_VLAN)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_IP_VLAN_PRI;
+ else if (!strncmp(entry, MRVL_TOK_IP,
+ sizeof(MRVL_TOK_IP)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_IP_PRI;
+ else if (!strncmp(entry, MRVL_TOK_VLAN,
+ sizeof(MRVL_TOK_VLAN)))
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_PRI;
+ else
+ rte_exit(EXIT_FAILURE,
+ "Error in parsing %s value (%s)!\n",
+ MRVL_TOK_MAPPING_PRIORITY, entry);
+ } else {
+ (*cfg)->port[n].mapping_priority =
+ PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+ }
+
+ for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) {
+ ret = get_outq_cfg(file, n, i, *cfg);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Error %d parsing port %d outq %d!\n",
+ ret, n, i);
+ }
+
+ for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+ ret = parse_tc_cfg(file, n, i, *cfg);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Error %d parsing port %d tc %d!\n",
+ ret, n, i);
+ }
+ }
+
+ return 0;
+}
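+
+/*
+ * A minimal configuration sketch accepted by the parser above (section and
+ * entry names follow the MRVL_TOK_* tokens; all values are illustrative):
+ *
+ *	[port 0 default]
+ *	default_tc = 0
+ *	mapping_priority = vlan/ip
+ *
+ *	[port 0 tc 0]
+ *	rxq = 0 1
+ *	pcp = 0-3
+ *
+ *	[port 0 txq 0]
+ *	weight = 1
+ */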
+
+/**
+ * Setup Traffic Class.
+ *
+ * Fill in TC parameters in single MUSDK TC config entry.
+ * @param param TC parameters entry.
+ * @param inqs Number of MUSDK in-queues in this TC.
+ * @param bpool Bpool for this TC.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
+ struct pp2_bpool *bpool)
+{
+ struct pp2_ppio_inq_params *inq_params;
+
+ param->pkt_offset = MRVL_PKT_OFFS;
+ param->pools[0] = bpool;
+
+ inq_params = rte_zmalloc_socket("inq_params",
+ inqs * sizeof(*inq_params),
+ 0, rte_socket_id());
+ if (!inq_params)
+ return -ENOMEM;
+
+ param->num_in_qs = inqs;
+
+ /* Release old config if necessary. */
+ if (param->inqs_params)
+ rte_free(param->inqs_params);
+
+ param->inqs_params = inq_params;
+
+ return 0;
+}
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues)
+{
+ size_t i, tc;
+
+ if (mrvl_qos_cfg == NULL ||
+ mrvl_qos_cfg->port[portid].use_global_defaults) {
+ /* No port configuration, use default: 1 TC, no QoS. */
+ priv->ppio_params.inqs_params.num_tcs = 1;
+ setup_tc(&priv->ppio_params.inqs_params.tcs_params[0],
+ max_queues, priv->bpool);
+
+		/* Direct mapping of queues, i.e. 0->0, 1->1, etc. */
+ for (i = 0; i < max_queues; ++i) {
+ priv->rxq_map[i].tc = 0;
+ priv->rxq_map[i].inq = i;
+ }
+ return 0;
+ }
+
+ /* We need only a subset of configuration. */
+ struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid];
+
+ priv->qos_tbl_params.type = port_cfg->mapping_priority;
+
+	/*
+	 * We need to reverse the mapping: from tc->pcp (more convenient
+	 * from the usability point of view) to pcp->tc (what MUSDK
+	 * configures). First, set all map elements to "default".
+	 */
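+	/*
+	 * Example (illustrative): pcp = "2-3" configured in TC 1 results in
+	 * pcp_cos_map[2].tc == 1 and pcp_cos_map[3].tc == 1, while every
+	 * other PCP keeps the default TC.
+	 */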
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+ priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc;
+
+ /* Then, fill in all known values. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
+ /* Better safe than sorry. */
+ RTE_LOG(ERR, PMD,
+ "Too many PCPs configured in TC %zu!\n", tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
+ priv->qos_tbl_params.pcp_cos_map[
+ port_cfg->tc[tc].pcp[i]].tc = tc;
+ }
+ }
+
+ /*
+ * The same logic goes with DSCP.
+ * First, set all map elements to "default".
+ */
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+ priv->qos_tbl_params.dscp_cos_map[i].tc =
+ port_cfg->default_tc;
+
+ /* Fill in all known values. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
+ /* Better safe than sorry. */
+ RTE_LOG(ERR, PMD,
+ "Too many DSCPs configured in TC %zu!\n", tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
+ priv->qos_tbl_params.dscp_cos_map[
+ port_cfg->tc[tc].dscp[i]].tc = tc;
+ }
+ }
+
+	/*
+	 * The same logic applies to queue mapping. We only need to store
+	 * the qid->tc mapping, so the TC is known when a queue is read.
+	 */
+ for (i = 0; i < RTE_DIM(priv->rxq_map); ++i)
+ priv->rxq_map[i].tc = MRVL_UNKNOWN_TC;
+
+ /* Set up DPDKq->(TC,inq) mapping. */
+ for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+ if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
+ /* Overflow. */
+ RTE_LOG(ERR, PMD,
+ "Too many RX queues configured per TC %zu!\n",
+ tc);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->tc[tc].inqs; ++i) {
+ uint8_t idx = port_cfg->tc[tc].inq[i];
+
+			if (idx >= RTE_DIM(priv->rxq_map)) {
+ RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx);
+ return -1;
+ }
+
+ priv->rxq_map[idx].tc = tc;
+ priv->rxq_map[idx].inq = i;
+ }
+ }
+
+ /*
+ * Set up TC configuration. TCs need to be sequenced: 0, 1, 2
+ * with no gaps. Empty TC means end of processing.
+ */
+ for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+ if (port_cfg->tc[i].inqs == 0)
+ break;
+ setup_tc(&priv->ppio_params.inqs_params.tcs_params[i],
+ port_cfg->tc[i].inqs,
+ priv->bpool);
+ }
+
+ priv->ppio_params.inqs_params.num_tcs = i;
+
+ return 0;
+}
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv)
+{
+ size_t i;
+
+ if (priv->ppio == NULL) {
+ RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n");
+ return -1;
+ }
+
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+ priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio;
+
+ for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+ priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio;
+
+ /* Initialize Classifier QoS table. */
+
+ return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl);
+}
diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mrvl/mrvl_qos.h
new file mode 100644
index 00000000..ae7508c9
--- /dev/null
+++ b/drivers/net/mrvl/mrvl_qos.h
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MRVL_QOS_H_
+#define _MRVL_QOS_H_
+
+#include <rte_common.h>
+
+#include "mrvl_ethdev.h"
+
+/** Code points per Traffic Class. Equals max(64 DSCP values, 8 PCP values). */
+#define MRVL_CP_PER_TC (64)
+
+/** Value used as "unknown". */
+#define MRVL_UNKNOWN_TC (0xFF)
+
+/* QoS config. */
+struct mrvl_qos_cfg {
+ struct port_cfg {
+ struct {
+ uint8_t inq[MRVL_PP2_RXQ_MAX];
+ uint8_t dscp[MRVL_CP_PER_TC];
+ uint8_t pcp[MRVL_CP_PER_TC];
+ uint8_t inqs;
+ uint8_t dscps;
+ uint8_t pcps;
+ } tc[MRVL_PP2_TC_MAX];
+ struct {
+ uint8_t weight;
+ } outq[MRVL_PP2_RXQ_MAX];
+ enum pp2_cls_qos_tbl_type mapping_priority;
+ uint16_t inqs;
+ uint16_t outqs;
+ uint8_t default_tc;
+ uint8_t use_global_defaults;
+ } port[RTE_MAX_ETHPORTS];
+};
+
+/** Global QoS configuration. */
+extern struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Parse QoS configuration - rte_kvargs_process handler.
+ *
+ * Opens configuration file and parses its content.
+ *
+ * @param key Unused.
+ * @param path Path to config file.
+ * @param extra_args Pointer to configuration structure.
+ * @returns 0 in case of success, negative value otherwise; exits on
+ * unrecoverable configuration errors.
+ */
+int
+mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
+ void *extra_args);
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues);
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_start_qos_mapping(struct mrvl_priv *priv);
+
+#endif /* _MRVL_QOS_H_ */
diff --git a/drivers/net/mrvl/rte_pmd_mrvl_version.map b/drivers/net/mrvl/rte_pmd_mrvl_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/drivers/net/mrvl/rte_pmd_mrvl_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index 4ee2c2dc..4ba066ac 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -40,6 +40,9 @@ CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lm
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_nfp_version.map
@@ -49,5 +52,7 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nfpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nspu.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 92b03c4c..83dec061 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -55,6 +55,7 @@
#include <rte_alarm.h>
#include <rte_spinlock.h>
+#include "nfp_nfpu.h"
#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"
@@ -87,7 +88,7 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
-static void nfp_net_stats_get(struct rte_eth_dev *dev,
+static int nfp_net_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
@@ -99,13 +100,13 @@ static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* happen to be at the same offset on the NFP6000 and the NFP3200 so
* we use a single macro here.
*/
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff)))
+#define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff))
/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD 0x7f
#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
- (uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+ (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
@@ -487,10 +488,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* Supporting VLAN insertion by default */
- if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
if (rxmode->jumbo_frame)
/* this is handled in rte_eth_dev_configure */
@@ -504,6 +501,32 @@ nfp_net_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* If next capabilities are supported, configure them by default */
+
+ /* VLAN insertion */
+ if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
+ new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
+ new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+
+ /* RX gather */
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
if (!new_ctrl)
return 0;
@@ -592,7 +615,55 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}
-static void nfp_net_read_mac(struct nfp_net_hw *hw)
+#define ETH_ADDR_LEN 6
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ dst[ETH_ADDR_LEN - i - 1] = src[i];
+}
+
+static int
+nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
+{
+ union eth_table_entry *entry;
+ int idx, i;
+
+ idx = port;
+ entry = hw->eth_table;
+
+ /* Reading NFP ethernet table obtained before */
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
+ if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
+ /* port not in use */
+ entry++;
+ continue;
+ }
+ if (idx == 0)
+ break;
+ idx--;
+ entry++;
+ }
+
+ if (i == NSP_ETH_MAX_COUNT)
+ return -EINVAL;
+
+	/*
+	 * hw points to port0's private data. Make it point to the
+	 * right port now.
+	 */
+ hw += port;
+ nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
+ (uint8_t *)&entry->mac_addr);
+
+ return 0;
+}
+
+static void
+nfp_net_vf_read_mac(struct nfp_net_hw *hw)
{
uint32_t tmp;
@@ -603,6 +674,20 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
memcpy(&hw->mac_addr[4], &tmp, 2);
}
+static void
+nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+{
+ uint32_t mac0 = *(uint32_t *)mac;
+ uint16_t mac1;
+
+ nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
+
+ mac += 4;
+ mac1 = *(uint16_t *)mac;
+ nn_writew(rte_cpu_to_be_16(mac1),
+ hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+}
+
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
@@ -627,14 +712,19 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
/* UIO just supports one queue and no LSC*/
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+ intr_handle->intr_vec[0] = 0;
} else {
PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
- for (i = 0; i < dev->data->nb_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
/*
* The first msix vector is reserved for non
* efd interrupts
*/
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+ intr_handle->intr_vec[i] = i + 1;
+ PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+ intr_handle->intr_vec[i]);
+ }
}
/* Avoiding TX interrupts */
@@ -667,6 +757,11 @@ nfp_net_start(struct rte_eth_dev *dev)
/* check and configure queue intr-vector mapping */
if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (hw->pf_multiport_enabled) {
+ PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
+ "with NFP multiport PF");
+ return -EINVAL;
+ }
if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
/*
* Better not to share LSC with RX interrupts.
@@ -684,20 +779,17 @@ nfp_net_start(struct rte_eth_dev *dev)
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
- }
- if (rte_intr_dp_is_en(intr_handle))
nfp_configure_rx_interrupt(dev, intr_handle);
+ update = NFP_NET_CFG_UPDATE_MSIX;
+ }
rte_intr_enable(intr_handle);
/* Enable device */
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
- update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
- /* Just configuring queues interrupts when necessary */
- if (rte_intr_dp_is_en(intr_handle))
- update |= NFP_NET_CFG_UPDATE_MSIX;
+ update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -715,6 +807,10 @@ nfp_net_start(struct rte_eth_dev *dev)
goto error;
}
+ if (hw->is_pf)
+ /* Configure the physical port up */
+ nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1);
+
hw->ctrl = new_ctrl;
return 0;
@@ -743,9 +839,12 @@ static void
nfp_net_stop(struct rte_eth_dev *dev)
{
int i;
+ struct nfp_net_hw *hw;
PMD_INIT_LOG(DEBUG, "Stop");
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
nfp_net_disable_queues(dev);
/* Clear queues */
@@ -758,6 +857,10 @@ nfp_net_stop(struct rte_eth_dev *dev)
nfp_net_reset_rx_queue(
(struct nfp_net_rxq *)dev->data->rx_queues[i]);
}
+
+ if (hw->is_pf)
+ /* Configure the physical port down */
+ nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
@@ -766,6 +869,7 @@ nfp_net_close(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
struct rte_pci_device *pci_dev;
+ int i;
PMD_INIT_LOG(DEBUG, "Close");
@@ -777,7 +881,18 @@ nfp_net_close(struct rte_eth_dev *dev)
* threads/queues before calling the device close function.
*/
- nfp_net_stop(dev);
+ nfp_net_disable_queues(dev);
+
+ /* Clear queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ nfp_net_reset_tx_queue(
+ (struct nfp_net_txq *)dev->data->tx_queues[i]);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ nfp_net_reset_rx_queue(
+ (struct nfp_net_rxq *)dev->data->rx_queues[i]);
+ }
rte_intr_disable(&pci_dev->intr_handle);
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
@@ -895,17 +1010,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
NFP_NET_CFG_STS_LINK_RATE_MASK;
- if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
- ((NFD_CFG_MINOR_VERSION_of(hw->ver) == 4) &&
- (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
- /* We really do not know the speed wil old firmware */
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
link.link_speed = ETH_SPEED_NUM_NONE;
- else {
- if (nn_link_status >= RTE_DIM(ls_to_ethtool))
- link.link_speed = ETH_SPEED_NUM_NONE;
- else
- link.link_speed = ls_to_ethtool[nn_link_status];
- }
+ else
+ link.link_speed = ls_to_ethtool[nn_link_status];
if (old.link_status != link.link_status) {
nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -919,7 +1027,7 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
return -1;
}
-static void
+static int
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int i;
@@ -1005,8 +1113,11 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
- if (stats)
+ if (stats) {
memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+ return 0;
+ }
+ return -EINVAL;
}
static void
@@ -1125,6 +1236,11 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+ dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_NONFRAG_IPV6_UDP;
+
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
@@ -1240,12 +1356,12 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
nfp_net_dev_atomic_read_link_status(dev, &link);
if (link.link_status)
RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
- (int)(dev->data->port_id), (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX
? "full-duplex" : "half-duplex");
else
RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
- (int)(dev->data->port_id));
+ dev->data->port_id);
RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
pci_dev->addr.domain, pci_dev->addr.bus,
@@ -1446,7 +1562,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
}
/* Saving physical and virtual addresses for the RX ring */
- rxq->dma = (uint64_t)tz->phys_addr;
+ rxq->dma = (uint64_t)tz->iova;
rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to RX descriptors */
@@ -1549,7 +1665,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
"tx_free_thresh must be less than the number of TX "
"descriptors. (tx_free_thresh=%u port=%d "
"queue=%d)\n", (unsigned int)tx_free_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
@@ -1602,7 +1718,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
- txq->dma = (uint64_t)tz->phys_addr;
+ txq->dma = (uint64_t)tz->iova;
txq->txds = (struct nfp_net_tx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
@@ -1720,6 +1836,8 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
#define NFP_HASH_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
+#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
+
/*
* nfp_net_set_hash - Set mbuf hash data
*
@@ -1730,18 +1848,57 @@ static inline void
nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
struct rte_mbuf *mbuf)
{
- uint32_t hash;
- uint32_t hash_type;
struct nfp_net_hw *hw = rxq->hw;
+ uint8_t *meta_offset;
+ uint32_t meta_info;
+ uint32_t hash = 0;
+ uint32_t hash_type = 0;
if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
return;
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
+ if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+ hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
- hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
- hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
+ } else if (NFP_DESC_META_LEN(rxd)) {
+ /*
+		 * New metadata API:
+ * <---- 32 bit ----->
+ * m field type word
+ * e data field #2
+ * t data field #1
+ * a data field #0
+ * ====================
+ * packet data
+ *
+		 * The field type word contains up to 8 4-bit field types.
+		 * A 4-bit field type refers to a data field word.
+		 * A data field word can have several 4-bit field types.
+ */
+ meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
+ meta_offset -= NFP_DESC_META_LEN(rxd);
+ meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+ meta_offset += 4;
+		/* The NFP PMD only supports metadata for hashing */
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ /* next field type is about the hash type */
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ /* hash value is in the data field */
+ hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+ hash_type = meta_info & NFP_NET_META_FIELD_MASK;
+ break;
+ default:
+ /* Unsupported metadata can be a performance issue */
+ return;
+ }
+ } else {
+ return;
+ }
mbuf->hash.rss = hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
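For reference, a minimal sketch of how a full field-type word could be walked, assuming the same NFP_NET_META_* definitions from nfp_net_ctrl.h; nfp_meta_parse is a hypothetical helper, not part of the driver:

#include <rte_byteorder.h>

/* Hypothetical helper: walk every 4-bit field type in the field-type
 * word. Consuming a type shifts the word right by 4 bits; consuming a
 * data field advances the data pointer by 4 bytes.
 */
static void
nfp_meta_parse(uint8_t *meta)
{
	uint32_t meta_info = rte_be_to_cpu_32(*(uint32_t *)meta);
	uint8_t *field = meta + 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			/* the next 4-bit type encodes the hash type and
			 * the data field carries the hash value
			 */
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			/* hash = rte_be_to_cpu_32(*(uint32_t *)field); */
			break;
		default:
			return;	/* unknown field type: stop parsing */
		}
		meta_info >>= NFP_NET_META_FIELD_SIZE;
		field += 4;
	}
}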
@@ -1847,9 +2004,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
- RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned)rxq->port_id,
- (unsigned)rxq->qidx);
+ RTE_LOG_DP(DEBUG, PMD,
+ "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+ rxq->port_id, (unsigned int)rxq->qidx);
nfp_net_mbuf_alloc_failed(rxq);
break;
}
@@ -1910,7 +2067,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
- mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
/* Adding the mbuff to the mbuff array passed by the app */
@@ -1933,7 +2090,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return nb_hold;
PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
+ rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
nb_hold += rxq->nb_rx_hold;
@@ -1944,7 +2101,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx,
+ rxq->port_id, (unsigned int)rxq->qidx,
(unsigned)nb_hold, (unsigned)avail);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
nb_hold = 0;
@@ -2094,7 +2251,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*/
pkt_size = pkt->pkt_len;
- while (pkt_size) {
+ while (pkt) {
/* Copying TSO, VLAN and cksum info */
*txds = txd;
@@ -2109,7 +2266,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*lmbuf = pkt;
dma_size = pkt->data_len;
- dma_addr = rte_mbuf_data_dma_addr(pkt);
+ dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
"%" PRIx64 "\n", dma_addr);
@@ -2126,13 +2283,13 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->wr_p = 0;
pkt_size -= dma_size;
- if (!pkt_size) {
+ if (!pkt_size)
/* End of packet */
txds->offset_eop |= PCIE_DESC_TX_EOP;
- } else {
+ else
txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
- pkt = pkt->next;
- }
+
+ pkt = pkt->next;
/* Referencing next free TX descriptor */
txds = &txq->txds[txq->wr_p];
lmbuf = &txq->txbufs[txq->wr_p].mbuf;
@@ -2149,11 +2306,12 @@ xmit_end:
return i;
}
-static void
+static int
nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
uint32_t new_ctrl, update;
struct nfp_net_hw *hw;
+ int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
new_ctrl = 0;
@@ -2174,14 +2332,15 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
if (new_ctrl == 0)
- return;
+ return 0;
update = NFP_NET_CFG_UPDATE_GEN;
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return;
+ ret = nfp_net_reconfig(hw, new_ctrl, update);
+ if (!ret)
+ hw->ctrl = new_ctrl;
- hw->ctrl = new_ctrl;
+ return ret;
}
/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
@@ -2233,7 +2392,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
reta &= ~(0xFF << (8 * j));
reta |= reta_conf[idx].reta[shift + j] << (8 * j);
}
- nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+ nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+ reta);
}
update = NFP_NET_CFG_UPDATE_RSS;
@@ -2280,7 +2440,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
if (!mask)
continue;
- reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+ reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+ shift);
for (j = 0; j < 4; j++) {
if (!(mask & (0x1 << j)))
continue;
@@ -2330,6 +2491,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
NFP_NET_CFG_RSS_IPV6_TCP |
NFP_NET_CFG_RSS_IPV6_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
/* configuring where to apply the RSS hash */
nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
@@ -2426,19 +2590,76 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
+/*
+ * Every eth_dev created gets its own private data, but before nfp_net_init
+ * that private data references the private data for all the PF ports. This
+ * is because the vNIC BARs are mapped based on the first port, so all ports
+ * need info about port 0 private data. Inside nfp_net_init the private data
+ * pointer is changed to the right address for each port once the BARs have
+ * been mapped.
+
+ * This function helps to find out which port, and therefore which offset
+ * inside the private data array, to use.
+ */
+static int
+get_pf_port_number(char *name)
+{
+ char *pf_str = name;
+ int size = 0;
+
+ while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
+ pf_str++;
+
+ if (size == 30)
+ /*
+		 * This should never happen and it would mean a major
+		 * implementation fault.
+ */
+ rte_panic("nfp_net: problem with pf device name\n");
+
+ /* Expecting _portX with X within [0,7] */
+ pf_str += 5;
+
+ return (int)strtol(pf_str, NULL, 10);
+}
+
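As a worked example with a hypothetical device name: for "0000:04:00.0_port3" the loop above stops at the '_' after 12 characters, pf_str += 5 skips "_port", and strtol() returns 3. A standalone sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char name[] = "0000:04:00.0_port3";	/* hypothetical port name */
	char *p = name;

	while (*p != '_' && *p != '\0')
		p++;			/* p now points at "_port3" */
	p += 5;				/* skip "_port" */
	printf("port = %ld\n", strtol(p, NULL, 10));	/* port = 3 */
	return 0;
}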
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *hw, *hwport0;
- uint32_t tx_bar_off, rx_bar_off;
+ uint64_t tx_bar_off = 0, rx_bar_off = 0;
uint32_t start_q;
int stride = 4;
+ nspu_desc_t *nspu_desc = NULL;
+ uint64_t bar_offset;
+ int port = 0;
+
PMD_INIT_FUNC_TRACE();
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+ (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+ port = get_pf_port_number(eth_dev->data->name);
+ if (port < 0 || port > 7) {
+ RTE_LOG(ERR, PMD, "Port value is wrong\n");
+ return -ENODEV;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
+
+ /* This points to port 0 private data */
+ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ /* This points to the specific port private data */
+ hw = &hwport0[port];
+ hw->pf_port_idx = port;
+ } else {
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ hwport0 = 0;
+ }
eth_dev->dev_ops = &nfp_net_eth_dev_ops;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
@@ -2448,9 +2669,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -2468,11 +2687,42 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
"hw->ctrl_bar is NULL. BAR0 not configured\n");
return -ENODEV;
}
+
+ if (hw->is_pf && port == 0) {
+ nspu_desc = hw->nspu_desc;
+
+ if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) {
+ /*
+ * A firmware should be there after PF probe so this
+ * should not happen.
+ */
+ RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n");
+ return -ENODEV;
+ }
+
+ /* vNIC PF control BAR is a subset of PF PCI device BAR */
+ hw->ctrl_bar += bar_offset;
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+ }
+
+ if (port > 0) {
+ if (!hwport0->ctrl_bar)
+ return -ENODEV;
+
+ /* address based on port0 offset */
+ hw->ctrl_bar = hwport0->ctrl_bar +
+ (port * NFP_PF_CSR_SLICE_SIZE);
+ }
+
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+
hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
/* Work out where in the BAR the queues start. */
switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_NFP4000_PF_NIC:
+ case PCI_DEVICE_ID_NFP6000_PF_NIC:
case PCI_DEVICE_ID_NFP6000_VF_NIC:
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
tx_bar_off = NFP_PCIE_QUEUE(start_q);
@@ -2484,11 +2734,34 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
+
+ if (hw->is_pf && port == 0) {
+ /* configure access to tx/rx vNIC BARs */
+ nfp_nsp_map_queues_bar(nspu_desc, &bar_offset);
+ PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n",
+ bar_offset);
+ hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr;
- hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
- hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
+ /* vNIC PF tx/rx BARs are a subset of PF PCI device */
+ hwport0->hw_queues += bar_offset;
+
+		/* Let's seize the chance to read the eth table from hw */
+ if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
+ return -ENODEV;
+ }
+
+ if (hw->is_pf) {
+ hw->tx_bar = hwport0->hw_queues + tx_bar_off;
+ hw->rx_bar = hwport0->hw_queues + rx_bar_off;
+ eth_dev->data->dev_private = hw;
+ } else {
+ hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ tx_bar_off;
+ hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
+ rx_bar_off;
+ }
PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
@@ -2508,8 +2781,10 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
+ hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
@@ -2537,11 +2812,18 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
- nfp_net_read_mac(hw);
+ if (hw->is_pf) {
+ nfp_net_pf_read_mac(hwport0, port);
+ nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+ } else {
+ nfp_net_vf_read_mac(hw);
+ }
- if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
/* Using random mac addresses for VFs */
eth_random_addr(&hw->mac_addr[0]);
+ nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
+ }
/* Copying mac address to DPDK eth_dev struct */
ether_addr_copy((struct ether_addr *)hw->mac_addr,
@@ -2568,12 +2850,154 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return 0;
}
-static const struct rte_pci_id pci_id_nfp_net_map[] = {
+static int
+nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
+ nfpu_desc_t *nfpu_desc, void **priv)
+{
+ struct rte_eth_dev *eth_dev;
+ struct nfp_net_hw *hw;
+ char *port_name;
+ int ret;
+
+ port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
+ if (!port_name)
+ return -ENOMEM;
+
+ if (ports > 1)
+ sprintf(port_name, "%s_port%d", dev->device.name, port);
+ else
+ sprintf(port_name, "%s", dev->device.name);
+
+ eth_dev = rte_eth_dev_allocate(port_name);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ if (port == 0) {
+ *priv = rte_zmalloc(port_name,
+ sizeof(struct nfp_net_adapter) * ports,
+ RTE_CACHE_LINE_SIZE);
+ if (!*priv) {
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+ }
+
+ eth_dev->data->dev_private = *priv;
+
+ /*
+ * dev_private pointing to port0 dev_private because we need
+ * to configure vNIC bars based on port0 at nfp_net_init.
+ * Then dev_private is adjusted per port.
+ */
+ hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
+ hw->nspu_desc = nfpu_desc->nspu;
+ hw->nfpu_desc = nfpu_desc;
+ hw->is_pf = 1;
+ if (ports > 1)
+ hw->pf_multiport_enabled = 1;
+
+ eth_dev->device = &dev->device;
+ rte_eth_copy_pci_info(eth_dev, dev);
+
+ ret = nfp_net_init(eth_dev);
+
+ if (ret)
+ rte_eth_dev_release_port(eth_dev);
+
+ rte_free(port_name);
+
+ return ret;
+}
+
+static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *dev)
+{
+ nfpu_desc_t *nfpu_desc;
+ nspu_desc_t *nspu_desc;
+ uint64_t offset_symbol;
+ uint8_t *bar_offset;
+ int major, minor;
+ int total_ports;
+ void *priv = 0;
+ int ret = -ENODEV;
+ int i;
+
+ if (!dev)
+ return ret;
+
+ nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0);
+ if (!nfpu_desc)
+ return -ENOMEM;
+
+ if (nfpu_open(dev, nfpu_desc, 0) < 0) {
+ RTE_LOG(ERR, PMD,
+ "nfpu_open failed\n");
+ goto nfpu_error;
+ }
+
+ nspu_desc = nfpu_desc->nspu;
+
+
+ /* Check NSP ABI version */
+ if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) {
+ RTE_LOG(INFO, PMD, "NFP NSP not present\n");
+ goto error;
+ }
+ PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor);
+
+ if ((major == 0) && (minor < 20)) {
+ RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. Required 0.20 or higher\n");
+ goto error;
+ }
+
+ ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports",
+ &offset_symbol);
+ if (ret)
+ goto error;
+
+ bar_offset = (uint8_t *)dev->mem_resource[0].addr;
+ bar_offset += offset_symbol;
+ total_ports = (uint32_t)*bar_offset;
+ PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
+
+ if (total_ports <= 0 || total_ports > 8) {
+ RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ for (i = 0; i < total_ports; i++) {
+ ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ nfpu_close(nfpu_desc);
+nfpu_error:
+ rte_free(nfpu_desc);
+
+ return ret;
+}
+
+static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP4000_PF_NIC)
+ },
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
PCI_DEVICE_ID_NFP6000_PF_NIC)
},
{
+ .vendor_id = 0,
+ },
+};
+
+static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
+ {
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
PCI_DEVICE_ID_NFP6000_VF_NIC)
},
@@ -2591,19 +3015,45 @@ static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
+ struct rte_eth_dev *eth_dev;
+ struct nfp_net_hw *hw, *hwport0;
+ int port = 0;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
+ (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
+ port = get_pf_port_number(eth_dev->data->name);
+ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ hw = &hwport0[port];
+ } else {
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ }
+ /* hotplug is not possible with multiport PF */
+ if (hw->pf_multiport_enabled)
+ return -ENOTSUP;
return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
-static struct rte_pci_driver rte_nfp_net_pmd = {
- .id_table = pci_id_nfp_net_map,
+static struct rte_pci_driver rte_nfp_net_pf_pmd = {
+ .id_table = pci_id_nfp_pf_net_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = nfp_pf_pci_probe,
+ .remove = eth_nfp_pci_remove,
+};
+
+static struct rte_pci_driver rte_nfp_net_vf_pmd = {
+ .id_table = pci_id_nfp_vf_net_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.probe = eth_nfp_pci_probe,
.remove = eth_nfp_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
+RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
* Local variables:
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index 2c500433..1ebd99ca 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -52,6 +52,13 @@
/* Offset in Freelist buffer where packet starts on RX */
#define NFP_NET_RX_OFFSET 32
+/* Working with the metadata API (NFD version > 3.0) */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1)
+
+/* Prepend field types */
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+
/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
@@ -327,6 +334,9 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
+/* PF multiport offset */
+#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
+
#endif /* _NFP_NET_CTRL_H_ */
/*
* Local variables:
diff --git a/drivers/net/nfp/nfp_net_eth.h b/drivers/net/nfp/nfp_net_eth.h
new file mode 100644
index 00000000..af57f03c
--- /dev/null
+++ b/drivers/net/nfp/nfp_net_eth.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_net_eth.h
+ *
+ * Netronome NFP_NET PMD driver
+ */
+
+union eth_table_entry {
+ struct {
+ uint64_t port;
+ uint64_t state;
+ uint8_t mac_addr[6];
+ uint8_t resv[2];
+ uint64_t control;
+ };
+ uint64_t raw[4];
+};
+
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif
+
+#define NSP_ETH_NBI_PORT_COUNT 24
+#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union eth_table_entry))
+
+#define NSP_ETH_PORT_LANES 0xf
+#define NSP_ETH_PORT_INDEX 0xff00
+#define NSP_ETH_PORT_LABEL 0x3f000000000000
+#define NSP_ETH_PORT_PHYLABEL 0xfc0000000000000
+
+#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_STATE_RATE 0xf00
+#define NSP_ETH_STATE_INTERFACE 0xff000
+#define NSP_ETH_STATE_MEDIA 0x300000
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG 0x3800000
+
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
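A minimal sketch of how these masks can be applied to an entry, assuming the layout above; the shift value (8) is derived from the NSP_ETH_PORT_INDEX mask position, not taken from NFP documentation:

#include <stdint.h>
#include <rte_byteorder.h>

/* Extract the port index from the little-endian port word */
static inline int
nsp_eth_port_index(const union eth_table_entry *e)
{
	return (int)((rte_le_to_cpu_64(e->port) & NSP_ETH_PORT_INDEX) >> 8);
}

/* Check whether the port is enabled in the state word */
static inline int
nsp_eth_port_enabled(const union eth_table_entry *e)
{
	return !!(rte_le_to_cpu_64(e->state) & NSP_ETH_STATE_ENABLED);
}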
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index eec56bc1..1ae0ea62 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -42,6 +42,7 @@
#define NFP_NET_PMD_VERSION "0.1"
#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NFP4000_PF_NIC 0x4000
#define PCI_DEVICE_ID_NFP6000_PF_NIC 0x6000
#define PCI_DEVICE_ID_NFP6000_VF_NIC 0x6003
@@ -143,6 +144,11 @@ static inline void nn_writel(uint32_t val, volatile void *addr)
rte_write32(val, addr);
}
+static inline void nn_writew(uint16_t val, volatile void *addr)
+{
+ rte_write16(val, addr);
+}
+
static inline uint64_t nn_readq(volatile void *addr)
{
const volatile uint32_t *p = addr;
@@ -250,7 +256,7 @@ struct nfp_net_txq {
uint32_t tx_hthresh; /* not used by now. Future? */
uint32_t tx_wthresh; /* not used by now. Future? */
uint32_t txq_flags; /* not used by now. Future? */
- uint8_t port_id;
+ uint16_t port_id;
int qidx;
int tx_qcidx;
__le64 dma;
@@ -431,6 +437,13 @@ struct nfp_net_hw {
struct nfp_cpp_area *rx_area;
struct nfp_cpp_area *msix_area;
#endif
+ uint8_t *hw_queues;
+ uint8_t is_pf;
+ uint8_t pf_port_idx;
+ uint8_t pf_multiport_enabled;
+ union eth_table_entry *eth_table;
+ nspu_desc_t *nspu_desc;
+ nfpu_desc_t *nfpu_desc;
};
struct nfp_net_adapter {
diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c
new file mode 100644
index 00000000..5775d8da
--- /dev/null
+++ b/drivers/net/nfp/nfp_nfpu.c
@@ -0,0 +1,103 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/types.h>
+
+#include <rte_bus_pci.h>
+#include <rte_malloc.h>
+
+#include "nfp_nfpu.h"
+
+/* PF BAR and expansion BAR for the NSP interface */
+#define NFP_CFG_PCIE_BAR 0
+#define NFP_CFG_EXP_BAR 7
+
+#define NFP_CFG_EXP_BAR_CFG_BASE 0x30000
+
+/* There could be other NFP userspace tools using the NSP interface.
+ * Make sure no other process is using it by taking a lock, avoiding
+ * concurrent access problems.
+ */
+static int
+nspv_aquire_process_lock(nfpu_desc_t *desc)
+{
+ int rc;
+ struct flock lock;
+ char lockname[30];
+
+ memset(&lock, 0, sizeof(lock));
+
+ snprintf(lockname, sizeof(lockname), "/var/lock/nfp%d", desc->nfp);
+
+ /* Using S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH */
+ desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+
+ if (desc->lock < 0)
+ return desc->lock;
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ rc = -1;
+ while (rc != 0) {
+ rc = fcntl(desc->lock, F_SETLK, &lock);
+ if (rc < 0) {
+ if ((errno != EAGAIN) && (errno != EACCES)) {
+ close(desc->lock);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
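Note the loop above retries F_SETLK in a tight loop. A blocking alternative under the same assumptions would be F_SETLKW, which sleeps until the lock is available; a sketch of the equivalent fragment inside nspv_aquire_process_lock:

	/* blocking variant: no retry loop needed */
	lock.l_type = F_WRLCK;
	lock.l_whence = SEEK_SET;
	if (fcntl(desc->lock, F_SETLKW, &lock) < 0) {
		close(desc->lock);
		return -1;
	}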
+int
+nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp)
+{
+ void *cfg_base, *mem_base;
+ size_t barsz;
+ int ret = 0;
+ int i = 0;
+
+ desc->nfp = nfp;
+
+ ret = nspv_aquire_process_lock(desc);
+ if (ret)
+ return -1;
+
+ barsz = pci_dev->mem_resource[0].len;
+
+ /* barsz in log2 */
+ while (barsz >>= 1)
+ i++;
+ barsz = i;
+
+ /* Getting address for NFP expansion BAR registers */
+ cfg_base = pci_dev->mem_resource[0].addr;
+ cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE;
+
+ /* Getting address for NFP NSP interface registers */
+ mem_base = pci_dev->mem_resource[0].addr;
+ mem_base = (uint8_t *)mem_base + (NFP_CFG_EXP_BAR << (barsz - 3));
+
+
+ desc->nspu = rte_malloc("nfp nspu", sizeof(nspu_desc_t), 0);
+ nfp_nspu_init(desc->nspu, desc->nfp, NFP_CFG_PCIE_BAR, barsz,
+ NFP_CFG_EXP_BAR, cfg_base, mem_base);
+
+ return ret;
+}
+
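A worked example of the log2 computation and window layout above, assuming a 128 MB PF BAR:

	/*
	 * len = 0x8000000 (128 MB)  ->  log2 loop yields barsz = 27
	 * expansion BAR window size = 1 << (27 - 3) = 16 MB (BAR / 8)
	 * NSP window (expansion BAR 7) starts at 7 << (27 - 3) = 112 MB
	 * into the PF BAR, which is the mem_base computed above.
	 */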
+int
+nfpu_close(nfpu_desc_t *desc)
+{
+ rte_free(desc->nspu);
+ close(desc->lock);
+ unlink("/var/lock/nfp0");
+ return 0;
+}
diff --git a/drivers/net/nfp/nfp_nfpu.h b/drivers/net/nfp/nfp_nfpu.h
new file mode 100644
index 00000000..e56fa099
--- /dev/null
+++ b/drivers/net/nfp/nfp_nfpu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_nfpu.h
+ *
+ * Netronome NFP_NET PMD driver
+ */
+
+/*
+ * NFP User interface creates a window for talking with NFP NSP processor
+ */
+
+
+#include <rte_bus_pci.h>
+#include "nfp_nspu.h"
+
+typedef struct {
+ int nfp;
+ int lock;
+ nspu_desc_t *nspu;
+} nfpu_desc_t;
+
+int nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp);
+int nfpu_close(nfpu_desc_t *desc);
diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c
new file mode 100644
index 00000000..6ba940cb
--- /dev/null
+++ b/drivers/net/nfp/nfp_nspu.c
@@ -0,0 +1,623 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <rte_log.h>
+#include <rte_byteorder.h>
+
+#include "nfp_nfpu.h"
+
+#define CFG_EXP_BAR_ADDR_SZ 1
+#define CFG_EXP_BAR_MAP_TYPE 1
+
+#define EXP_BAR_TARGET_SHIFT 23
+#define EXP_BAR_LENGTH_SHIFT 27 /* 0=32, 1=64 bit increment */
+#define EXP_BAR_MAP_TYPE_SHIFT 29 /* Bulk BAR map */
+
+/* NFP target for NSP access */
+#define NFP_NSP_TARGET 7
+
+/* Expansion BARs for mapping PF vnic BARs */
+#define NFP_NET_PF_CFG_EXP_BAR 6
+#define NFP_NET_PF_HW_QUEUES_EXP_BAR 5
+
+/*
+ * This is an NFP internal address used for properly configuring an NFP
+ * expansion BAR.
+ */
+#define MEM_CMD_BASE_ADDR 0x8100000000
+
+/* NSP interface registers */
+#define NSP_BASE (MEM_CMD_BASE_ADDR + 0x22100)
+#define NSP_STATUS 0x00
+#define NSP_COMMAND 0x08
+#define NSP_BUFFER 0x10
+#define NSP_DEFAULT_BUF 0x18
+#define NSP_DEFAULT_BUF_CFG 0x20
+
+#define NSP_MAGIC 0xab10
+#define NSP_STATUS_MAGIC(x) (((x) >> 48) & 0xffff)
+#define NSP_STATUS_MAJOR(x) (int)(((x) >> 44) & 0xf)
+#define NSP_STATUS_MINOR(x) (int)(((x) >> 32) & 0xfff)
+
+/* NSP commands */
+#define NSP_CMD_RESET 1
+#define NSP_CMD_FW_LOAD 6
+#define NSP_CMD_READ_ETH_TABLE 7
+#define NSP_CMD_WRITE_ETH_TABLE 8
+#define NSP_CMD_GET_SYMBOL 14
+
+#define NSP_BUFFER_CFG_SIZE_MASK (0xff)
+
+#define NSP_REG_ADDR(d, off, reg) ((uint8_t *)(d)->mem_base + (off) + (reg))
+#define NSP_REG_VAL(p) (*(uint64_t *)(p))
+
+/*
+ * An NFP expansion BAR is configured to allow access to a specific NFP
+ * target:
+ *
+ * IN:
+ * desc: struct with basic NSP addresses to work with
+ * expbar: NFP PF expansion BAR index to configure
+ * tgt: NFP target to configure access
+ * addr: NFP target address
+ *
+ * OUT:
+ * pcie_offset: NFP PCI BAR offset to work with
+ */
+static void
+nfp_nspu_mem_bar_cfg(nspu_desc_t *desc, int expbar, int tgt,
+ uint64_t addr, uint64_t *pcie_offset)
+{
+ uint64_t x, y, barsz;
+ uint32_t *expbar_ptr;
+
+ barsz = desc->barsz;
+
+ /*
+ * NFP CPP address to configure. This comes from NFP 6000
+ * datasheet document based on Bulk mapping.
+ */
+ x = (addr >> (barsz - 3)) << (21 - (40 - (barsz - 3)));
+ x |= CFG_EXP_BAR_MAP_TYPE << EXP_BAR_MAP_TYPE_SHIFT;
+ x |= CFG_EXP_BAR_ADDR_SZ << EXP_BAR_LENGTH_SHIFT;
+ x |= tgt << EXP_BAR_TARGET_SHIFT;
+
+ /* Getting expansion bar configuration register address */
+ expbar_ptr = (uint32_t *)desc->cfg_base;
+ /* Each physical PCI BAR has 8 NFP expansion BARs */
+ expbar_ptr += (desc->pcie_bar * 8) + expbar;
+
+ /* Writing to the expansion BAR register */
+ *expbar_ptr = (uint32_t)x;
+
+ /* Getting the pcie offset to work with from userspace */
+ y = addr & ((uint64_t)(1 << (barsz - 3)) - 1);
+ *pcie_offset = y;
+}
+
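A worked example of the encoding above, assuming a 128 MB PF BAR (barsz = 27) and the NSP_BASE address defined earlier (0x8100022100):

	/*
	 * addr >> (27 - 3)               = 0x8100    (window index bits)
	 * << (21 - (40 - 24))            = 0x102000
	 * | map type (1 << 29) | addr size (1 << 27) | target (7 << 23)
	 *                                = 0x2b902000 (expansion BAR word)
	 * pcie_offset = addr & 0xffffff  = 0x022100
	 */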
+/*
+ * Configure an expansion BAR for accessing the NSP userspace interface. This
+ * function always configures the same expansion BAR, which implies access to
+ * a previously configured NFP target is lost.
+ */
+static void
+nspu_xlate(nspu_desc_t *desc, uint64_t addr, uint64_t *pcie_offset)
+{
+ nfp_nspu_mem_bar_cfg(desc, desc->exp_bar, NFP_NSP_TARGET, addr,
+ pcie_offset);
+}
+
+int
+nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor)
+{
+ uint64_t pcie_offset;
+ uint64_t nsp_reg;
+
+ nspu_xlate(desc, NSP_BASE, &pcie_offset);
+ nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, pcie_offset, NSP_STATUS));
+
+ if (NSP_STATUS_MAGIC(nsp_reg) != NSP_MAGIC)
+ return -1;
+
+ *major = NSP_STATUS_MAJOR(nsp_reg);
+ *minor = NSP_STATUS_MINOR(nsp_reg);
+
+ return 0;
+}
+
+int
+nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
+ int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap)
+{
+ uint64_t offset, buffaddr;
+ uint64_t nsp_reg;
+
+ desc->nfp = nfp;
+ desc->pcie_bar = pcie_bar;
+ desc->exp_bar = exp_bar;
+ desc->barsz = pcie_barsz;
+ desc->windowsz = 1 << (desc->barsz - 3);
+ desc->cfg_base = exp_bar_cfg_base;
+ desc->mem_base = exp_bar_mmap;
+
+ nspu_xlate(desc, NSP_BASE, &offset);
+
+ /*
+ * Other NSPU clients can use other buffers. Let's tell NSPU we use the
+ * default buffer.
+ */
+ buffaddr = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF));
+ NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_BUFFER)) = buffaddr;
+
+	/* NFP internal addresses are 40 bits. Clear all other bits here */
+ buffaddr = buffaddr & (((uint64_t)1 << 40) - 1);
+ desc->bufaddr = buffaddr;
+
+	/* Let's get information about the buffer */
+ nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF_CFG));
+
+	/* Buffer size comes in MBs. Conversion to bytes */
+ desc->buf_size = ((size_t)nsp_reg & NSP_BUFFER_CFG_SIZE_MASK) << 20;
+
+ return 0;
+}
+
+#define NSPU_NFP_BUF(addr, base, off) \
+ (*(uint64_t *)((uint8_t *)(addr)->mem_base + ((base) | (off))))
+
+#define NSPU_HOST_BUF(base, off) (*(uint64_t *)((uint8_t *)(base) + (off)))
+
+static int
+nspu_buff_write(nspu_desc_t *desc, void *buffer, size_t size)
+{
+ uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
+ uint64_t windowsz = desc->windowsz;
+ uint64_t buffaddr, j, i = 0;
+ int ret = 0;
+
+ if (size > desc->buf_size)
+ return -1;
+
+ buffaddr = desc->bufaddr;
+ windowsz = desc->windowsz;
+
+ while (i < size) {
+ /* Expansion bar reconfiguration per window size */
+ nspu_xlate(desc, buffaddr + i, &pcie_offset);
+ pcie_window_base = pcie_offset & (~(windowsz - 1));
+ pcie_window_offset = pcie_offset & (windowsz - 1);
+ for (j = pcie_window_offset; ((j < windowsz) && (i < size));
+ j += 8) {
+ NSPU_NFP_BUF(desc, pcie_window_base, j) =
+ NSPU_HOST_BUF(buffer, i);
+ i += 8;
+ }
+ }
+
+ return ret;
+}
+
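A worked example of the window split used by the copy loops, assuming a 16 MB window (windowsz = 0x1000000):

	/*
	 * pcie_offset        = 0x1234568
	 * pcie_window_base   = 0x1000000  (pcie_offset & ~(windowsz - 1))
	 * pcie_window_offset = 0x0234568  (pcie_offset & (windowsz - 1))
	 * The copy runs 8 bytes at a time until the window end, then the
	 * expansion BAR is re-translated for the next window.
	 */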
+static int
+nspu_buff_read(nspu_desc_t *desc, void *buffer, size_t size)
+{
+ uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
+ uint64_t windowsz, i = 0, j;
+ uint64_t buffaddr;
+ int ret = 0;
+
+ if (size > desc->buf_size)
+ return -1;
+
+ buffaddr = desc->bufaddr;
+ windowsz = desc->windowsz;
+
+ while (i < size) {
+ /* Expansion bar reconfiguration per window size */
+ nspu_xlate(desc, buffaddr + i, &pcie_offset);
+ pcie_window_base = pcie_offset & (~(windowsz - 1));
+ pcie_window_offset = pcie_offset & (windowsz - 1);
+ for (j = pcie_window_offset; ((j < windowsz) && (i < size));
+ j += 8) {
+ NSPU_HOST_BUF(buffer, i) =
+ NSPU_NFP_BUF(desc, pcie_window_base, j);
+ i += 8;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nspu_command(nspu_desc_t *desc, uint16_t cmd, int read, int write,
+ void *buffer, size_t rsize, size_t wsize)
+{
+ uint64_t status, cmd_reg;
+ uint64_t offset;
+ int retry = 0;
+ int retries = 120;
+ int ret = 0;
+
+ /* Same expansion BAR is used for different things */
+ nspu_xlate(desc, NSP_BASE, &offset);
+
+ status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
+
+ while ((status & 0x1) && (retry < retries)) {
+ status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
+ retry++;
+ sleep(1);
+ }
+
+ if (retry == retries)
+ return -1;
+
+ if (write) {
+ ret = nspu_buff_write(desc, buffer, wsize);
+ if (ret)
+ return ret;
+
+ /* Expansion BAR changes when writing the buffer */
+ nspu_xlate(desc, NSP_BASE, &offset);
+ }
+
+ NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)) =
+ (uint64_t)wsize << 32 | (uint64_t)cmd << 16 | 1;
+
+ retry = 0;
+
+ cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
+ while ((cmd_reg & 0x1) && (retry < retries)) {
+ cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
+ retry++;
+ sleep(1);
+ }
+ if (retry == retries)
+ return -1;
+
+ retry = 0;
+ status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
+ while ((status & 0x1) && (retry < retries)) {
+ status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
+ retry++;
+ sleep(1);
+ }
+
+ if (retry == retries)
+ return -1;
+
+ ret = status & (0xff << 8);
+ if (ret)
+ return ret;
+
+ if (read) {
+ ret = nspu_buff_read(desc, buffer, rsize);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
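For reference, the command register encoding implied by the code above (derived from the code itself, not from NFP documentation):

	/*
	 * NSP_COMMAND register layout:
	 *   bits 63:32  buffer write size (wsize)
	 *   bits 31:16  command code (cmd)
	 *   bit  0      start/busy, polled until the NSP clears it
	 * NSP_STATUS reports errors in bits 15:8, hence the
	 * status & (0xff << 8) mask on the return path.
	 */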
+static int
+nfp_fw_reset(nspu_desc_t *nspu_desc)
+{
+ int res;
+
+ res = nspu_command(nspu_desc, NSP_CMD_RESET, 0, 0, 0, 0, 0);
+
+ if (res < 0)
+ RTE_LOG(INFO, PMD, "fw reset failed: error %d", res);
+
+ return res;
+}
+
+#define DEFAULT_FW_PATH "/lib/firmware/netronome"
+#define DEFAULT_FW_FILENAME "nic_dpdk_default.nffw"
+
+static int
+nfp_fw_upload(nspu_desc_t *nspu_desc)
+{
+ int fw_f;
+ char *fw_buf;
+ char filename[100];
+ struct stat file_stat;
+ off_t fsize, bytes;
+ ssize_t size;
+ int ret;
+
+ size = nspu_desc->buf_size;
+
+ sprintf(filename, "%s/%s", DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
+ fw_f = open(filename, O_RDONLY);
+ if (fw_f < 0) {
+ RTE_LOG(INFO, PMD, "Firmware file %s/%s not found.",
+ DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
+ return -ENOENT;
+ }
+
+ fstat(fw_f, &file_stat);
+
+ fsize = file_stat.st_size;
+ RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n",
+ (uint64_t)fsize);
+
+ if (fsize > (off_t)size) {
+ RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64
+ " bytes (%" PRIu64 " max)",
+ (uint64_t)fsize, (uint64_t)size);
+ return -EINVAL;
+ }
+
+ fw_buf = malloc((size_t)size);
+ if (!fw_buf) {
+ RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
+ return -ENOMEM;
+ }
+ memset(fw_buf, 0, size);
+
+ bytes = read(fw_f, fw_buf, fsize);
+ if (bytes != fsize) {
+ RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n"
+ "Just %" PRIu64 " of %" PRIu64 " bytes read.",
+ (uint64_t)bytes, (uint64_t)fsize);
+ free(fw_buf);
+ return -EIO;
+ }
+
+ ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes);
+
+ free(fw_buf);
+
+ return ret;
+}
+
+/* Firmware symbol descriptor size */
+#define NFP_SYM_DESC_LEN 40
+
+#define SYMBOL_DATA(b, off) (*(int64_t *)((b) + (off)))
+#define SYMBOL_UDATA(b, off) (*(uint64_t *)((b) + (off)))
+
+/* Firmware symbols contain information about how to access what they
+ * represent. It can be as simple as a numeric variable declared in a
+ * specific NFP memory, but it can also be a more complex structure
+ * related to specific hardware functionality or components. Target,
+ * domain and address allow creating the BAR window for accessing such a
+ * hw object, and size defines the length to map.
+ *
+ * A vNIC is a network interface implemented inside the NFP and using a
+ * subset of device PCI BARs. Specific firmware symbols allow host drivers
+ * like the NFP PMD to map those vNIC BARs.
+ *
+ * Accessing what the symbol represents implies mapping the access through
+ * a PCI BAR window. NFP expansion BARs are used in this regard through
+ * the NSPU interface.
+ */
+static int
+nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl,
+ uint32_t expbar, uint64_t *pcie_offset,
+ ssize_t *size)
+{
+ int64_t type;
+ int64_t target;
+ int64_t domain;
+ uint64_t addr;
+ char *sym_buf;
+ int ret = 0;
+
+	sym_buf = malloc(desc->buf_size);
+	if (!sym_buf)
+		return -ENOMEM;
+	strncpy(sym_buf, symbl, strlen(symbl));
+ ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf,
+ NFP_SYM_DESC_LEN, strlen(symbl));
+ if (ret) {
+ RTE_LOG(DEBUG, PMD, "symbol resolution (%s) failed\n", symbl);
+ goto clean;
+ }
+
+ /* Reading symbol information */
+ type = SYMBOL_DATA(sym_buf, 0);
+ target = SYMBOL_DATA(sym_buf, 8);
+ domain = SYMBOL_DATA(sym_buf, 16);
+ addr = SYMBOL_UDATA(sym_buf, 24);
+ *size = (ssize_t)SYMBOL_UDATA(sym_buf, 32);
+
+ if (type != 1) {
+ RTE_LOG(INFO, PMD, "wrong symbol type\n");
+ ret = -EINVAL;
+ goto clean;
+ }
+ if (!(target == 7 || target == -7)) {
+ RTE_LOG(INFO, PMD, "wrong symbol target\n");
+ ret = -EINVAL;
+ goto clean;
+ }
+ if (domain == 8 || domain == 9) {
+ RTE_LOG(INFO, PMD, "wrong symbol domain\n");
+ ret = -EINVAL;
+ goto clean;
+ }
+
+ /* Adjusting address based on symbol location */
+ if ((domain >= 24) && (domain < 28) && (target == 7)) {
+ addr = 1ULL << 37 | addr | ((uint64_t)domain & 0x3) << 35;
+ } else {
+ addr = 1ULL << 39 | addr | ((uint64_t)domain & 0x3f) << 32;
+ if (target == -7)
+ target = 7;
+ }
+
+ /* Configuring NFP expansion bar for mapping specific PCI BAR window */
+ nfp_nspu_mem_bar_cfg(desc, expbar, target, addr, pcie_offset);
+
+ /* This is the PCI BAR offset to use by the host */
+ *pcie_offset |= ((expbar & 0x7) << (desc->barsz - 3));
+
+clean:
+ free(sym_buf);
+ return ret;
+}
+
+int
+nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset)
+{
+ ssize_t bar0_sym_size;
+
+ /* If the symbol resolution works, it implies a firmware app
+ * is already there.
+ */
+ if (!nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
+ pcie_offset, &bar0_sym_size))
+ return 0;
+
+ /* No firmware app detected or not the right one */
+ RTE_LOG(INFO, PMD, "No firmware detected. Resetting NFP...\n");
+ if (nfp_fw_reset(desc) < 0) {
+ RTE_LOG(ERR, PMD, "nfp fw reset failed\n");
+ return -ENODEV;
+ }
+
+ RTE_LOG(INFO, PMD, "Reset done.\n");
+ RTE_LOG(INFO, PMD, "Uploading firmware...\n");
+
+ if (nfp_fw_upload(desc) < 0) {
+ RTE_LOG(ERR, PMD, "nfp fw upload failed\n");
+ return -ENODEV;
+ }
+
+ RTE_LOG(INFO, PMD, "Done.\n");
+
+ /* Now the symbol should be there */
+ if (nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
+ pcie_offset, &bar0_sym_size)) {
+ RTE_LOG(ERR, PMD, "nfp PF BAR symbol resolution failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int
+nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
+{
+ ssize_t bar0_sym_size;
+
+ if (nfp_nspu_set_bar_from_symbl(desc, "_pf0_net_bar0",
+ NFP_NET_PF_CFG_EXP_BAR,
+ pcie_offset, &bar0_sym_size))
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * This is a hardcoded NFP internal CPP bus address for the hw queues unit
+ * inside the PCIE island.
+ */
+#define NFP_CPP_PCIE_QUEUES ((uint64_t)(1ULL << 39) | 0x80000 | \
+ ((uint64_t)0x4 & 0x3f) << 32)
+
+/* Configure a specific NFP expansion bar for accessing the vNIC rx/tx BARs */
+void
+nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
+{
+ nfp_nspu_mem_bar_cfg(desc, NFP_NET_PF_HW_QUEUES_EXP_BAR, 0,
+ NFP_CPP_PCIE_QUEUES, pcie_offset);
+
+ /* This is the pcie offset to use by the host */
+ *pcie_offset |= ((NFP_NET_PF_HW_QUEUES_EXP_BAR & 0x7) << (27 - 3));
+}
+
+int
+nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
+{
+ union eth_table_entry *entries, *entry;
+ int modified;
+ int ret, idx;
+ int i;
+
+ idx = port;
+
+ RTE_LOG(INFO, PMD, "Hw ethernet port %d configure...\n", port);
+ rte_spinlock_lock(&desc->nsp_lock);
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries) {
+ rte_spinlock_unlock(&desc->nsp_lock);
+ return -ENOMEM;
+ }
+
+ ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, entries,
+ NSP_ETH_TABLE_SIZE, 0);
+ if (ret) {
+ rte_spinlock_unlock(&desc->nsp_lock);
+ return ret;
+ }
+
+ entry = entries;
+
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
+ /* ports in use do not appear sequentially in the table */
+ if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
+ /* entry not in use */
+ entry++;
+ continue;
+ }
+ if (idx == 0)
+ break;
+ idx--;
+ entry++;
+ }
+
+ if (i == NSP_ETH_MAX_COUNT) {
+ rte_spinlock_unlock(&desc->nsp_lock);
+ return -EINVAL;
+ }
+
+ if (up && !(entry->state & NSP_ETH_STATE_CONFIGURED)) {
+ entry->control |= NSP_ETH_STATE_CONFIGURED;
+ modified = 1;
+ }
+
+ if (!up && (entry->state & NSP_ETH_STATE_CONFIGURED)) {
+ entry->control &= ~NSP_ETH_STATE_CONFIGURED;
+ modified = 1;
+ }
+
+ if (modified) {
+ ret = nspu_command(desc, NSP_CMD_WRITE_ETH_TABLE, 0, 1, entries,
+ 0, NSP_ETH_TABLE_SIZE);
+ if (!ret)
+ RTE_LOG(INFO, PMD,
+ "Hw ethernet port %d configure done\n", port);
+ else
+ RTE_LOG(INFO, PMD,
+ "Hw ethernet port %d configure failed\n", port);
+ }
+ rte_spinlock_unlock(&desc->nsp_lock);
+ return ret;
+}
+
+int
+nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table)
+{
+ int ret;
+
+ RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n");
+	/* port 0 allocates the eth table and reads it using NSPU */
+ *table = malloc(NSP_ETH_TABLE_SIZE);
+	if (!*table)
+ return -ENOMEM;
+
+ ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table,
+ NSP_ETH_TABLE_SIZE, 0);
+ if (ret)
+ return ret;
+
+ RTE_LOG(INFO, PMD, "Done\n");
+
+ return 0;
+}
diff --git a/drivers/net/nfp/nfp_nspu.h b/drivers/net/nfp/nfp_nspu.h
new file mode 100644
index 00000000..8c33835e
--- /dev/null
+++ b/drivers/net/nfp/nfp_nspu.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_nspu.h
+ *
+ * Netronome NFP_NET PMD driver
+ */
+
+/*
+ * NSP is the NFP Service Processor. NSPU is the NSP Userspace interface.
+ *
+ * The NFP NSP helps with firmware/hardware configuration. NSP is another
+ * component in the NFP programmable processor, and accessing it from the
+ * host requires first configuring a specific NFP PCI expansion BAR.
+ *
+ * Once access is ready, configuration can be done by reading from and
+ * writing to a specific PF PCI BAR window. This same interface allows
+ * creating other PCI BAR windows for accessing other NFP components.
+ *
+ * This file includes low-level functions, using the NSPU interface, and
+ * high-level functions, invoked by the PMD for using NSP services. This
+ * allows firmware upload, vNIC PCI BAR mapping and other low-level
+ * configuration like link setup.
+ *
+ * NSP access is done during initialization and is not involved at all in
+ * the fast path.
+ */
+
+#include <rte_spinlock.h>
+#include "nfp_net_eth.h"
+
+typedef struct {
+ int nfp; /* NFP device */
+ int pcie_bar; /* PF PCI BAR to work with */
+ int exp_bar; /* Expansion BAR number used by NSPU */
+ int barsz; /* PCIE BAR log2 size */
+ uint64_t bufaddr; /* commands buffer address */
+ size_t buf_size; /* commands buffer size */
+ uint64_t windowsz; /* NSPU BAR window size */
+ void *cfg_base; /* Expansion BARs address */
+ void *mem_base; /* NSP interface */
+ rte_spinlock_t nsp_lock;
+} nspu_desc_t;
+
+int nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
+ int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap);
+int nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor);
+int nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset);
+int nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
+void nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
+int nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up);
+int nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table);
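A minimal sketch of the typical call sequence, mirroring the PF probe path; error handling is trimmed, and the descriptor is kept open on success as the PMD does:

#include <rte_bus_pci.h>
#include "nfp_nfpu.h"

static int
nspu_bringup(struct rte_pci_device *pci_dev)
{
	static nfpu_desc_t nfpu;	/* must outlive this call */
	uint64_t pcie_offset;
	int major, minor;

	if (nfpu_open(pci_dev, &nfpu, 0) < 0)
		return -1;

	/* NSP ABI 0.20 or higher is required */
	if (nfp_nsp_get_abi_version(nfpu.nspu, &major, &minor) < 0 ||
	    (major == 0 && minor < 20))
		goto out;

	/* Resolve the firmware symbol, uploading firmware if needed */
	if (nfp_nsp_fw_setup(nfpu.nspu, "nfd_cfg_pf0_num_ports",
	    &pcie_offset) == 0)
		return 0;
out:
	nfpu_close(&nfpu);
	return -1;
}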
diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile
index 77810bce..9331ccac 100644
--- a/drivers/net/null/Makefile
+++ b/drivers/net/null/Makefile
@@ -38,6 +38,9 @@ LIB = librte_pmd_null.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_null_version.map
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 5aef0591..032c30e9 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -36,7 +36,7 @@
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
@@ -68,7 +68,7 @@ struct null_queue {
struct pmd_internals {
unsigned packet_size;
unsigned packet_copy;
- uint8_t port_id;
+ uint16_t port_id;
struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
@@ -298,7 +298,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
unsigned i, num_stats;
@@ -306,7 +306,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
const struct pmd_internals *internal;
if ((dev == NULL) || (igb_stats == NULL))
- return;
+ return -EINVAL;
internal = dev->data->dev_private;
num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
@@ -333,6 +333,8 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
igb_stats->ipackets = rx_total;
igb_stats->opackets = tx_total;
igb_stats->oerrors = tx_err_total;
+
+ return 0;
}
static void
@@ -540,8 +542,6 @@ eth_dev_null_create(struct rte_vdev_device *dev,
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
-
/* finally assign rx and tx ops */
if (packet_copy) {
eth_dev->rx_pkt_burst = eth_null_copy_rx;
diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile
new file mode 100644
index 00000000..9c27fdfe
--- /dev/null
+++ b/drivers/net/octeontx/Makefile
@@ -0,0 +1,79 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+
+EXPORT_MAP := rte_pmd_octeontx_version.map
+
+LIBABIVER := 1
+
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkovf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_pkivf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_bgx.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += octeontx_ethdev.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_octeontx_rxtx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+else
+CFLAGS_octeontx_rxtx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_octeontx_rxtx.o += -O3 -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_mempool_octeontx
+LDLIBS += -lrte_eventdev
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c
new file mode 100644
index 00000000..c2d0d433
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_bgx.c
@@ -0,0 +1,273 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include "octeontx_bgx.h"
+
+int
+octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_conf_t bgx_conf;
+ int len = sizeof(octeontx_mbox_bgx_port_conf_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_CONFIG;
+ hdr.vfid = port;
+
+ memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ if (res < 0)
+ return -EACCES;
+
+ conf->enable = bgx_conf.enable;
+ conf->promisc = bgx_conf.promisc;
+ conf->bpen = bgx_conf.bpen;
+ conf->node = bgx_conf.node;
+ conf->base_chan = bgx_conf.base_chan;
+ conf->num_chans = bgx_conf.num_chans;
+ conf->mtu = bgx_conf.mtu;
+ conf->bgx = bgx_conf.bgx;
+ conf->lmac = bgx_conf.lmac;
+ conf->mode = bgx_conf.mode;
+ conf->pkind = bgx_conf.pkind;
+ memcpy(conf->macaddr, bgx_conf.macaddr, 6);
+
+ return res;
+}
+
+int
+octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_status_t bgx_stat;
+ int len = sizeof(octeontx_mbox_bgx_port_status_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
+ if (res < 0)
+ return -EACCES;
+
+ stat->link_up = bgx_stat.link_up;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats)
+{
+ struct octeontx_mbox_hdr hdr;
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int len = sizeof(octeontx_mbox_bgx_port_stats_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
+ if (res < 0)
+ return -EACCES;
+
+ stats->rx_packets = bgx_stats.rx_packets;
+ stats->rx_bytes = bgx_stats.rx_bytes;
+ stats->rx_dropped = bgx_stats.rx_dropped;
+ stats->rx_errors = bgx_stats.rx_errors;
+ stats->tx_packets = bgx_stats.tx_packets;
+ stats->tx_bytes = bgx_stats.tx_bytes;
+ stats->tx_dropped = bgx_stats.tx_dropped;
+ stats->tx_errors = bgx_stats.tx_errors;
+ return res;
+}
+
+int
+octeontx_bgx_port_stats_clr(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_CLR_STATS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_link_status(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t link;
+ int len = sizeof(uint8_t);
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &link, len);
+ if (res < 0)
+ return -EACCES;
+
+ return link;
+}
+
+int
+octeontx_bgx_port_promisc_set(int port, int en)
+{
+ struct octeontx_mbox_hdr hdr;
+ uint8_t prom;
+ int res;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_PROMISC;
+ hdr.vfid = port;
+ prom = en ? 1 : 0;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr)
+{
+ struct octeontx_mbox_hdr hdr;
+ int len = 6;
+ int res = 0;
+
+ hdr.coproc = OCTEONTX_BGX_COPROC;
+ hdr.msg = MBOX_BGX_PORT_SET_MACADDR;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, mac_addr, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
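
The helpers above all follow the same mailbox pattern: fill an octeontx_mbox_hdr with coproc/msg/vfid and hand the payload to octeontx_ssovf_mbox_send(). A minimal sketch, assuming a valid port number, of how an ethdev init path might chain them (illustrative only, not part of the patch):

    #include "octeontx_bgx.h"

    static int
    example_bgx_bring_up(int port)
    {
            octeontx_mbox_bgx_port_conf_t conf;
            int ret;

            ret = octeontx_bgx_port_open(port, &conf); /* fetch current config */
            if (ret < 0)
                    return ret; /* -EACCES when the mailbox send fails */

            ret = octeontx_bgx_port_promisc_set(port, 0); /* unicast only */
            if (ret < 0)
                    return ret;

            return octeontx_bgx_port_start(port); /* enable the LMAC */
    }
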
diff --git a/drivers/net/octeontx/base/octeontx_bgx.h b/drivers/net/octeontx/base/octeontx_bgx.h
new file mode 100644
index 00000000..f740a1d9
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_bgx.h
@@ -0,0 +1,150 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_BGX_H__
+#define __OCTEONTX_BGX_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_BGX_COPROC 6
+
+/* BGX messages */
+#define MBOX_BGX_PORT_OPEN 0
+#define MBOX_BGX_PORT_CLOSE 1
+#define MBOX_BGX_PORT_START 2
+#define MBOX_BGX_PORT_STOP 3
+#define MBOX_BGX_PORT_GET_CONFIG 4
+#define MBOX_BGX_PORT_GET_STATUS 5
+#define MBOX_BGX_PORT_GET_STATS 6
+#define MBOX_BGX_PORT_CLR_STATS 7
+#define MBOX_BGX_PORT_GET_LINK_STATUS 8
+#define MBOX_BGX_PORT_SET_PROMISC 9
+#define MBOX_BGX_PORT_SET_MACADDR 10
+#define MBOX_BGX_PORT_SET_BP 11
+#define MBOX_BGX_PORT_SET_BCAST 12
+#define MBOX_BGX_PORT_SET_MCAST 13
+
+/* BGX port configuration parameters: */
+typedef struct octeontx_mbox_bgx_port_conf {
+ uint8_t enable;
+ uint8_t promisc;
+ uint8_t bpen;
+ uint8_t macaddr[6]; /* MAC address.*/
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint8_t node; /* CPU node */
+ uint16_t base_chan;
+ uint16_t num_chans;
+ uint16_t mtu;
+ uint8_t bgx;
+ uint8_t lmac;
+ uint8_t mode;
+ uint8_t pkind;
+} octeontx_mbox_bgx_port_conf_t;
+
+/* BGX port status: */
+typedef struct octeontx_mbox_bgx_port_status {
+ uint8_t link_up;
+ uint8_t bp;
+} octeontx_mbox_bgx_port_status_t;
+
+/* BGX port statistics: */
+typedef struct octeontx_mbox_bgx_port_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+ uint64_t rx_errors;
+ uint64_t tx_errors;
+ uint64_t rx_dropped;
+ uint64_t tx_dropped;
+ uint64_t multicast;
+ uint64_t collisions;
+
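+ /* Detailed receive errors. */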
+ uint64_t rx_length_errors;
+ uint64_t rx_over_errors;
+ uint64_t rx_crc_errors;
+ uint64_t rx_frame_errors;
+ uint64_t rx_fifo_errors;
+ uint64_t rx_missed_errors;
+
+ /* Detailed transmit errors. */
+ uint64_t tx_aborted_errors;
+ uint64_t tx_carrier_errors;
+ uint64_t tx_fifo_errors;
+ uint64_t tx_heartbeat_errors;
+ uint64_t tx_window_errors;
+
+ /* Extended statistics based on RFC2819. */
+ uint64_t rx_1_to_64_packets;
+ uint64_t rx_65_to_127_packets;
+ uint64_t rx_128_to_255_packets;
+ uint64_t rx_256_to_511_packets;
+ uint64_t rx_512_to_1023_packets;
+ uint64_t rx_1024_to_1522_packets;
+ uint64_t rx_1523_to_max_packets;
+
+ uint64_t tx_1_to_64_packets;
+ uint64_t tx_65_to_127_packets;
+ uint64_t tx_128_to_255_packets;
+ uint64_t tx_256_to_511_packets;
+ uint64_t tx_512_to_1023_packets;
+ uint64_t tx_1024_to_1522_packets;
+ uint64_t tx_1523_to_max_packets;
+
+ uint64_t tx_multicast_packets;
+ uint64_t rx_broadcast_packets;
+ uint64_t tx_broadcast_packets;
+ uint64_t rx_undersized_errors;
+ uint64_t rx_oversize_errors;
+ uint64_t rx_fragmented_errors;
+ uint64_t rx_jabber_errors;
+} octeontx_mbox_bgx_port_stats_t;
+
+int octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_close(int port);
+int octeontx_bgx_port_start(int port);
+int octeontx_bgx_port_stop(int port);
+int octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf);
+int octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat);
+int octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats);
+int octeontx_bgx_port_stats_clr(int port);
+int octeontx_bgx_port_link_status(int port);
+int octeontx_bgx_port_promisc_set(int port, int en);
+int octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr);
+
+#endif /* __OCTEONTX_BGX_H__ */
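
A hedged sketch of mapping these counters onto the generic ethdev statistics; the field correspondence is an assumption drawn from the names above, not a documented contract:

    #include <string.h>
    #include <rte_ethdev.h>
    #include "octeontx_bgx.h"

    static int
    example_bgx_to_eth_stats(int port, struct rte_eth_stats *out)
    {
            octeontx_mbox_bgx_port_stats_t bgx;
            int ret;

            ret = octeontx_bgx_port_stats(port, &bgx);
            if (ret < 0)
                    return ret;

            memset(out, 0, sizeof(*out));
            out->ipackets = bgx.rx_packets;
            out->ibytes   = bgx.rx_bytes;
            out->imissed  = bgx.rx_dropped;
            out->ierrors  = bgx.rx_errors;
            out->opackets = bgx.tx_packets;
            out->obytes   = bgx.tx_bytes;
            out->oerrors  = bgx.tx_errors;
            return 0;
    }
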
diff --git a/drivers/net/octeontx/base/octeontx_io.h b/drivers/net/octeontx/base/octeontx_io.h
new file mode 100644
index 00000000..ec4ce1dc
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_io.h
@@ -0,0 +1,156 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_IO_H__
+#define __OCTEONTX_IO_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <rte_io.h>
+
+/* In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO operations
+ * are safe to use without any IO memory barriers.
+ */
+#define octeontx_read64 rte_read64_relaxed
+#define octeontx_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define octeontx_prefetch_store_keep(_ptr) ({\
+ asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+
+#define octeontx_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define octeontx_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized fallbacks for building on non-arm64 architectures */
+
+#define octeontx_prefetch_store_keep(_ptr) do {} while (0)
+
+#define octeontx_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define octeontx_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+/**
+ * Perform an atomic fetch-and-add operation.
+ */
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ uint64_t old_val;
+
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldadd %1, %0, [%2]\n"
+ : "=r" (old_val) : "r" (off), "r" (addr) : "memory");
+
+ return old_val;
+}
+
+/**
+ * Perform an LMTST operation - an atomic write of up to 128 bytes to
+ * an I/O block that supports this operation type.
+ *
+ * @param lmtline_va is the address where LMTLINE is mapped
+ * @param ioreg_va is the virtual address of the device register
+ * @param cmdbuf is the array of peripheral commands to execute
+ * @param cmdsize is the number of 64-bit words in 'cmdbuf'
+ *
+ * @return N/A
+ */
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ uint64_t result;
+ uint64_t word_count;
+ uint64_t *lmtline = lmtline_va;
+
+ word_count = cmdsize;
+
+ do {
+ /* Copy commands to LMTLINE */
+ for (result = 0; result < word_count; result += 2) {
+ lmtline[result + 0] = cmdbuf[result + 0];
+ lmtline[result + 1] = cmdbuf[result + 1];
+ }
+
+ /* LDEOR initiates atomic transfer to I/O device */
+ __asm__ volatile(
+ " .cpu generic+lse\n"
+ " ldeor xzr, %0, [%1]\n"
+ : "=r" (result) : "r" (ioreg_va) : "memory");
+ } while (!result);
+}
+
+#else
+
+static inline uint64_t
+octeontx_reg_ldadd_u64(void *addr, int64_t off)
+{
+ RTE_SET_USED(addr);
+ RTE_SET_USED(off);
+ return 0;
+}
+
+static inline void
+octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
+ uint64_t cmdsize)
+{
+ RTE_SET_USED(lmtline_va);
+ RTE_SET_USED(ioreg_va);
+ RTE_SET_USED(cmdbuf);
+ RTE_SET_USED(cmdsize);
+}
+
+#endif
+#endif /* __OCTEONTX_IO_H__ */
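
A short usage sketch for the primitives above; example_io(), its arguments, and the zeroed command buffer are placeholders (a real command would follow the PKO send-descriptor format defined elsewhere):

    #include <stdint.h>
    #include "octeontx_io.h"

    static void
    example_io(void *lmtline_va, void *ioreg_va, void *counter_va)
    {
            uint64_t cmd[4] = {0}; /* device-specific command words */
            uint64_t prev;

            /* Retries internally until the LDEOR status reports success. */
            octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd, 4);

            /* Fetch-and-add of 0 is an atomic read of the register. */
            prev = octeontx_reg_ldadd_u64(counter_va, 0);
            (void)prev;
    }
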
diff --git a/drivers/net/octeontx/base/octeontx_pki_var.h b/drivers/net/octeontx/base/octeontx_pki_var.h
new file mode 100644
index 00000000..def6cbb9
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -0,0 +1,237 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_PKI_VAR_H__
+#define __OCTEONTX_PKI_VAR_H__
+
+#include <rte_byteorder.h>
+
+#define OCTTX_PACKET_WQE_SKIP 128
+#define OCTTX_PACKET_FIRST_SKIP 240
+#define OCTTX_PACKET_LATER_SKIP 128
+
+/* WQE descriptor */
+typedef union octtx_wqe_s {
+ uint64_t w[6];
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct {
+ uint64_t pknd : 6;
+ uint64_t rsvd0 : 10;
+ uint64_t style : 8;
+ uint64_t bufs : 8;
+ uint64_t chan : 12;
+ uint64_t apad : 3;
+ uint64_t rsvd1 : 1;
+ uint64_t aura : 12;
+ uint64_t rsvd2 : 4;
+ } w0;
+
+ struct {
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd0 : 2;
+ uint64_t rsvd1 : 2;
+ uint64_t len : 16;
+ } w1;
+
+ struct {
+ uint64_t op_code : 8;
+ uint64_t err_lev : 3;
+ uint64_t raw : 1;
+ uint64_t l2m : 1;
+ uint64_t l2b : 1;
+ uint64_t l3m : 1;
+ uint64_t l3b : 1;
+ uint64_t l3fr : 1;
+ uint64_t pf1 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf4 : 1;
+ uint64_t sh : 1;
+ uint64_t vs : 1;
+ uint64_t vv : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t lae : 1;
+ uint64_t lbty : 5;
+ uint64_t lcty : 5;
+ uint64_t ldty : 5;
+ uint64_t lety : 5;
+ uint64_t lfty : 5;
+ uint64_t lgty : 5;
+ uint64_t sw : 1;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t laptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t lcptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t leptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t vlptr : 8;
+ } w4;
+
+ struct {
+ uint64_t rsvd0 : 47;
+ uint64_t dwd : 1;
+ uint64_t size : 16;
+ } w5;
+#else
+ struct {
+ uint64_t rsvd2 : 4;
+ uint64_t aura : 12;
+ uint64_t rsvd1 : 1;
+ uint64_t apad : 3;
+ uint64_t chan : 12;
+ uint64_t bufs : 8;
+ uint64_t style : 8;
+ uint64_t rsvd0 : 10;
+ uint64_t pknd : 6;
+ } w0;
+
+ struct {
+ uint64_t len : 16;
+ uint64_t rsvd1 : 2;
+ uint64_t rsvd0 : 2;
+ uint64_t grp : 10;
+ uint64_t tt : 2;
+ uint64_t tag : 32;
+ } w1;
+
+ struct {
+ uint64_t sw : 1;
+ uint64_t lgty : 5;
+ uint64_t lfty : 5;
+ uint64_t lety : 5;
+ uint64_t ldty : 5;
+ uint64_t lcty : 5;
+ uint64_t lbty : 5;
+ uint64_t lae : 1;
+ uint64_t rsvd0 : 8;
+ uint64_t vv : 1;
+ uint64_t vs : 1;
+ uint64_t sh : 1;
+ uint64_t pf4 : 1;
+ uint64_t pf3 : 1;
+ uint64_t pf2 : 1;
+ uint64_t pf1 : 1;
+ uint64_t l3fr : 1;
+ uint64_t l3b : 1;
+ uint64_t l3m : 1;
+ uint64_t l2b : 1;
+ uint64_t l2m : 1;
+ uint64_t raw : 1;
+ uint64_t err_lev : 3;
+ uint64_t op_code : 8;
+ } w2;
+
+ struct {
+ uint64_t addr; /* Byte addr of start-of-pkt */
+ } w3;
+
+ struct {
+ uint64_t vlptr : 8;
+ uint64_t lgptr : 8;
+ uint64_t lfptr : 8;
+ uint64_t leptr : 8;
+ uint64_t ldprt : 8;
+ uint64_t lcptr : 8;
+ uint64_t lbptr : 8;
+ uint64_t laptr : 8;
+ } w4;
+#endif
+ } s;
+
+} __rte_packed octtx_wqe_t;
+
+enum occtx_pki_ltype_e {
+ OCCTX_PKI_LTYPE_NONE = 0,
+ OCCTX_PKI_LTYPE_ENET = 1,
+ OCCTX_PKI_LTYPE_VLAN = 2,
+ OCCTX_PKI_LTYPE_SNAP_PAYLD = 5,
+ OCCTX_PKI_LTYPE_ARP = 6,
+ OCCTX_PKI_LTYPE_RARP = 7,
+ OCCTX_PKI_LTYPE_IP4 = 8,
+ OCCTX_PKI_LTYPE_IP4_OPT = 9,
+ OCCTX_PKI_LTYPE_IP6 = 0xa,
+ OCCTX_PKI_LTYPE_IP6_OPT = 0xb,
+ OCCTX_PKI_LTYPE_IPSEC_ESP = 0xc,
+ OCCTX_PKI_LTYPE_IPFRAG = 0xd,
+ OCCTX_PKI_LTYPE_IPCOMP = 0xe,
+ OCCTX_PKI_LTYPE_TCP = 0x10,
+ OCCTX_PKI_LTYPE_UDP = 0x11,
+ OCCTX_PKI_LTYPE_SCTP = 0x12,
+ OCCTX_PKI_LTYPE_UDP_VXLAN = 0x13,
+ OCCTX_PKI_LTYPE_GRE = 0x14,
+ OCCTX_PKI_LTYPE_NVGRE = 0x15,
+ OCCTX_PKI_LTYPE_GTP = 0x16,
+ OCCTX_PKI_LTYPE_UDP_GENEVE = 0x17,
+ OCCTX_PKI_LTYPE_SW28 = 0x1c,
+ OCCTX_PKI_LTYPE_SW29 = 0x1d,
+ OCCTX_PKI_LTYPE_SW30 = 0x1e,
+ OCCTX_PKI_LTYPE_SW31 = 0x1f,
+ OCCTX_PKI_LTYPE_LAST
+};
+
+enum lc_type_e {
+ LC_NONE = OCCTX_PKI_LTYPE_NONE,
+ LC_IPV4 = OCCTX_PKI_LTYPE_IP4,
+ LC_IPV4_OPT = OCCTX_PKI_LTYPE_IP4_OPT,
+ LC_IPV6 = OCCTX_PKI_LTYPE_IP6,
+ LC_IPV6_OPT = OCCTX_PKI_LTYPE_IP6_OPT,
+};
+
+enum le_type_e {
+ LE_NONE = OCCTX_PKI_LTYPE_NONE,
+};
+
+enum lf_type_e {
+ LF_NONE = OCCTX_PKI_LTYPE_NONE,
+ LF_IPSEC_ESP = OCCTX_PKI_LTYPE_IPSEC_ESP,
+ LF_IPFRAG = OCCTX_PKI_LTYPE_IPFRAG,
+ LF_IPCOMP = OCCTX_PKI_LTYPE_IPCOMP,
+ LF_TCP = OCCTX_PKI_LTYPE_TCP,
+ LF_UDP = OCCTX_PKI_LTYPE_UDP,
+ LF_GRE = OCCTX_PKI_LTYPE_GRE,
+ LF_UDP_GENEVE = OCCTX_PKI_LTYPE_UDP_GENEVE,
+ LF_UDP_VXLAN = OCCTX_PKI_LTYPE_UDP_VXLAN,
+ LF_NVGRE = OCCTX_PKI_LTYPE_NVGRE,
+};
+#endif /* __OCTEONTX_PKI_VAR_H__ */
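
A sketch of consuming this layout: both endian branches expose the same field names, so a hypothetical decode helper can read the packet length, start-of-packet address, and layer-C protocol type directly:

    #include <stdint.h>
    #include "octeontx_pki_var.h"

    static void
    example_wqe_decode(const octtx_wqe_t *wqe)
    {
            uint16_t pkt_len = wqe->s.w1.len;   /* total packet length */
            const void *pkt =                   /* start-of-packet VA  */
                    (const void *)(uintptr_t)wqe->s.w3.addr;
            int is_ipv4 = (wqe->s.w2.lcty == OCCTX_PKI_LTYPE_IP4);

            (void)pkt_len; (void)pkt; (void)is_ipv4;
    }
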
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
new file mode 100644
index 00000000..b97f05cd
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -0,0 +1,169 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_pkivf.h"
+
+int
+octeontx_pki_port_open(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_OPEN;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_hash_cfg_t h_cfg = *(mbox_pki_hash_cfg_t *)hash_cfg;
+ int len = sizeof(mbox_pki_hash_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &h_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int
+octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_pktbuf_cfg_t b_cfg = *(mbox_pki_pktbuf_cfg_t *)buf_cfg;
+ int len = sizeof(mbox_pki_pktbuf_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &b_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+ return res;
+}
+
+int
+octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_qos_cfg_t q_cfg = *(mbox_pki_qos_cfg_t *)qos_cfg;
+ int len = sizeof(mbox_pki_qos_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+
+int
+octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_errcheck_cfg_t e_cfg;
+ e_cfg = *((mbox_pki_errcheck_cfg_t *)(cfg));
+ int len = sizeof(mbox_pki_errcheck_cfg_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKI_VF 0xA0DD
+
+/* PKIVF PCIe device */
+static int
+pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(pci_dev);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_pkivf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKI_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkivf = {
+ .id_table = pci_pkivf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkivf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkivf, pci_pkivf);
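
A sketch, assuming the firmware applies only the fields whose mmask f_* bit is set (the structures suggest but do not document this), of opening a port and programming its packet-buffer geometry with the OCTTX_PACKET_* constants from octeontx_pki_var.h:

    #include <string.h>
    #include "octeontx_pki_var.h"
    #include "octeontx_pkivf.h"

    static int
    example_pki_setup(int port)
    {
            pki_pktbuf_cfg_t cfg;
            int ret;

            ret = octeontx_pki_port_open(port);
            if (ret < 0)
                    return ret;

            memset(&cfg, 0, sizeof(cfg));
            cfg.port_type = OCTTX_PORT_TYPE_NET;
            cfg.mmask.f_wqe_skip = 1;
            cfg.wqe_skip = OCTTX_PACKET_WQE_SKIP;
            cfg.mmask.f_first_skip = 1;
            cfg.first_skip = OCTTX_PACKET_FIRST_SKIP;

            return octeontx_pki_port_pktbuf_config(port, &cfg);
    }
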
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
new file mode 100644
index 00000000..004dedcc
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -0,0 +1,553 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_PKI_H__
+#define __OCTEONTX_PKI_H__
+
+#include <stdint.h>
+
+#include <octeontx_mbox.h>
+
+#define OCTEONTX_PKI_COPROC 5
+
+/* PKI messages */
+
+#define MBOX_PKI_PORT_OPEN 1
+#define MBOX_PKI_PORT_START 2
+#define MBOX_PKI_PORT_STOP 3
+#define MBOX_PKI_PORT_CLOSE 4
+#define MBOX_PKI_PORT_CONFIG 5
+#define MBOX_PKI_PORT_OPT_PARSER_CONFIG 6
+#define MBOX_PKI_PORT_CUSTOM_PARSER_CONFIG 7
+#define MBOX_PKI_PORT_PKTBUF_CONFIG 8
+#define MBOX_PKI_PORT_HASH_CONFIG 9
+#define MBOX_PKI_PORT_ERRCHK_CONFIG 10
+#define MBOX_PKI_PORT_CREATE_QOS 11
+#define MBOX_PKI_PORT_MODIFY_QOS 12
+#define MBOX_PKI_PORT_DELETE_QOS 13
+#define MBOX_PKI_PORT_PKTDROP_CONFIG 14
+#define MBOX_PKI_PORT_WQE_GEN_CONFIG 15
+#define MBOX_PKI_BACKPRESSURE_CONFIG 16
+#define MBOX_PKI_PORT_GET_STATS 17
+#define MBOX_PKI_PORT_RESET_STATS 18
+#define MBOX_PKI_GET_PORT_CONFIG 19
+#define MBOX_PKI_GET_PORT_QOS_CONFIG 20
+
+#define MBOX_PKI_MAX_QOS_ENTRY 64
+
+/* pki pkind parse mode */
+enum {
+ MBOX_PKI_PARSE_LA_TO_LG = 0,
+ MBOX_PKI_PARSE_LB_TO_LG = 1,
+ MBOX_PKI_PARSE_LC_TO_LG = 3,
+ MBOX_PKI_PARSE_LG = 0x3f,
+ MBOX_PKI_PARSE_NOTHING = 0x7f
+};
+
+/* Interface types: */
+enum {
+ OCTTX_PORT_TYPE_NET, /* Network interface ports */
+ OCTTX_PORT_TYPE_INT, /* CPU internal interface ports */
+ OCTTX_PORT_TYPE_PCI, /* DPI/PCIe interface ports */
+ OCTTX_PORT_TYPE_MAX
+};
+
+/* pki port type */
+typedef struct mbox_pki_port_type {
+ uint8_t port_type;
+} mbox_pki_port_t;
+
+/* pki port config */
+typedef struct mbox_pki_port_cfg {
+ uint8_t port_type;
+ struct {
+ uint8_t fcs_pres:1;
+ uint8_t fcs_skip:1;
+ uint8_t parse_mode:1;
+ uint8_t mpls_parse:1;
+ uint8_t inst_hdr_parse:1;
+ uint8_t fulc_parse:1;
+ uint8_t dsa_parse:1;
+ uint8_t hg2_parse:1;
+ uint8_t hg_parse:1;
+ } mmask;
+ uint8_t fcs_pres;
+ uint8_t fcs_skip;
+ uint8_t parse_mode;
+ uint8_t mpls_parse;
+ uint8_t inst_hdr_parse;
+ uint8_t fulc_parse;
+ uint8_t dsa_parse;
+ uint8_t hg2_parse;
+ uint8_t hg_parse;
+} mbox_pki_prt_cfg_t;
+
+/* pki Flow/style packet buffer config */
+typedef struct mbox_pki_port_pktbuf_cfg {
+ uint8_t port_type;
+ struct {
+ uint16_t f_mbuff_size:1;
+ uint16_t f_wqe_skip:1;
+ uint16_t f_first_skip:1;
+ uint16_t f_later_skip:1;
+ uint16_t f_pkt_outside_wqe:1;
+ uint16_t f_wqe_endian:1;
+ uint16_t f_cache_mode:1;
+ } mmask;
+ uint16_t mbuff_size;
+ uint16_t wqe_skip;
+ uint16_t first_skip;
+ uint16_t later_skip;
+ uint8_t pkt_outside_wqe;
+ uint8_t wqe_endian;
+ uint8_t cache_mode;
+} mbox_pki_pktbuf_cfg_t;
+
+/* pki flow/style tag config */
+typedef struct mbox_pki_port_hash_cfg {
+ uint8_t port_type;
+ uint32_t tag_slf:1;
+ uint32_t tag_sle:1;
+ uint32_t tag_sld:1;
+ uint32_t tag_slc:1;
+ uint32_t tag_dlf:1;
+ uint32_t tag_dle:1;
+ uint32_t tag_dld:1;
+ uint32_t tag_dlc:1;
+ uint32_t tag_prt:1;
+ uint32_t tag_vlan0:1;
+ uint32_t tag_vlan1:1;
+ uint32_t tag_ip_pctl:1;
+ uint32_t tag_sync:1;
+ uint32_t tag_spi:1;
+ uint32_t tag_gtp:1;
+ uint32_t tag_vni:1;
+} mbox_pki_hash_cfg_t;
+
+/* pki flow/style errcheck config */
+typedef struct mbox_pki_port_errcheck_cfg {
+ uint8_t port_type;
+ struct {
+ uint32_t f_ip6_udp_opt:1;
+ uint32_t f_lenerr_en:1;
+ uint32_t f_maxerr_en:1;
+ uint32_t f_minerr_en:1;
+ uint32_t f_fcs_chk:1;
+ uint32_t f_fcs_strip:1;
+ uint32_t f_len_lf:1;
+ uint32_t f_len_le:1;
+ uint32_t f_len_ld:1;
+ uint32_t f_len_lc:1;
+ uint32_t f_csum_lf:1;
+ uint32_t f_csum_le:1;
+ uint32_t f_csum_ld:1;
+ uint32_t f_csum_lc:1;
+ uint32_t f_min_frame_len;
+ uint32_t f_max_frame_len;
+ } mmask;
+ uint64_t ip6_udp_opt:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t fcs_chk:1;
+ uint64_t fcs_strip:1;
+ uint64_t len_lf:1;
+ uint64_t len_le:1;
+ uint64_t len_ld:1;
+ uint64_t len_lc:1;
+ uint64_t csum_lf:1;
+ uint64_t csum_le:1;
+ uint64_t csum_ld:1;
+ uint64_t csum_lc:1;
+ uint64_t min_frame_len;
+ uint64_t max_frame_len;
+} mbox_pki_errcheck_cfg_t;
+
+/* CACHE MODE */
+enum {
+ MBOX_PKI_OPC_MODE_STT = 0LL,
+ MBOX_PKI_OPC_MODE_STF = 1LL,
+ MBOX_PKI_OPC_MODE_STF1_STT = 2LL,
+ MBOX_PKI_OPC_MODE_STF2_STT = 3LL
+};
+
+/* PKI QPG QOS */
+enum {
+ MBOX_PKI_QPG_QOS_NONE = 0,
+ MBOX_PKI_QPG_QOS_VLAN,
+ MBOX_PKI_QPG_QOS_MPLS,
+ MBOX_PKI_QPG_QOS_DSA_SRC,
+ MBOX_PKI_QPG_QOS_DIFFSERV,
+ MBOX_PKI_QPG_QOS_HIGIG,
+};
+
+struct mbox_pki_qos_entry {
+ uint16_t port_add;
+ uint16_t ggrp_ok;
+ uint16_t ggrp_bad;
+ uint16_t gaura;
+ uint8_t grptag_ok;
+ uint8_t grptag_bad;
+};
+
+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_create_qos {
+ uint8_t port_type;
+ uint8_t qpg_qos;
+ uint8_t num_entry;
+ uint8_t tag_type;
+ uint8_t drop_policy;
+ struct mbox_pki_qos_entry qos_entry[MBOX_PKI_MAX_QOS_ENTRY];
+} mbox_pki_qos_cfg_t;
+
+/* pki flow/style modify qos entry */
+typedef struct mbox_pki_port_modify_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+ struct {
+ uint8_t f_port_add:1;
+ uint8_t f_grp_ok:1;
+ uint8_t f_grp_bad:1;
+ uint8_t f_gaura:1;
+ uint8_t f_grptag_ok:1;
+ uint8_t f_grptag_bad:1;
+ uint8_t f_tag_type:1;
+ } mmask;
+ uint8_t tag_type;
+ struct mbox_pki_qos_entry qos_entry;
+} mbox_pki_mod_qos_t;
+
+/* pki flow/style delete qos entry */
+typedef struct mbox_pki_port_delete_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+} mbox_pki_del_qos_t;
+
+/* PKI maximum constants */
+#define PKI_VF_MAX (1)
+#define PKI_MAX_PKTLEN (32768)
+
+/* pki pkind parse mode */
+enum {
+ PKI_PARSE_LA_TO_LG = 0,
+ PKI_PARSE_LB_TO_LG = 1,
+ PKI_PARSE_LC_TO_LG = 3,
+ PKI_PARSE_LG = 0x3f,
+ PKI_PARSE_NOTHING = 0x7f
+};
+
+/* pki port config */
+typedef struct pki_port_cfg {
+ uint8_t port_type;
+ struct {
+ uint8_t fcs_pres:1;
+ uint8_t fcs_skip:1;
+ uint8_t parse_mode:1;
+ uint8_t mpls_parse:1;
+ uint8_t inst_hdr_parse:1;
+ uint8_t fulc_parse:1;
+ uint8_t dsa_parse:1;
+ uint8_t hg2_parse:1;
+ uint8_t hg_parse:1;
+ } mmask;
+ uint8_t fcs_pres;
+ uint8_t fcs_skip;
+ uint8_t parse_mode;
+ uint8_t mpls_parse;
+ uint8_t inst_hdr_parse;
+ uint8_t fulc_parse;
+ uint8_t dsa_parse;
+ uint8_t hg2_parse;
+ uint8_t hg_parse;
+} pki_prt_cfg_t;
+
+
+/* pki Flow/style packet buffer config */
+typedef struct pki_port_pktbuf_cfg {
+ uint8_t port_type;
+ struct {
+ uint16_t f_mbuff_size:1;
+ uint16_t f_wqe_skip:1;
+ uint16_t f_first_skip:1;
+ uint16_t f_later_skip:1;
+ uint16_t f_pkt_outside_wqe:1;
+ uint16_t f_wqe_endian:1;
+ uint16_t f_cache_mode:1;
+ } mmask;
+ uint16_t mbuff_size;
+ uint16_t wqe_skip;
+ uint16_t first_skip;
+ uint16_t later_skip;
+ uint8_t pkt_outside_wqe;
+ uint8_t wqe_endian;
+ uint8_t cache_mode;
+} pki_pktbuf_cfg_t;
+
+/* pki flow/style tag config */
+typedef struct pki_port_hash_cfg {
+ uint8_t port_type;
+ uint32_t tag_slf:1;
+ uint32_t tag_sle:1;
+ uint32_t tag_sld:1;
+ uint32_t tag_slc:1;
+ uint32_t tag_dlf:1;
+ uint32_t tag_dle:1;
+ uint32_t tag_dld:1;
+ uint32_t tag_dlc:1;
+ uint32_t tag_prt:1;
+ uint32_t tag_vlan0:1;
+ uint32_t tag_vlan1:1;
+ uint32_t tag_ip_pctl:1;
+ uint32_t tag_sync:1;
+ uint32_t tag_spi:1;
+ uint32_t tag_gtp:1;
+ uint32_t tag_vni:1;
+} pki_hash_cfg_t;
+
+/* pki flow/style errcheck config */
+typedef struct pki_port_errcheck_cfg {
+ uint8_t port_type;
+ struct {
+ uint32_t f_ip6_udp_opt:1;
+ uint32_t f_lenerr_en:1;
+ uint32_t f_maxerr_en:1;
+ uint32_t f_minerr_en:1;
+ uint32_t f_fcs_chk:1;
+ uint32_t f_fcs_strip:1;
+ uint32_t f_len_lf:1;
+ uint32_t f_len_le:1;
+ uint32_t f_len_ld:1;
+ uint32_t f_len_lc:1;
+ uint32_t f_csum_lf:1;
+ uint32_t f_csum_le:1;
+ uint32_t f_csum_ld:1;
+ uint32_t f_csum_lc:1;
+ uint32_t f_min_frame_len;
+ uint32_t f_max_frame_len;
+ } mmask;
+ uint64_t ip6_udp_opt:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t fcs_chk:1;
+ uint64_t fcs_strip:1;
+ uint64_t len_lf:1;
+ uint64_t len_le:1;
+ uint64_t len_ld:1;
+ uint64_t len_lc:1;
+ uint64_t csum_lf:1;
+ uint64_t csum_le:1;
+ uint64_t csum_ld:1;
+ uint64_t csum_lc:1;
+ uint64_t min_frame_len;
+ uint64_t max_frame_len;
+} pki_errchk_cfg_t;
+
+
+/* CACHE MODE */
+enum {
+ PKI_OPC_MODE_STT = 0LL,
+ PKI_OPC_MODE_STF = 1LL,
+ PKI_OPC_MODE_STF1_STT = 2LL,
+ PKI_OPC_MODE_STF2_STT = 3LL
+};
+
+/* PKI QPG QOS */
+enum {
+ PKI_QPG_QOS_NONE = 0,
+ PKI_QPG_QOS_VLAN,
+ PKI_QPG_QOS_MPLS,
+ PKI_QPG_QOS_DSA_SRC,
+ PKI_QPG_QOS_DIFFSERV,
+ PKI_QPG_QOS_HIGIG,
+};
+
+struct pki_qos_entry {
+ uint16_t port_add;
+ uint16_t ggrp_ok;
+ uint16_t ggrp_bad;
+ uint16_t gaura;
+ uint8_t grptag_ok;
+ uint8_t grptag_bad;
+ uint8_t ena_red;
+ uint8_t ena_drop;
+};
+
+#define PKO_MAX_QOS_ENTRY 64
+
+/* pki flow/style enable qos */
+typedef struct pki_port_create_qos {
+ uint8_t port_type;
+ uint8_t qpg_qos;
+ uint8_t num_entry;
+ uint8_t tag_type;
+ uint8_t drop_policy;
+ struct pki_qos_entry qos_entry[PKO_MAX_QOS_ENTRY];
+} pki_qos_cfg_t;
+
+/* pki flow/style delete qos entry */
+typedef struct pki_port_delete_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style modify qos entry */
+typedef struct pki_port_modify_qos_entry {
+ uint8_t port_type;
+ uint16_t index;
+ struct {
+ uint8_t f_port_add:1;
+ uint8_t f_grp_ok:1;
+ uint8_t f_grp_bad:1;
+ uint8_t f_gaura:1;
+ uint8_t f_grptag_ok:1;
+ uint8_t f_grptag_bad:1;
+ uint8_t f_tag_type:1;
+ } mmask;
+ uint8_t tag_type;
+ struct pki_qos_entry qos_entry;
+} pki_mod_qos_t;
+
+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+ int len = sizeof(mbox_pki_mod_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+ int len = sizeof(mbox_pki_del_qos_t);
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_CLOSE;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_START;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+ struct octeontx_mbox_hdr hdr;
+ int res;
+
+ mbox_pki_port_t ptype;
+ int len = sizeof(mbox_pki_port_t);
+ memset(&ptype, 0, len);
+ ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+ hdr.coproc = OCTEONTX_PKI_COPROC;
+ hdr.msg = MBOX_PKI_PORT_STOP;
+ hdr.vfid = port;
+
+ res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ if (res < 0)
+ return -EACCES;
+
+ return res;
+}
+
+int octeontx_pki_port_open(int port);
+int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
+int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
+int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
+int octeontx_pki_port_close(int port);
+int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);
+
+#endif /* __OCTEONTX_PKI_H__ */
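
The inline helpers above complete the port control set; a compressed lifecycle sketch (error handling abbreviated, port number assumed valid):

    #include "octeontx_pkivf.h"

    static int
    example_pki_lifecycle(int port)
    {
            if (octeontx_pki_port_open(port) < 0)
                    return -1;
            if (octeontx_pki_port_start(port) < 0) {
                    octeontx_pki_port_close(port);
                    return -1;
            }
            /* ... traffic flows; later, on teardown: */
            octeontx_pki_port_stop(port);
            return octeontx_pki_port_close(port);
    }
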
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.c b/drivers/net/octeontx/base/octeontx_pkovf.c
new file mode 100644
index 00000000..f01d948e
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -0,0 +1,617 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_eal.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_spinlock.h>
+
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
+#include "octeontx_pkovf.h"
+
+struct octeontx_pko_iomem {
+ uint8_t *va;
+ rte_iova_t iova;
+ size_t size;
+};
+
+#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
+
+struct octeontx_pko_fc_ctl_s {
+ int64_t buf_cnt;
+ int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
+};
+
+struct octeontx_pkovf {
+ uint8_t *bar0;
+ uint8_t *bar2;
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct octeontx_pko_vf_ctl_s {
+ rte_spinlock_t lock;
+
+ struct octeontx_pko_iomem fc_iomem;
+ struct octeontx_pko_fc_ctl_s *fc_ctl;
+ struct octeontx_pkovf pko[PKO_VF_MAX];
+ struct {
+ uint64_t chanid;
+ } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
+};
+
+static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
+
+static void *
+octeontx_pko_dq_vf_bar0(uint16_t txq)
+{
+ int vf_ix;
+
+ vf_ix = txq / PKO_VF_NUM_DQ;
+ return pko_vf_ctl.pko[vf_ix].bar0;
+}
+
+static int
+octeontx_pko_dq_gdq(uint16_t txq)
+{
+ return txq % PKO_VF_NUM_DQ;
+}
+
+/**
+ * Open a PKO DQ.
+ */
+static inline
+int octeontx_pko_dq_open(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int gdq;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ gdq = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(gdq < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+ *(volatile int64_t*)(pko_vf_ctl.fc_ctl + txq) =
+ PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;
+
+ rte_wmb();
+
+ octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
+ vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));
+
+ /* Set the register to return descriptor (packet) count as DEPTH */
+ /* KIND=1, NCB_QUERY_RSP=0 */
+ octeontx_write64(1ull << PKO_DQ_KIND_BIT,
+ vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
+ reg_off = PKO_VF_DQ_OP_OPEN(gdq);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::OPEN */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xC: /* DQALREADYCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
+}
+
+/**
+ * Close a PKO DQ, flushing any pending packets.
+ */
+static inline
+int octeontx_pko_dq_close(uint16_t txq)
+{
+ unsigned int reg_off;
+ uint8_t *vf_bar0;
+ uint64_t rtn;
+ int res;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+
+ if (unlikely(res < 0 || vf_bar0 == NULL))
+ return -EINVAL;
+
+ reg_off = PKO_VF_DQ_OP_CLOSE(res);
+
+ rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);
+
+ /* PKO_DQOP_E::CLOSE */
+ if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
+ return -EIO;
+
+ switch (rtn >> PKO_DQ_STATUS_BIT) {
+ case 0xD: /* DQNOTCREATED */
+ case 0x0: /* PASS */
+ break;
+ default:
+ return -EIO;
+ }
+
+ res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
+ return res;
+}
+
+/* Flush all packets pending on a DQ */
+static inline
+int octeontx_pko_dq_drain(uint16_t txq)
+{
+ unsigned int gdq;
+ uint8_t *vf_bar0;
+ uint64_t reg;
+ int res, timo = PKO_DQ_DRAIN_TO;
+
+ vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
+ res = octeontx_pko_dq_gdq(txq);
+ gdq = res;
+
+ /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
+ octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+ /* Wait until buffers leave DQs */
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ while (reg && timo > 0) {
+ rte_delay_us(100);
+ timo--;
+ reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
+ }
+ /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
+ octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
+
+ return reg;
+}
+
+static inline int
+octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_num, unsigned int dq_from)
+{
+ unsigned int dq, dq_cnt;
+ unsigned int dq_base;
+
+ dq_cnt = 0;
+ dq = dq_from;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_base = dq;
+ dq_cnt = 0;
+ while (dq < RTE_DIM(ctl->dq_map) &&
+ ctl->dq_map[dq].chanid == ~chanid) {
+ dq_cnt++;
+ if (dq_cnt == dq_num)
+ return dq_base;
+ dq++;
+ }
+ dq++;
+ }
+ return -1;
+}
+
+static inline void
+octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ unsigned int dq_base, unsigned int dq_num)
+{
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ while (dq_cnt < dq_num) {
+ dq = dq_base + dq_cnt;
+
+ octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
+ chanid);
+
+ ctl->dq_map[dq].chanid = ~chanid;
+ dq_cnt++;
+ }
+}
+
+static inline int
+octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
+ unsigned int dq_num, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ int dq;
+
+ rte_spinlock_lock(&ctl->lock);
+
+ dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
+ if (dq < 0 || (unsigned int)dq != dq_base) {
+ rte_spinlock_unlock(&ctl->lock);
+ return -1;
+ }
+ octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);
+
+ rte_spinlock_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ const uint64_t null_chanid = ~0ull;
+ unsigned int dq = 0, dq_cnt = 0;
+
+ rte_spinlock_lock(&ctl->lock);
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ if (ctl->dq_map[dq].chanid == ~chanid) {
+ ctl->dq_map[dq].chanid = ~null_chanid;
+ dq_cnt++;
+ }
+ dq++;
+ }
+ rte_spinlock_unlock(&ctl->lock);
+
+ return dq_cnt > 0 ? 0 : -EINVAL;
+}
+
+int
+octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+int
+octeontx_pko_channel_close(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int res;
+
+ res = octeontx_pko_dq_free(ctl, chanid);
+ if (res < 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq_vf;
+ unsigned int dq, dq_cnt;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ if (octeontx_pko_dq_open(dq) < 0)
+ break;
+
+ dq_cnt++;
+ dq++;
+ }
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_start(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_chan_start(ctl, chanid);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+static inline int
+octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
+{
+ unsigned int dq, dq_cnt, dq_vf;
+ int res;
+
+ dq_cnt = 0;
+ dq = 0;
+ while (dq < RTE_DIM(ctl->dq_map)) {
+ dq_vf = dq / PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0) {
+ dq += PKO_VF_NUM_DQ;
+ continue;
+ }
+
+ if (ctl->dq_map[dq].chanid != ~chanid) {
+ dq++;
+ continue;
+ }
+
+ res = octeontx_pko_dq_drain(dq);
+ if (res > 0)
+ octeontx_log_err("draining DQ%d, buffers left: %x",
+ dq, res);
+
+ res = octeontx_pko_dq_close(dq);
+ if (res < 0)
+ octeontx_log_err("closing DQ%d failed\n", dq);
+
+ dq_cnt++;
+ dq++;
+ }
+ return dq_cnt;
+}
+
+int
+octeontx_pko_channel_stop(int chanid)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+
+ octeontx_pko_chan_stop(ctl, chanid);
+ return 0;
+}
+
+static inline int
+octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
+ void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ octeontx_dq_t curr;
+ unsigned int dq_vf;
+ unsigned int dq;
+
+ RTE_SET_USED(out_elem_size);
+ memset(&curr, 0, sizeof(octeontx_dq_t));
+
+ dq_vf = dq_num / PKO_VF_NUM_DQ;
+ dq = dq_num % PKO_VF_NUM_DQ;
+
+ if (!ctl->pko[dq_vf].bar0)
+ return -EINVAL;
+
+ if (ctl->dq_map[dq_num].chanid != ~chanid)
+ return -EINVAL;
+
+ uint8_t *iter = (uint8_t *)out;
+ curr.lmtline_va = ctl->pko[dq_vf].bar2;
+ curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
+ + PKO_VF_DQ_OP_SEND((dq), 0));
+ curr.fc_status_va = ctl->fc_ctl + dq;
+
+ octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
+ curr.lmtline_va, curr.ioreg_va,
+ curr.fc_status_va);
+
+ getter(&curr, (void *)iter);
+ return 0;
+}
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter)
+{
+ struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
+ int dq_cnt;
+
+ dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
+ dq_num, getter);
+ if (dq_cnt < 0)
+ return -1;
+
+ return dq_cnt;
+}
+
+int
+octeontx_pko_vf_count(void)
+{
+ int vf_cnt;
+
+ vf_cnt = 0;
+ while (pko_vf_ctl.pko[vf_cnt].bar0)
+ vf_cnt++;
+
+ return vf_cnt;
+}
+
+int
+octeontx_pko_init_fc(const size_t pko_vf_count)
+{
+ int dq_ix;
+ uint64_t reg;
+ uint8_t *vf_bar0;
+ size_t vf_idx;
+ size_t fc_mem_size;
+
+ fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
+ pko_vf_count * PKO_VF_NUM_DQ;
+
+ pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
+ if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
+ octeontx_log_err("fc_iomem: not enough memory");
+ return -ENOMEM;
+ }
+
+ pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
+ pko_vf_ctl.fc_iomem.va);
+ pko_vf_ctl.fc_iomem.size = fc_mem_size;
+
+ pko_vf_ctl.fc_ctl =
+ (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;
+
+ /* Configure Flow-Control feature for all DQs of open VFs */
+ for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
+ dq_ix = vf_idx * PKO_VF_NUM_DQ;
+
+ vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;
+
+ reg = (pko_vf_ctl.fc_iomem.iova +
+ (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
+ reg |= /* BASE */
+ (0x2 << 3) | /* HYST_BITS */
+ (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
+ (0x1 << 0); /* ENABLE */
+
+ octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
+
+ octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
+ vf_bar0, (int)vf_idx, reg);
+ }
+ return 0;
+}
+
+void
+octeontx_pko_fc_free(void)
+{
+ rte_free(pko_vf_ctl.fc_iomem.va);
+}
+
+static void
+octeontx_pkovf_setup(void)
+{
+ static bool init_once;
+
+ if (!init_once) {
+ unsigned int i;
+
+ rte_spinlock_init(&pko_vf_ctl.lock);
+
+ pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
+ pko_vf_ctl.fc_ctl = NULL;
+
+ for (i = 0; i < PKO_VF_MAX; i++) {
+ pko_vf_ctl.pko[i].bar0 = NULL;
+ pko_vf_ctl.pko[i].bar2 = NULL;
+ pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
+ pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
+ }
+
+ for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
+ pko_vf_ctl.dq_map[i].chanid = 0;
+
+ init_once = true;
+ }
+}
+
+/* PKOVF PCIe device */
+static int
+pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint16_t domain;
+ uint8_t *bar0;
+ uint8_t *bar2;
+ struct octeontx_pkovf *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ octeontx_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ bar0 = pci_dev->mem_resource[0].addr;
+ bar2 = pci_dev->mem_resource[2].addr;
+
+ octeontx_pkovf_setup();
+
+ /* get vfid and domain */
+ val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
+ domain = (val >> 7) & 0xffff;
+ vfid = (val >> 23) & 0xffff;
+
+ if (unlikely(vfid >= PKO_VF_MAX)) {
+ octeontx_log_err("pko: Invalid vfid %d", vfid);
+ return -EINVAL;
+ }
+
+ res = &pko_vf_ctl.pko[vfid];
+ res->vfid = vfid;
+ res->domain = domain;
+ res->bar0 = bar0;
+ res->bar2 = bar2;
+
+ octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
+ return 0;
+}
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_PKO_VF 0xA049
+
+static const struct rte_pci_id pci_pkovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_PKO_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_pkovf = {
+ .id_table = pci_pkovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = pkovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);
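
A sketch of a TX-queue setup path built on the channel API above; the getter simply copies the DQ descriptor out, and the chanid/dq_base values are illustrative assumptions:

    #include <string.h>
    #include "octeontx_pkovf.h"

    /* Getter callback: copy the DQ descriptor into the caller's buffer. */
    static void
    example_dq_getter(octeontx_dq_t *dq, void *out)
    {
            memcpy(out, dq, sizeof(*dq));
    }

    static int
    example_pko_setup(int chanid, int dq_base)
    {
            octeontx_dq_t dq;

            if (octeontx_pko_channel_open(dq_base, 1, chanid) < 0)
                    return -1;
            if (octeontx_pko_channel_start(chanid) <= 0)
                    goto stop;
            if (octeontx_pko_channel_query_dqs(chanid, &dq, sizeof(dq),
                                               dq_base, example_dq_getter) < 0)
                    goto stop;

            /* dq.lmtline_va / dq.ioreg_va can now feed octeontx_reg_lmtst() */
            return 0;
    stop:
            octeontx_pko_channel_stop(chanid);
            octeontx_pko_channel_close(chanid);
            return -1;
    }
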
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.h b/drivers/net/octeontx/base/octeontx_pkovf.h
new file mode 100644
index 00000000..cfc3715d
--- /dev/null
+++ b/drivers/net/octeontx/base/octeontx_pkovf.h
@@ -0,0 +1,97 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_PKO_H__
+#define __OCTEONTX_PKO_H__
+
+/* PKO maximum constants */
+#define PKO_VF_MAX (32)
+#define PKO_VF_NUM_DQ (8)
+#define PKO_MAX_NUM_DQ (8)
+#define PKO_DQ_DRAIN_TO (1000)
+
+#define PKO_DQ_FC_SKID (4)
+#define PKO_DQ_FC_DEPTH_PAGES (2048)
+#define PKO_DQ_FC_STRIDE_16 (16)
+#define PKO_DQ_FC_STRIDE_128 (128)
+#define PKO_DQ_FC_STRIDE PKO_DQ_FC_STRIDE_16
+
+#define PKO_DQ_KIND_BIT 49
+#define PKO_DQ_STATUS_BIT 60
+#define PKO_DQ_OP_BIT 48
+
+/* PKO VF register offsets from VF_BAR0 */
+#define PKO_VF_DQ_SW_XOFF(gdq) (0x000100 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CTL(gdq) (0x000130 | (gdq) << 17)
+#define PKO_VF_DQ_WM_CNT(gdq) (0x000150 | (gdq) << 17)
+#define PKO_VF_DQ_FC_CONFIG (0x000160)
+#define PKO_VF_DQ_FC_STATUS(gdq) (0x000168 | (gdq) << 17)
+#define PKO_VF_DQ_OP_SEND(gdq, op) (0x001000 | (gdq) << 17 | (op) << 3)
+#define PKO_VF_DQ_OP_OPEN(gdq) (0x001100 | (gdq) << 17)
+#define PKO_VF_DQ_OP_CLOSE(gdq) (0x001200 | (gdq) << 17)
+#define PKO_VF_DQ_OP_QUERY(gdq) (0x001300 | (gdq) << 17)
+
+/* pko_send_hdr_s + pko_send_link */
+#define PKO_CMD_SZ (2 << 1)
+#define PKO_SEND_GATHER_SUBDC (0x0ull << 60)
+#define PKO_SEND_GATHER_LDTYPE(x) ((x) << 58)
+#define PKO_SEND_GATHER_GAUAR(x) ((x) << 24)
+
+typedef struct octeontx_dq_s {
+ void *lmtline_va;
+ void *ioreg_va;
+ void *fc_status_va;
+} octeontx_dq_t;
+
+/**
+ * Function for extracting information out of a given DQ.
+ *
+ * It is intended to be used on the slow (configuration) path, via
+ * octeontx_pko_channel_query_dqs().
+ *
+ * @param dq The DQ to extract information from.
+ * @param out Pointer to the caller's structure to fill.
+ */
+typedef void (*octeontx_pko_dq_getter_t)(octeontx_dq_t *dq, void *out);
+
+int
+octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
+ size_t dq_num, octeontx_pko_dq_getter_t getter);
+int octeontx_pko_channel_open(int dq_base, int dq_num, int chanid);
+int octeontx_pko_channel_close(int chanid);
+int octeontx_pko_channel_start(int chanid);
+int octeontx_pko_channel_stop(int chanid);
+int octeontx_pko_vf_count(void);
+int octeontx_pko_init_fc(const size_t pko_vf_count);
+void octeontx_pko_fc_free(void);
+
+#endif /* __OCTEONTX_PKO_H__ */
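
The PKO_VF_DQ_* macros above place the global DQ index at bit 17 of the register offset, giving each DQ what looks like its own 128 KiB window within BAR0 (an inference from the shift, not a documented fact). A small worked example of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int gdq = 3, op = 1;
        unsigned long off = 0x001000UL | (unsigned long)gdq << 17 |
                            (unsigned long)op << 3;

        /* 0x60000 (DQ window) | 0x1000 (SEND) | 0x8 (op) = 0x61008 */
        printf("PKO_VF_DQ_OP_SEND(3, 1) = 0x%lx\n", off);
        return 0;
    }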
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
new file mode 100644
index 00000000..bd24ec33
--- /dev/null
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -0,0 +1,1333 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_prefetch.h>
+#include <rte_bus_vdev.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+struct octeontx_vdev_init_params {
+ uint8_t nr_port;
+};
+
+enum octeontx_link_speed {
+ OCTEONTX_LINK_SPEED_SGMII,
+ OCTEONTX_LINK_SPEED_XAUI,
+ OCTEONTX_LINK_SPEED_RXAUI,
+ OCTEONTX_LINK_SPEED_10G_R,
+ OCTEONTX_LINK_SPEED_40G_R,
+ OCTEONTX_LINK_SPEED_RESERVE1,
+ OCTEONTX_LINK_SPEED_QSGMII,
+ OCTEONTX_LINK_SPEED_RESERVE2
+};
+
+/* Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *)extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ octeontx_log_err("argument has to be positive.");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
+ struct rte_vdev_device *dev)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int nr_port;
+ int ret = 0;
+
+ static const char * const octeontx_vdev_valid_params[] = {
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ NULL
+ };
+
+ const char *input_args = rte_vdev_device_args(dev);
+ if (params == NULL)
+ return -EINVAL;
+
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ octeontx_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* parse_integer_arg() writes a full int, so parse into a
+  * local and copy it back rather than overrunning the
+  * one-byte nr_port field.
+  */
+ nr_port = params->nr_port;
+ ret = rte_kvargs_process(kvlist,
+ OCTEONTX_VDEV_NR_PORT_ARG,
+ &parse_integer_arg,
+ &nr_port);
+ if (ret < 0)
+ goto free_kvlist;
+
+ params->nr_port = (uint8_t)nr_port;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+octeontx_port_open(struct octeontx_nic *nic)
+{
+ octeontx_mbox_bgx_port_conf_t bgx_port_conf;
+ int res;
+
+ res = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
+ if (res < 0) {
+ octeontx_log_err("failed to open port %d", res);
+ return res;
+ }
+
+ nic->node = bgx_port_conf.node;
+ nic->port_ena = bgx_port_conf.enable;
+ nic->base_ichan = bgx_port_conf.base_chan;
+ nic->base_ochan = bgx_port_conf.base_chan;
+ nic->num_ichans = bgx_port_conf.num_chans;
+ nic->num_ochans = bgx_port_conf.num_chans;
+ nic->mtu = bgx_port_conf.mtu;
+ nic->bpen = bgx_port_conf.bpen;
+ nic->fcs_strip = bgx_port_conf.fcs_strip;
+ nic->bcast_mode = bgx_port_conf.bcast_mode;
+ nic->mcast_mode = bgx_port_conf.mcast_mode;
+ nic->speed = bgx_port_conf.mode;
+
+ memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
+
+ octeontx_log_dbg("port opened %d", nic->port_id);
+ return res;
+}
+
+static void
+octeontx_port_close(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ octeontx_bgx_port_close(nic->port_id);
+ octeontx_log_dbg("port closed %d", nic->port_id);
+}
+
+static int
+octeontx_port_start(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_start(nic->port_id);
+}
+
+static int
+octeontx_port_stop(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return octeontx_bgx_port_stop(nic->port_id);
+}
+
+static void
+octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
+{
+ struct rte_eth_dev *dev;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+ dev = nic->dev;
+
+ res = octeontx_bgx_port_promisc_set(nic->port_id, en);
+ if (res < 0)
+ octeontx_log_err("failed to set promiscuous mode %d",
+ nic->port_id);
+
+ /* Set proper flag for the mode */
+ dev->data->promiscuous = (en != 0) ? 1 : 0;
+
+ octeontx_log_dbg("port %d : promiscuous mode %s",
+ nic->port_id, en ? "set" : "unset");
+}
+
+static int
+octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
+{
+ octeontx_mbox_bgx_port_stats_t bgx_stats;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
+ if (res < 0) {
+ octeontx_log_err("failed to get port stats %d", nic->port_id);
+ return res;
+ }
+
+ stats->ipackets = bgx_stats.rx_packets;
+ stats->ibytes = bgx_stats.rx_bytes;
+ stats->imissed = bgx_stats.rx_dropped;
+ stats->ierrors = bgx_stats.rx_errors;
+ stats->opackets = bgx_stats.tx_packets;
+ stats->obytes = bgx_stats.tx_bytes;
+ stats->oerrors = bgx_stats.tx_errors;
+
+ octeontx_log_dbg("port%d stats inpkts=%" PRIx64 " outpkts=%" PRIx64 "",
+ nic->port_id, stats->ipackets, stats->opackets);
+
+ return 0;
+}
+
+static void
+octeontx_port_stats_clr(struct octeontx_nic *nic)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ octeontx_bgx_port_stats_clr(nic->port_id);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
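
devconf_set_default_sane_values() above sizes the eventdev configuration at the device's own advertised maxima; octeontx_probe() below uses it exactly this way. A condensed sketch of the intended call sequence:

    #include <rte_eventdev.h>

    /* Sketch: query limits, derive a maximal config, apply it. */
    static int
    configure_evdev_to_max(uint8_t evdev)
    {
        struct rte_event_dev_info info;
        struct rte_event_dev_config conf;
        int ret;

        ret = rte_event_dev_info_get(evdev, &info);
        if (ret < 0)
            return ret;

        devconf_set_default_sane_values(&conf, &info);
        return rte_event_dev_configure(evdev, &conf);
    }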
+
+static int
+octeontx_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ RTE_SET_USED(conf);
+
+ if (!rte_eal_has_hugepages()) {
+ octeontx_log_err("huge page is not configured");
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode) {
+ octeontx_log_err("tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc) {
+ PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
+ rxmode->hw_strip_crc = 1;
+ }
+
+ if (rxmode->hw_ip_checksum) {
+ PMD_INIT_LOG(NOTICE, "rxcksum not supported");
+ rxmode->hw_ip_checksum = 0;
+ }
+
+ if (rxmode->split_hdr_size) {
+ octeontx_log_err("rxmode does not support split header");
+ return -EINVAL;
+ }
+
+ if (rxmode->hw_vlan_filter) {
+ octeontx_log_err("VLAN filter not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->hw_vlan_extend) {
+ octeontx_log_err("VLAN extended not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->enable_lro) {
+ octeontx_log_err("LRO not supported");
+ return -EINVAL;
+ }
+
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ octeontx_log_err("setting link speed/duplex not supported");
+ return -EINVAL;
+ }
+
+ if (conf->dcb_capability_en) {
+ octeontx_log_err("DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ octeontx_log_err("flow director not supported");
+ return -EINVAL;
+ }
+
+ nic->num_tx_queues = dev->data->nb_tx_queues;
+
+ ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
+ nic->num_tx_queues,
+ nic->base_ochan);
+ if (ret) {
+ octeontx_log_err("failed to open channel %d no-of-txq %d",
+ nic->base_ochan, nic->num_tx_queues);
+ return -EFAULT;
+ }
+
+ nic->pki.classifier_enable = false;
+ nic->pki.hash_enable = true;
+ nic->pki.initialized = false;
+
+ return 0;
+}
+
+static void
+octeontx_dev_close(struct rte_eth_dev *dev)
+{
+ struct octeontx_txq *txq = NULL;
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ unsigned int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_close(nic->evdev);
+
+ ret = octeontx_pko_channel_close(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to close channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ }
+ /* Free txq resources for this port */
+ for (i = 0; i < nic->num_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+
+ rte_free(txq);
+ }
+}
+
+static int
+octeontx_dev_start(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Tx start
+ */
+ dev->tx_pkt_burst = octeontx_xmit_pkts;
+ ret = octeontx_pko_channel_start(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
+ nic->port_id, nic->num_tx_queues, nic->base_ochan,
+ ret);
+ goto error;
+ }
+
+ /*
+ * Rx start
+ */
+ dev->rx_pkt_burst = octeontx_recv_pkts;
+ ret = octeontx_pki_port_start(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("fail to start Rx on port %d", nic->port_id);
+ goto channel_stop_error;
+ }
+
+ /*
+ * Start port
+ */
+ ret = octeontx_port_start(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed start port %d", ret);
+ goto pki_port_stop_error;
+ }
+
+ PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
+ nic->base_ochan, nic->num_tx_queues, nic->port_id);
+
+ ret = rte_event_dev_start(nic->evdev);
+ if (ret < 0) {
+ octeontx_log_err("failed to start evdev: ret (%d)", ret);
+ goto pki_port_stop_error;
+ }
+
+ /* Success */
+ return ret;
+
+pki_port_stop_error:
+ octeontx_pki_port_stop(nic->port_id);
+channel_stop_error:
+ octeontx_pko_channel_stop(nic->base_ochan);
+error:
+ return ret;
+}
+
+static void
+octeontx_dev_stop(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_event_dev_stop(nic->evdev);
+
+ ret = octeontx_port_stop(nic);
+ if (ret < 0) {
+ octeontx_log_err("failed to req stop port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pki_port_stop(nic->port_id);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop pki port %d res=%d",
+ nic->port_id, ret);
+ return;
+ }
+
+ ret = octeontx_pko_channel_stop(nic->base_ochan);
+ if (ret < 0) {
+ octeontx_log_err("failed to stop channel %d VF%d %d %d",
+ nic->base_ochan, nic->port_id, nic->num_tx_queues,
+ ret);
+ return;
+ }
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
+}
+
+static void
+octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_promisc_set(nic, 1);
+}
+
+static void
+octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_promisc_set(nic, 0);
+}
+
+static inline int
+octeontx_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
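
The helper above publishes the whole link record with one rte_atomic64_cmpset(), which is only sound while struct rte_eth_link packs into a single 64-bit word. A sketch making that assumption explicit at build time (illustrative, not part of the original code):

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    /* Build-time guard: the atomic publish relies on this size. */
    static inline void
    octeontx_link_size_check(void)
    {
        RTE_BUILD_BUG_ON(sizeof(struct rte_eth_link) != sizeof(uint64_t));
    }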
+
+static int
+octeontx_port_link_status(struct octeontx_nic *nic)
+{
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+ res = octeontx_bgx_port_link_status(nic->port_id);
+ if (res < 0) {
+ octeontx_log_err("failed to get port %d link status",
+ nic->port_id);
+ return res;
+ }
+
+ nic->link_up = (uint8_t)res;
+ octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
+
+ return res;
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ */
+static int
+octeontx_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_eth_link link;
+ int res;
+
+ res = 0;
+ PMD_INIT_FUNC_TRACE();
+
+ res = octeontx_port_link_status(nic);
+ if (res < 0) {
+ octeontx_log_err("failed to request link status %d", res);
+ return res;
+ }
+
+ link.link_status = nic->link_up;
+
+ switch (nic->speed) {
+ case OCTEONTX_LINK_SPEED_SGMII:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_XAUI:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RXAUI:
+ case OCTEONTX_LINK_SPEED_10G_R:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case OCTEONTX_LINK_SPEED_QSGMII:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+ case OCTEONTX_LINK_SPEED_40G_R:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+
+ case OCTEONTX_LINK_SPEED_RESERVE1:
+ case OCTEONTX_LINK_SPEED_RESERVE2:
+ default:
+ octeontx_log_err("incorrect link speed %d", nic->speed);
+ break;
+ }
+
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ return octeontx_atomic_write_link_status(dev, &link);
+}
+
+static int
+octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ return octeontx_port_stats(nic, stats);
+}
+
+static void
+octeontx_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ octeontx_port_stats_clr(nic);
+}
+
+static void
+octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ int ret;
+
+ ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
+ if (ret != 0)
+ octeontx_log_err("failed to set MAC address on port %d",
+ nic->port_id);
+}
+
+static void
+octeontx_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ RTE_SET_USED(dev);
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_40G;
+
+ dev_info->driver_name = RTE_STR(rte_octeontx_pmd);
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
+ dev_info->max_rx_queues = 1;
+ dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = 0,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = 0,
+ .txq_flags =
+ ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_NOXSUMS,
+ };
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE;
+}
+
+static void
+octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
+{
+ ((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
+ ((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
+ ((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
+}
+
+static int
+octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ struct octeontx_txq *txq;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[qidx];
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto close_port;
+ }
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return res;
+
+close_port:
+ (void)octeontx_port_stop(nic);
+ octeontx_pko_channel_stop(nic->base_ochan);
+ octeontx_pko_channel_close(nic->base_ochan);
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return res;
+}
+
+static int
+octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+ return octeontx_vf_start_tx_queue(dev, nic, qidx);
+}
+
+static inline int
+octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
+ uint16_t qidx)
+{
+ int ret = 0;
+
+ RTE_SET_USED(nic);
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
+static int
+octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ qidx = qidx % PKO_VF_NUM_DQ;
+
+ return octeontx_vf_stop_tx_queue(dev, nic, qidx);
+}
+
+static void
+octeontx_dev_tx_queue_release(void *tx_queue)
+{
+ struct octeontx_txq *txq = tx_queue;
+ int res;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq) {
+ res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+ if (res < 0)
+ octeontx_log_err("failed stop tx_queue(%d)\n",
+ txq->queue_id);
+
+ rte_free(txq);
+ }
+}
+
+static int
+octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct octeontx_txq *txq = NULL;
+ uint16_t dq_num;
+ int res = 0;
+
+ RTE_SET_USED(nb_desc);
+ RTE_SET_USED(socket_id);
+ RTE_SET_USED(tx_conf);
+
+ dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->tx_queues[qidx] != NULL) {
+ PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
+ qidx);
+ octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+ dev->data->tx_queues[qidx] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (txq == NULL) {
+ octeontx_log_err("failed to allocate txq=%d", qidx);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ txq->eth_dev = dev;
+ txq->queue_id = dq_num;
+ dev->data->tx_queues[qidx] = txq;
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ res = octeontx_pko_channel_query_dqs(nic->base_ochan,
+ &txq->dq,
+ sizeof(octeontx_dq_t),
+ txq->queue_id,
+ octeontx_dq_info_getter);
+ if (res < 0) {
+ res = -EFAULT;
+ goto err;
+ }
+
+ PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
+ qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
+ txq->dq.ioreg_va,
+ txq->dq.fc_status_va);
+
+ return res;
+
+err:
+ if (txq) {
+ /* Drop the stale queue pointer before freeing */
+ dev->data->tx_queues[qidx] = NULL;
+ rte_free(txq);
+ }
+
+ return res;
+}
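
Tx queue setup above derives a global DQ number as port_id * PKO_VF_NUM_DQ + qidx, while the per-queue start/stop handlers fold a queue index back with qidx % PKO_VF_NUM_DQ. A sketch of the mapping:

    /* Each port owns a block of PKO_VF_NUM_DQ (8) descriptor queues,
     * so port 2, queue 5 maps to global DQ 2 * 8 + 5 = 21.
     */
    static inline unsigned int
    octeontx_global_dq(unsigned int port_id, unsigned int qidx)
    {
        return port_id * 8 /* PKO_VF_NUM_DQ */ + qidx;
    }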
+
+static int
+octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+ struct rte_mempool_ops *mp_ops = NULL;
+ struct octeontx_rxq *rxq = NULL;
+ pki_pktbuf_cfg_t pktbuf_conf;
+ pki_hash_cfg_t pki_hash;
+ pki_qos_cfg_t pki_qos;
+ uintptr_t pool;
+ int ret, port;
+ uint8_t gaura;
+ unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
+ unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
+
+ RTE_SET_USED(nb_desc);
+
+ memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
+ memset(&pki_hash, 0, sizeof(pki_hash));
+ memset(&pki_qos, 0, sizeof(pki_qos));
+
+ mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
+ if (strcmp(mp_ops->name, "octeontx_fpavf")) {
+ octeontx_log_err("failed to find octeontx_fpavf mempool");
+ return -ENOTSUP;
+ }
+
+ /* Handle forbidden configurations */
+ if (nic->pki.classifier_enable) {
+ octeontx_log_err("cannot setup queue %d. "
+ "Classifier option unsupported", qidx);
+ return -EINVAL;
+ }
+
+ port = nic->port_id;
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ octeontx_log_err("rx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Verify queue index */
+ if (qidx >= dev->data->nb_rx_queues) {
+ octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
+ qidx, (dev->data->nb_rx_queues - 1));
+ return -ENOTSUP;
+ }
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY &&
+ socket_id != (unsigned int)nic->node)
+ PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Allocating rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rxq == NULL) {
+ octeontx_log_err("failed to allocate rxq=%d", qidx);
+ return -ENOMEM;
+ }
+
+ if (!nic->pki.initialized) {
+ pktbuf_conf.port_type = 0;
+ pki_hash.port_type = 0;
+ pki_qos.port_type = 0;
+
+ pktbuf_conf.mmask.f_wqe_skip = 1;
+ pktbuf_conf.mmask.f_first_skip = 1;
+ pktbuf_conf.mmask.f_later_skip = 1;
+ pktbuf_conf.mmask.f_mbuff_size = 1;
+ pktbuf_conf.mmask.f_cache_mode = 1;
+
+ pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
+ pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
+ pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
+ pktbuf_conf.mbuff_size = (mb_pool->elt_size -
+ RTE_PKTMBUF_HEADROOM -
+ sizeof(struct rte_mbuf));
+
+ pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
+
+ ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
+ if (ret != 0) {
+ octeontx_log_err("fail to configure pktbuf for port %d",
+ port);
+ rte_free(rxq);
+ return ret;
+ }
+ PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
+ "\tmbuf_size:\t0x%0x\n"
+ "\twqe_skip:\t0x%0x\n"
+ "\tfirst_skip:\t0x%0x\n"
+ "\tlater_skip:\t0x%0x\n"
+ "\tcache_mode:\t%s\n",
+ port,
+ pktbuf_conf.mbuff_size,
+ pktbuf_conf.wqe_skip,
+ pktbuf_conf.first_skip,
+ pktbuf_conf.later_skip,
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STT) ?
+ "STT" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF) ?
+ "STF" :
+ (pktbuf_conf.cache_mode ==
+ PKI_OPC_MODE_STF1_STT) ?
+ "STF1_STT" : "STF2_STT");
+
+ if (nic->pki.hash_enable) {
+ pki_hash.tag_dlc = 1;
+ pki_hash.tag_slc = 1;
+ pki_hash.tag_dlf = 1;
+ pki_hash.tag_slf = 1;
+ pki_hash.tag_prt = 1;
+ octeontx_pki_port_hash_config(port, &pki_hash);
+ }
+
+ pool = (uintptr_t)mb_pool->pool_id;
+
+ /* Get the gpool Id */
+ gaura = octeontx_fpa_bufpool_gpool(pool);
+
+ pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
+ pki_qos.num_entry = 1;
+ pki_qos.drop_policy = 0;
+ pki_qos.tag_type = 0L;
+ pki_qos.qos_entry[0].port_add = 0;
+ pki_qos.qos_entry[0].gaura = gaura;
+ pki_qos.qos_entry[0].ggrp_ok = ev_queues;
+ pki_qos.qos_entry[0].ggrp_bad = ev_queues;
+ pki_qos.qos_entry[0].grptag_bad = 0;
+ pki_qos.qos_entry[0].grptag_ok = 0;
+
+ ret = octeontx_pki_port_create_qos(port, &pki_qos);
+ if (ret < 0) {
+ octeontx_log_err("failed to create QOS port=%d, q=%d",
+ port, qidx);
+ rte_free(rxq);
+ return ret;
+ }
+ nic->pki.initialized = true;
+ }
+
+ rxq->port_id = nic->port_id;
+ rxq->eth_dev = dev;
+ rxq->queue_id = qidx;
+ rxq->evdev = nic->evdev;
+ rxq->ev_queues = ev_queues;
+ rxq->ev_ports = ev_ports;
+
+ dev->data->rx_queues[qidx] = rxq;
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+octeontx_dev_rx_queue_release(void *rxq)
+{
+ rte_free(rxq);
+}
+
+static const uint32_t *
+octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == octeontx_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+/* Initialize and register driver with DPDK Application */
+static const struct eth_dev_ops octeontx_dev_ops = {
+ .dev_configure = octeontx_dev_configure,
+ .dev_infos_get = octeontx_dev_info,
+ .dev_close = octeontx_dev_close,
+ .dev_start = octeontx_dev_start,
+ .dev_stop = octeontx_dev_stop,
+ .promiscuous_enable = octeontx_dev_promisc_enable,
+ .promiscuous_disable = octeontx_dev_promisc_disable,
+ .link_update = octeontx_dev_link_update,
+ .stats_get = octeontx_dev_stats_get,
+ .stats_reset = octeontx_dev_stats_reset,
+ .mac_addr_set = octeontx_dev_default_mac_addr_set,
+ .tx_queue_start = octeontx_dev_tx_queue_start,
+ .tx_queue_stop = octeontx_dev_tx_queue_stop,
+ .tx_queue_setup = octeontx_dev_tx_queue_setup,
+ .tx_queue_release = octeontx_dev_tx_queue_release,
+ .rx_queue_setup = octeontx_dev_rx_queue_setup,
+ .rx_queue_release = octeontx_dev_rx_queue_release,
+ .dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
+};
+
+/* Create Ethdev interface per BGX LMAC ports */
+static int
+octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
+ int socket_id)
+{
+ int res;
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct octeontx_nic *nic = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct rte_eth_dev_data *data = NULL;
+ const char *name = rte_vdev_device_name(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ snprintf(octtx_name, sizeof(octtx_name), "%s_%d", name, port);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_attach_secondary(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
+ eth_dev->rx_pkt_burst = octeontx_recv_pkts;
+ return 0;
+ }
+
+ data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
+ if (data == NULL) {
+ octeontx_log_err("failed to allocate devdata");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
+ if (nic == NULL) {
+ octeontx_log_err("failed to allocate nic structure");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ nic->port_id = port;
+ nic->evdev = evdev;
+
+ res = octeontx_port_open(nic);
+ if (res < 0)
+ goto err;
+
+ /* Rx side port configuration */
+ res = octeontx_pki_port_open(port);
+ if (res != 0) {
+ octeontx_log_err("failed to open PKI port %d", port);
+ res = -ENODEV;
+ goto err;
+ }
+
+ /* Reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(octtx_name);
+ if (eth_dev == NULL) {
+ octeontx_log_err("failed to allocate rte_eth_dev");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = NULL;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->numa_node = dev->device.numa_node;
+
+ rte_memcpy(data, (eth_dev)->data, sizeof(*data));
+ data->dev_private = nic;
+
+ data->port_id = eth_dev->data->port_id;
+ snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
+
+ nic->ev_queues = 1;
+ nic->ev_ports = 1;
+
+ data->dev_link.link_status = ETH_LINK_DOWN;
+ data->dev_started = 0;
+ data->promiscuous = 0;
+ data->all_multicast = 0;
+ data->scattered_rx = 0;
+
+ data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
+ socket_id);
+ if (data->mac_addrs == NULL) {
+ octeontx_log_err("failed to allocate memory for mac_addrs");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &octeontx_dev_ops;
+
+ /* Finally save ethdev pointer to the NIC structure */
+ nic->dev = eth_dev;
+
+ if (nic->port_id != data->port_id) {
+ octeontx_log_err("eth_dev->port_id (%d) is diff to orig (%d)",
+ data->port_id, nic->port_id);
+ res = -EINVAL;
+ goto err;
+ }
+
+ /* Update port_id mac to eth_dev */
+ memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
+
+ PMD_INIT_LOG(DEBUG, "ethdev info: ");
+ PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
+ nic->port_id, nic->port_ena,
+ nic->base_ochan, nic->num_ochans,
+ nic->num_tx_queues);
+ PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
+
+ return data->port_id;
+
+err:
+ if (nic != NULL && port != 0)
+ octeontx_port_close(nic);
+
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_free(data);
+ rte_free(nic);
+ rte_eth_dev_release_port(eth_dev);
+ }
+
+ return res;
+}
+
+/* Uninitialize octeontx device */
+static int
+octeontx_remove(struct rte_vdev_device *dev)
+{
+ char octtx_name[OCTEONTX_MAX_NAME_LEN];
+ struct rte_eth_dev *eth_dev = NULL;
+ struct octeontx_nic *nic = NULL;
+ int i;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
+ snprintf(octtx_name, sizeof(octtx_name), "eth_octeontx_%d", i);
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(octtx_name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ nic = octeontx_pmd_priv(eth_dev);
+ rte_event_dev_stop(nic->evdev);
+ PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
+
+ /* Close the event device before nic (dev_private) is freed */
+ rte_event_dev_close(nic->evdev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+ rte_eth_dev_release_port(eth_dev);
+ }
+
+ /* Free FC resource */
+ octeontx_pko_fc_free();
+
+ return 0;
+}
+
+/* Initialize octeontx device */
+static int
+octeontx_probe(struct rte_vdev_device *dev)
+{
+ const char *dev_name;
+ static int probe_once;
+ uint8_t socket_id, qlist;
+ int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
+ struct rte_event_dev_config dev_conf;
+ const char *eventdev_name = "event_octeontx";
+ struct rte_event_dev_info info;
+
+ struct octeontx_vdev_init_params init_params = {
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
+ };
+
+ dev_name = rte_vdev_device_name(dev);
+ res = octeontx_parse_vdev_init_params(&init_params, dev);
+ if (res < 0)
+ return -EINVAL;
+
+ if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
+ octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
+ OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
+ return -ENOTSUP;
+ }
+
+ PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
+
+ socket_id = rte_socket_id();
+
+ tx_vfcnt = octeontx_pko_vf_count();
+
+ if (tx_vfcnt < init_params.nr_port) {
+ octeontx_log_err("not enough PKO (%d) for port number (%d)",
+ tx_vfcnt, init_params.nr_port);
+ return -EINVAL;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ octeontx_log_err("eventdev %s not found", eventdev_name);
+ return -ENODEV;
+ }
+
+ res = rte_event_dev_info_get(evdev, &info);
+ if (res < 0) {
+ octeontx_log_err("failed to eventdev info %d", res);
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
+ info.max_event_queues, info.max_event_ports);
+
+ if (octeontx_pko_init_fc(tx_vfcnt))
+ return -ENOMEM;
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ res = rte_event_dev_configure(evdev, &dev_conf);
+ if (res < 0)
+ goto parse_error;
+
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ (uint32_t *)&pnum);
+ rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ (uint32_t *)&qnum);
+ if (pnum < qnum) {
+ octeontx_log_err("too few event ports (%d) for event_q(%d)",
+ pnum, qnum);
+ res = -EINVAL;
+ goto parse_error;
+ }
+ if (pnum > qnum) {
+ /*
+ * We don't poll on event ports
+ * that do not have any queues assigned.
+ */
+ pnum = qnum;
+ PMD_INIT_LOG(INFO,
+ "reducing number of active event ports to %d", pnum);
+ }
+ for (i = 0; i < qnum; i++) {
+ res = rte_event_queue_setup(evdev, i, NULL);
+ if (res < 0) {
+ octeontx_log_err("failed to setup event_q(%d): res %d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ for (i = 0; i < pnum; i++) {
+ res = rte_event_port_setup(evdev, i, NULL);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to setup ev port(%d) res=%d",
+ i, res);
+ goto parse_error;
+ }
+ /* Link one queue to one event port */
+ qlist = i;
+ res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
+ if (res < 0) {
+ res = -ENODEV;
+ octeontx_log_err("failed to link port (%d): res=%d",
+ i, res);
+ goto parse_error;
+ }
+ }
+
+ /* Create ethdev interface */
+ for (i = 0; i < init_params.nr_port; i++) {
+ port_id = octeontx_create(dev, i, evdev, socket_id);
+ if (port_id < 0) {
+ octeontx_log_err("failed to create device %s",
+ dev_name);
+ res = -ENODEV;
+ goto parse_error;
+ }
+
+ PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
+ port_id);
+ }
+
+ if (probe_once) {
+ octeontx_log_err("interface %s not supported", dev_name);
+ octeontx_remove(dev);
+ res = -ENOTSUP;
+ goto parse_error;
+ }
+ probe_once = 1;
+
+ return 0;
+
+parse_error:
+ octeontx_pko_fc_free();
+ return res;
+}
+
+static struct rte_vdev_driver octeontx_pmd_drv = {
+ .probe = octeontx_probe,
+ .remove = octeontx_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
+RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");
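
Given the registration above, the PMD is normally instantiated from the EAL command line with --vdev="eth_octeontx,nr_port=2" (the event_octeontx eventdev must also be present). The equivalent programmatic hot-plug, as a sketch with an illustrative helper name and argument string:

    #include <rte_bus_vdev.h>

    /* Hypothetical helper; nr_port=2 is just an example value. */
    static int
    plug_octeontx(void)
    {
        return rte_vdev_init("eth_octeontx", "nr_port=2");
    }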
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
new file mode 100644
index 00000000..c47d4c6d
--- /dev/null
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -0,0 +1,109 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __OCTEONTX_ETHDEV_H__
+#define __OCTEONTX_ETHDEV_H__
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_memory.h>
+
+#include <octeontx_fpavf.h>
+
+#include "base/octeontx_bgx.h"
+#include "base/octeontx_pki_var.h"
+#include "base/octeontx_pkivf.h"
+#include "base/octeontx_pkovf.h"
+#include "base/octeontx_io.h"
+
+#define OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT 12
+#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
+#define OCTEONTX_MAX_NAME_LEN 32
+
+static inline struct octeontx_nic *
+octeontx_pmd_priv(struct rte_eth_dev *dev)
+{
+ return dev->data->dev_private;
+}
+
+/* Octeontx ethdev nic */
+struct octeontx_nic {
+ struct rte_eth_dev *dev;
+ int node;
+ int port_id;
+ int port_ena;
+ int base_ichan;
+ int num_ichans;
+ int base_ochan;
+ int num_ochans;
+ uint8_t evdev;
+ uint8_t bpen;
+ uint8_t fcs_strip;
+ uint8_t bcast_mode;
+ uint8_t mcast_mode;
+ uint16_t num_tx_queues;
+ uint64_t hwcap;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint8_t speed;
+ uint16_t mtu;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ /* Rx port parameters */
+ struct {
+ bool classifier_enable;
+ bool hash_enable;
+ bool initialized;
+ } pki;
+
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+} __rte_cache_aligned;
+
+struct octeontx_txq {
+ uint16_t queue_id;
+ octeontx_dq_t dq;
+ struct rte_eth_dev *eth_dev;
+} __rte_cache_aligned;
+
+struct octeontx_rxq {
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint8_t evdev;
+ struct rte_eth_dev *eth_dev;
+ uint16_t ev_queues;
+ uint16_t ev_ports;
+} __rte_cache_aligned;
+
+#endif /* __OCTEONTX_ETHDEV_H__ */
diff --git a/drivers/net/octeontx/octeontx_logs.h b/drivers/net/octeontx/octeontx_logs.h
new file mode 100644
index 00000000..d5da7331
--- /dev/null
+++ b/drivers/net/octeontx/octeontx_logs.h
@@ -0,0 +1,76 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __OCTEONTX_LOGS_H__
+#define __OCTEONTX_LOGS_H__
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_MBOX
+#define PMD_MBOX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#define octeontx_log_err(s, ...) PMD_INIT_LOG(ERR, s, ##__VA_ARGS__)
+#define octeontx_log_dbg(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+#define octeontx_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#endif /* __OCTEONTX_LOGS_H__*/
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
new file mode 100644
index 00000000..c97d5b35
--- /dev/null
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -0,0 +1,127 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+
+#include "octeontx_ethdev.h"
+#include "octeontx_rxtx.h"
+#include "octeontx_logs.h"
+
+
+/* Returns 0 on success, negative on flow-control back-pressure; the
+ * return type must be signed for the caller's res < 0 check to work.
+ */
+static __rte_always_inline int __hot
+__octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
+ struct rte_mbuf *tx_pkt)
+{
+ uint64_t cmd_buf[4];
+ uint16_t gaura_id;
+
+ if (unlikely(*((volatile int64_t *)fc_status_va) < 0))
+ return -ENOSPC;
+
+ /* Get the gaura Id */
+ gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
+
+ /* Setup PKO_SEND_HDR_S */
+ cmd_buf[0] = tx_pkt->data_len & 0xffff;
+ cmd_buf[1] = 0x0;
+
+ /* Set don't free bit if reference count > 1 */
+ if (rte_mbuf_refcnt_read(tx_pkt) > 1)
+ cmd_buf[0] |= (1ULL << 58); /* SET DF */
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
+ cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+ tx_pkt->data_len;
+
+ octeontx_reg_lmtst(lmtline_va, ioreg_va, cmd_buf, PKO_CMD_SZ);
+
+ return 0;
+}
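
The four-word command assembled above is, read back from the code rather than a hardware manual: words 0-1 a PKO_SEND_HDR_S with the packet length in bits [15:0] and the don't-free flag at bit 58, words 2-3 a PKO_SEND_GATHER_S carrying the aura, load type and buffer IOVA. Two decode helpers as a sketch:

    #include <stdint.h>

    static inline uint16_t
    send_hdr_len(const uint64_t cmd_buf[4])
    {
        return (uint16_t)(cmd_buf[0] & 0xffff);  /* total length */
    }

    static inline int
    send_hdr_dont_free(const uint64_t cmd_buf[4])
    {
        return (int)((cmd_buf[0] >> 58) & 1);    /* DF, set if refcnt > 1 */
    }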
+
+uint16_t __hot
+octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int count;
+ struct octeontx_txq *txq = tx_queue;
+ octeontx_dq_t *dq = &txq->dq;
+ int res;
+
+ count = 0;
+
+ while (count < nb_pkts) {
+ res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
+ dq->fc_status_va,
+ tx_pkts[count]);
+ if (res < 0)
+ break;
+
+ count++;
+ }
+
+ return count; /* return number of pkts transmitted */
+}
+
+uint16_t __hot
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct octeontx_rxq *rxq;
+ struct rte_event ev;
+ size_t count;
+ uint16_t valid_event;
+
+ rxq = rx_queue;
+ count = 0;
+ while (count < nb_pkts) {
+ valid_event = rte_event_dequeue_burst(rxq->evdev,
+ rxq->ev_ports, &ev,
+ 1, 0);
+ if (!valid_event)
+ break;
+ rx_pkts[count++] = ev.mbuf;
+ }
+
+ return count; /* return number of pkts received */
+}
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
new file mode 100644
index 00000000..382ff2b2
--- /dev/null
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -0,0 +1,137 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_RXTX_H__
+#define __OCTEONTX_RXTX_H__
+
+#include <rte_ethdev.h>
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* Packet type table */
+#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+ [LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+ [LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+ [LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+ [LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV4_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+ [LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV4_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV4_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+ [LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV6][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+ [LC_IPV6_OPT][LE_NONE][LF_NONE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+ [LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+ [LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [LC_IPV6_OPT][LE_NONE][LF_TCP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [LC_IPV6_OPT][LE_NONE][LF_GRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+ [LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+ [LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
+uint16_t
+octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
+uint16_t
+octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+#endif /* __OCTEONTX_RXTX_H__ */
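
The three-dimensional table above is indexed by the PKI layer codes of a parsed packet. A sketch of the lookup, assuming lc/le/lf are the LC_*/LE_*/LF_* codes extracted from the work queue entry:

    static inline uint32_t
    octeontx_lookup_ptype(uint8_t lc, uint8_t le, uint8_t lf)
    {
        /* e.g. [LC_IPV4][LE_NONE][LF_TCP] -> L3_IPV4 | L4_TCP */
        return ptype_table[lc][le][lf];
    }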
diff --git a/drivers/net/octeontx/rte_pmd_octeontx_version.map b/drivers/net/octeontx/rte_pmd_octeontx_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/drivers/net/octeontx/rte_pmd_octeontx_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile
index 7ebd0bef..b6487d42 100644
--- a/drivers/net/pcap/Makefile
+++ b/drivers/net/pcap/Makefile
@@ -40,6 +40,9 @@ LIB = librte_pmd_pcap.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lpcap
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_pcap_version.map
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index defb3b41..5a86752f 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -44,7 +44,7 @@
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
@@ -75,7 +75,7 @@ struct queue_stat {
struct pcap_rx_queue {
pcap_t *pcap;
- uint8_t in_port;
+ uint16_t in_port;
struct rte_mempool *mb_pool;
struct queue_stat rx_stat;
char name[PATH_MAX];
@@ -411,11 +411,13 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
/* The dumper is created using the previous pcap_t reference */
*dumper = pcap_dump_open(tx_pcap, pcap_filename);
if (*dumper == NULL) {
+ pcap_close(tx_pcap);
RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
pcap_filename);
return -1;
}
+ pcap_close(tx_pcap);
return 0;
}
@@ -560,7 +562,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned int i;
@@ -592,6 +594,8 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
stats->oerrors = tx_packets_err_total;
+
+ return 0;
}
static void
@@ -838,7 +842,6 @@ pmd_init_internals(struct rte_vdev_device *vdev,
*/
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
return 0;
}
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index f03441d9..ccbffa45 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -13,6 +13,9 @@ LIB = librte_pmd_qede.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_qede_version.map
@@ -69,8 +72,9 @@ CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
endif
-else
-CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
+else #ICC
+CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
+CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant
endif
#
@@ -78,7 +82,7 @@ endif
# to disable warnings in them
#
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2603a8b3..fe42f325 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -144,12 +144,12 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
@@ -182,12 +182,12 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated aligned dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
@@ -196,7 +196,7 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
uint16_t j;
for (j = 0 ; j < ecore_mz_count; j++) {
- if (phys == ecore_mz_mapping[j]->phys_addr) {
+ if (phys == ecore_mz_mapping[j]->iova) {
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Free memzone %s\n", ecore_mz_mapping[j]->name);
rte_memzone_free(ecore_mz_mapping[j]);
@@ -292,3 +292,15 @@ qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ int i;
+
+ while (length--) {
+ crc ^= *ptr++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
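
The qede_crc32() added above is a plain bitwise implementation of the
reflected CRC-32 (polynomial 0xedb88320, the Ethernet/zlib polynomial), now
wired up to OSAL_CRC32() in bcm_osal.h. A minimal standalone check follows;
the all-ones seed and final inversion are the usual zlib convention and an
assumption here, since the patch does not show how OSAL_CRC32() consumers
seed the value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same bit-at-a-time loop as qede_crc32() above. */
static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *ptr, uint32_t length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	/* Assumed convention: seed with all ones, invert the result. */
	uint32_t crc = ~crc32_bitwise(0xffffffff, (const uint8_t *)msg,
				      (uint32_t)strlen(msg));

	/* 0xcbf43926 is the standard CRC-32 check value for "123456789". */
	printf("crc=0x%08x\n", crc);
	return 0;
}
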
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 3acf8f7c..52c2f0ec 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -23,6 +23,7 @@
/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
+struct ecore_ptt;
struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
@@ -45,6 +46,8 @@ void qed_link_update(struct ecore_hwfn *hwfn);
#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
+#define UNUSED(x) (void)(x)
+
/* Memory Types */
typedef uint8_t u8;
typedef uint16_t u16;
@@ -60,7 +63,7 @@ typedef u32 OSAL_BE32;
#define osal_uintptr_t uintptr_t
-typedef phys_addr_t dma_addr_t;
+typedef rte_iova_t dma_addr_t;
typedef rte_spinlock_t osal_spinlock_t;
@@ -147,6 +150,9 @@ void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
(_db_addr)), (u32)_val)
+#define DIRECT_REG_WR64(hwfn, addr, value) nothing
+#define DIRECT_REG_RD64(hwfn, addr) 0
+
/* Mutexes */
typedef pthread_mutex_t osal_mutex_t;
@@ -161,7 +167,12 @@ typedef pthread_mutex_t osal_mutex_t;
#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
-#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) \
+ do { \
+ UNUSED(lock); \
+ flags = 0; \
+ UNUSED(flags); \
+ } while (0)
#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
@@ -328,6 +339,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_TRANSCEIVER_UPDATE(hwfn) nothing
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
/* SR-IOV channel */
@@ -344,8 +356,9 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
-#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) 0
-#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing
+#define OSAL_IOV_VF_VPORT_STOP(hwfn, vf) nothing
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
@@ -365,7 +378,7 @@ void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
qede_hw_err_notify(hwfn, err_type)
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
-#define OSAL_NUM_ACTIVE_CPU() 0
+#define OSAL_NUM_CPUS() 0
/* Utility functions */
@@ -414,7 +427,9 @@ u32 qede_osal_log2(u32);
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
(void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
#define OSAL_PAGE_SIZE 4096
+#define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
#define OSAL_IOMEM volatile
+#define OSAL_UNUSED __attribute__((unused))
#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
#define OSAL_MIN_T(type, __min1, __min2) \
((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
@@ -427,10 +442,17 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
-#define OSAL_CRC32(crc, buf, length) 0
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
+#define OSAL_CRC32(crc, buf, length) qede_crc32(crc, buf, length)
#define OSAL_CRC8_POPULATE(table, polynomial) nothing
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
-#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
+
+#define OSAL_DIV_S64(a, b) ((a) / (b))
+#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing
+
#endif /* __BCM_OSAL_H */
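
Several OSAL stubs above move from the bare `nothing' expansion to a
do { ... } while (0) block that consumes its arguments. A small sketch of
why, with a hypothetical lock and call site: the macro then parses as one
statement inside unbraced if/else bodies, and the (void) casts silence
unused-argument warnings in a userspace OSAL where IRQ flags carry no
meaning.

#include <stdio.h>

#define UNUSED(x) (void)(x)
/* Mirrors the shape of OSAL_SPIN_LOCK_IRQSAVE() above. */
#define SPIN_LOCK_IRQSAVE(lock, flags) \
	do { \
		UNUSED(lock); \
		flags = 0; \
		UNUSED(flags); \
	} while (0)

int main(void)
{
	int lock = 0;
	unsigned long flags;

	if (lock == 0)
		SPIN_LOCK_IRQSAVE(&lock, flags); /* expands to one statement */
	else
		printf("contended\n");

	return 0;
}
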
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index bfe50e1f..9a6059ac 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 20
-#define FW_REVISION_VERSION 0
+#define FW_MINOR_VERSION 30
+#define FW_REVISION_VERSION 12
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -106,73 +106,70 @@
/***********************/
/* PCI functions */
-#define MAX_NUM_PORTS_K2 (4)
-#define MAX_NUM_PORTS_BB (2)
-#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
-
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-
-#define MAX_NUM_VFS_BB (120)
-#define MAX_NUM_VFS_K2 (192)
-#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
-#define COMMON_MAX_NUM_VFS (240)
-
-#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_E5 (4)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
+
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_E5 (16)
+#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E5 (240)
+#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
/* In both BB and K2 the VF number starts from 16, so for arrays containing
 * all possible PFs and VFs we need a constant for this size.
 */
-#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
-
-#define MAX_NUM_VPORTS_K2 (208)
-#define MAX_NUM_VPORTS_BB (160)
-#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E5 (256)
+#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
-#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
-#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_E5 (320) /* TODO_E5_VITALY - fix to 512 */
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_E5)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
-/* 4-Port K2. */
-#define NUM_PHYS_TCS_4PORT_K2 (4)
-#define NUM_OF_PHYS_TCS (8)
-
-#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
-#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
-
-#define LB_TC (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO (8)
-
-#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+#define NUM_PHYS_TCS_4PORT_K2 4
+#define NUM_PHYS_TCS_4PORT_TX_E5 6
+#define NUM_PHYS_TCS_4PORT_RX_E5 4
+#define NUM_OF_PHYS_TCS 8
+#define PURE_LB_TC NUM_OF_PHYS_TCS
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_TCS_4PORT_TX_E5 (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
+#define NUM_TCS_4PORT_RX_E5 (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define E4_NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_TASK_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4 (375e6)
-#define STORM_CLK_FREQ_E4 (1000e6)
-#define CLK25M_CLK_FREQ_E4 (25e6)
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES_E5 (16)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
-#define NUM_OF_GTT 19
-#define GTT_DWORD_SIZE_BITS 10
-#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
-#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
/* Tools Version */
#define TOOLS_VERSION 10
@@ -417,49 +414,51 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB 12
+#define PIS_PER_SB_E4 12
+#define PIS_PER_SB_E5 8
+#define MAX_PIS_PER_SB_E4 OSAL_MAX_T(PIS_PER_SB_E4, PIS_PER_SB_E5)
/* fsm is stopped or not valid for this sb */
-#define CAU_HC_STOPPED_STATE 3
+#define CAU_HC_STOPPED_STATE 3
/* fsm is working without interrupt coalescing for this sb*/
-#define CAU_HC_DISABLE_STATE 4
+#define CAU_HC_DISABLE_STATE 4
/* fsm is working with interrupt coalescing for this sb*/
-#define CAU_HC_ENABLE_STATE 0
+#define CAU_HC_ENABLE_STATE 0
/*****************/
/* IGU CONSTANTS */
/*****************/
-#define MAX_SB_PER_PATH_K2 (368)
-#define MAX_SB_PER_PATH_BB (288)
-#define MAX_TOT_SB_PER_PATH \
- MAX_SB_PER_PATH_K2
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_SB_PER_PATH_E5 (512)
+#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_E5
-#define MAX_SB_PER_PF_MIMD 129
-#define MAX_SB_PER_PF_SIMD 64
-#define MAX_SB_PER_VF 64
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
/* Memory addresses on the BAR for the IGU Sub Block */
-#define IGU_MEM_BASE 0x0000
+#define IGU_MEM_BASE 0x0000
-#define IGU_MEM_MSIX_BASE 0x0000
-#define IGU_MEM_MSIX_UPPER 0x0101
-#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
-#define IGU_MEM_PBA_MSIX_BASE 0x0200
-#define IGU_MEM_PBA_MSIX_UPPER 0x0202
-#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
-#define IGU_CMD_INT_ACK_BASE 0x0400
-#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - \
- 1)
-#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
-#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
-#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
-#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
@@ -467,8 +466,8 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - \
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE + \
+ MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
@@ -491,16 +490,16 @@
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
-#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
#define PXP_PF_WINDOW_ADMIN_START 0
#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
- PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
PXP_PER_PF_ENTRY_SIZE)
-#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
- PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
PXP_GLOBAL_ENTRY_SIZE)
@@ -575,19 +574,79 @@
#define PXP_BAR0_FIRST_INVALID_ADDRESS \
(PXP_BAR0_END_PSDM + 1)
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
-
-/* ILT Records */
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU \
+ (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+ (PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+ (PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+ (PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+ (PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+ (PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+ (PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+ (PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC \
+ (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 \
+ (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+// ILT Records
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
-#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-
+#define MAX_NUM_ILT_RECORDS \
+ OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-/* Host Interface */
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+#define PXP_NUM_ILT_RECORDS_E5 13664
+// Host Interface
+#define PXP_QUEUES_ZONE_MAX_NUM_E4 320
+#define PXP_QUEUES_ZONE_MAX_NUM_E5 512
/*****************/
@@ -635,7 +694,8 @@
/******************/
/* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES_E4 3328
+#define PBF_MAX_CMD_LINES_E5 5280
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
@@ -645,17 +705,6 @@
/*****************/
#define PRS_GFT_CAM_LINES_NO_MATCH 31
-/* Async data KCQ CQE */
-struct async_data {
- /* Context ID of the connection */
- __le32 cid;
- /* Task Id of the task (for error that happened on a a task) */
- __le16 itid;
- /* error code - relevant only if the opcode indicates its an error */
- u8 error_code;
- /* internal fw debug parameter */
- u8 fw_debug_param;
-};
/*
* Interrupt coalescing TimeSet
@@ -683,22 +732,29 @@ struct eth_rx_prod_data {
__le16 cqe_prod /* CQE producer. */;
};
-struct regpair {
- __le32 lo /* low word for reg-pair */;
- __le32 hi /* high word for reg-pair */;
+
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
-/*
- * Event Ring VF-PF Channel data
- */
-struct vf_pf_channel_eqe_data {
- struct regpair msg_addr /* VF-PF message address */;
+struct iscsi_connect_done_results {
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+/* decided tcp params after connect done */
+ struct tcp_ulp_connect_done_params params;
};
+
struct iscsi_eqe_data {
- __le32 cid /* Context ID of the connection */;
- /* Task Id of the task (for error that happened on a a task) */;
- __le16 conn_id;
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+ __le16 reserved;
/* error code - relevant only if the opcode indicates it's an error */
u8 error_code;
u8 error_pdu_opcode_reserved;
@@ -714,52 +770,10 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
-/*
- * Event Ring malicious VF data
- */
-struct malicious_vf_eqe_data {
- u8 vfId /* Malicious VF ID */;
- u8 errId /* Malicious VF error */;
- __le16 reserved[3];
-};
/*
- * Event Ring initial cleanup data
+ * Multi function mode
*/
-struct initial_cleanup_eqe_data {
- u8 vfId /* VF ID */;
- u8 reserved[7];
-};
-
-/*
- * Event Data Union
- */
-union event_ring_data {
- u8 bytes[8] /* Byte Array */;
- struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
- struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- struct regpair roceHandle /* Dedicated field for RDMA data */;
- struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
- struct initial_cleanup_eqe_data vf_init_cleanup
- /* VF Initial Cleanup data */;
-};
-/* Event Ring Entry */
-struct event_ring_entry {
- u8 protocol_id /* Event Protocol ID */;
- u8 opcode /* Event Opcode */;
- __le16 reserved0 /* Reserved */;
- __le16 echo /* Echo value from ramrod data on the host */;
- u8 fw_return_code /* FW return code for SP ramrods */;
- u8 flags;
-/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
-#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
-#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
- union event_ring_data data;
-};
-
-/* Multi function mode */
enum mf_mode {
ERROR_MODE /* Unsupported mode */,
MF_OVLAN /* Multi function based on outer VLAN */,
@@ -783,6 +797,12 @@ enum protocol_type {
};
+struct regpair {
+ __le32 lo /* low word for reg-pair */;
+ __le32 hi /* high word for reg-pair */;
+};
+
+
/*
* Ustorm Queue Zone
@@ -852,6 +872,18 @@ struct cau_sb_entry {
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
+
+/*
+ * IGU cleanup bit values to distinguish between a cleanup command and a
+ * producer/consumer update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+
/* core doorbell data */
struct core_db_data {
u8 params;
@@ -1008,7 +1040,7 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
/* Connection type is iWARP */
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
@@ -1072,9 +1104,9 @@ enum igu_seg_access {
* to the last-ethertype)
*/
enum l3_type {
- e_l3Type_unknown,
- e_l3Type_ipv4,
- e_l3Type_ipv6,
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
MAX_L3_TYPE
};
@@ -1085,9 +1117,9 @@ enum l3_type {
* first fragment, the protocol-type should be set to none.
*/
enum l4_protocol {
- e_l4Protocol_none,
- e_l4Protocol_tcp,
- e_l4Protocol_udp,
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
MAX_L4_PROTOCOL
};
@@ -1311,260 +1343,230 @@ struct pxp_vf_zone_a_permission {
* Rdif context
*/
struct rdif_task_context {
- __le32 initialRefTag;
- __le16 appTagValue;
- __le16 appTagMask;
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
/* 0 = IP checksum, 1 = CRC */
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
/* 1/2/3 - Protection Type */
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
/* 0=0x0000, 1=0xffff */
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
/* Keep reference tag constant */
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
- u8 partialDifData[7];
- __le16 partialCrcValue;
- __le16 partialChecksumValue;
- __le32 offsetInIO;
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 partial_dif_data[7];
+ __le16 partial_crc_value;
+ __le16 partial_checksum_value;
+ __le32 offset_in_io;
__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
/* 0=None, 1=DIF, 2=DIX */
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
/* DIF tag right at the beginning of DIF interval */
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
/* 0=None, 1=DIF */
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
/* Forward application tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
/* Forward reference tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
/* mask for reference tag handling */
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
-/* RSS hash type */
+/*
+ * RSS hash type
+ */
enum rss_hash_type {
- RSS_HASH_TYPE_DEFAULT = 0,
- RSS_HASH_TYPE_IPV4 = 1,
- RSS_HASH_TYPE_TCP_IPV4 = 2,
- RSS_HASH_TYPE_IPV6 = 3,
- RSS_HASH_TYPE_TCP_IPV6 = 4,
- RSS_HASH_TYPE_UDP_IPV4 = 5,
- RSS_HASH_TYPE_UDP_IPV6 = 6,
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
-/* status block structure */
-struct status_block {
- __le16 pi_array[PIS_PER_SB];
- __le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+/*
+ * status block structure
+ */
+struct status_block_e4 {
+ __le16 pi_array[PIS_PER_SB_E4];
+ __le32 sb_num;
+#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
-/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC 0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC \
-(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU 0
-#define PXP_VF_BAR0_IGU_LENGTH 0x3000
-#define PXP_VF_BAR0_END_IGU \
-(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ 0x3000
-#define PXP_VF_BAR0_DQ_LENGTH 0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
-(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
-#define PXP_VF_BAR0_END_DQ \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B \
-(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B \
-(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B \
-(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B \
-(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B \
-(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B \
-(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+/*
+ * status block structure
+ */
+struct status_block_e5 {
+ __le16 pi_array[PIS_PER_SB_E5];
+ __le32 sb_num;
+#define STATUS_BLOCK_E5_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E5_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E5_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_E5_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT 24
+};
+
/*
* Tdif context
*/
struct tdif_task_context {
- __le32 initialRefTag;
- __le16 appTagValue;
- __le16 appTagMask;
- __le16 partialCrcValueB;
- __le16 partialChecksumValueB;
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
/* 0 = IP checksum, 1 = CRC */
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
/* 1/2/3 - Protection Type */
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
/* 0=0x0000, 1=0xffff */
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
/* 0=None, 1=DIF, 2=DIX */
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
/* DIF tag right at the beginning of DIF interval */
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-/* reserved */
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
/* 0=None, 1=DIF */
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
/* mask for reference tag handling */
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
/* Forward application tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
/* Forward reference tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
/* Keep reference tag constant */
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
- __le32 offsetInIOB;
- __le16 partialCrcValueA;
- __le16 partialChecksumValueA;
- __le32 offsetInIOA;
- u8 partialDifDataA[8];
- u8 partialDifDataB[8];
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
+ __le16 partial_crc_value_a;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
+ u8 partial_dif_data_a[8];
+ u8 partial_dif_data_b[8];
};
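
The RDIF/TDIF renames and the new status_block_e4/status_block_e5 layouts
all follow the same HSI convention: a sub-field of a little-endian word is
described by a NAME_MASK/NAME_SHIFT pair and accessed through generic
helpers. The standalone sketch below reuses the SB_NUM field defined above;
GET_FIELD matches the helper visible in ecore.h, while this SET_FIELD is
only written to mirror it and is not copied from the patch.

#include <stdint.h>
#include <stdio.h>

/* Copied from the status_block_e4 layout above. */
#define STATUS_BLOCK_E4_SB_NUM_MASK	0x1FF
#define STATUS_BLOCK_E4_SB_NUM_SHIFT	0

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
/* Assumed mirror of the ecore SET_FIELD helper. */
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((uint32_t)(name##_MASK) << (name##_SHIFT)); \
		(value) |= ((uint32_t)(flag) & (uint32_t)(name##_MASK)) << \
			   (name##_SHIFT); \
	} while (0)

int main(void)
{
	uint32_t word = 0;

	SET_FIELD(word, STATUS_BLOCK_E4_SB_NUM, 42);
	printf("sb_num=%u\n",
	       (unsigned)GET_FIELD(word, STATUS_BLOCK_E4_SB_NUM));
	return 0;
}
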
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 0d68a9bc..ce5f3a90 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -29,9 +29,9 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 18
-#define ECORE_REVISION_VERSION 7
-#define ECORE_ENGINEERING_VERSION 1
+#define ECORE_MINOR_VERSION 30
+#define ECORE_REVISION_VERSION 8
+#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
@@ -50,6 +50,7 @@
#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
/* Constants */
#define ECORE_WID_SIZE (1024)
+#define ECORE_MIN_WIDS (4)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
@@ -66,6 +67,7 @@ enum ecore_nvm_cmd {
ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+ ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
@@ -97,16 +99,16 @@ do { \
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
-#endif
-#define ECORE_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
-#define ECORE_MFW_SET_FIELD(name, field, value) \
+#define SET_MFW_FIELD(name, field, value) \
do { \
- (name) &= ~(field ## _MASK); \
- (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+ (name) &= ~((field ## _MASK)); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)
+#endif
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
@@ -279,7 +281,6 @@ struct ecore_qm_iids {
* is received from MFW.
*/
enum ecore_resources {
- ECORE_SB,
ECORE_L2_QUEUE,
ECORE_VPORT,
ECORE_RSS_ENG,
@@ -293,7 +294,13 @@ enum ecore_resources {
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
ECORE_BDQ,
- ECORE_MAX_RESC, /* must be last */
+
+ /* This is needed only internally for matching against the IGU.
+ * In case of legacy MFW, this would be set to `0'.
+ */
+ ECORE_SB,
+
+ ECORE_MAX_RESC,
};
/* Features that require resources, given as input to the resource management
@@ -345,22 +352,32 @@ enum ecore_hw_err_type {
};
#endif
+enum ecore_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
-#define ECORE_IS_RDMA_PERSONALITY(dev) \
- ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_ROCE_PERSONALITY(dev) \
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_IWARP_PERSONALITY(dev) \
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_L2_PERSONALITY(dev) \
+#define ECORE_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH || \
ECORE_IS_RDMA_PERSONALITY(dev))
+#define ECORE_IS_FCOE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_FCOE)
+#define ECORE_IS_ISCSI_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ISCSI)
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
@@ -473,6 +490,12 @@ struct ecore_qm_info {
u8 num_pf_rls;
};
+struct ecore_db_recovery_info {
+ osal_list_t list;
+ osal_spinlock_t lock;
+ u32 db_recovery_counter;
+};
+
struct storm_stats {
u32 address;
u32 len;
@@ -488,14 +511,60 @@ struct ecore_fw_data {
u32 init_ops_size;
};
+enum ecore_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ ECORE_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ ECORE_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ ECORE_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ ECORE_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ ECORE_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ ECORE_MF_INTER_PF_SWITCH,
+
+ /* TODO - if we ever re-utilize any of this logic, we can rename */
+ ECORE_MF_UFP_SPECIFIC,
+
+ ECORE_MF_DISABLE_ARFS,
+};
+
+enum ecore_ufp_mode {
+ ECORE_UFP_MODE_ETS,
+ ECORE_UFP_MODE_VNIC_BW,
+};
+
+enum ecore_ufp_pri_type {
+ ECORE_UFP_PRI_OS,
+ ECORE_UFP_PRI_VNIC
+};
+
+struct ecore_ufp_info {
+ enum ecore_ufp_pri_type pri_type;
+ enum ecore_ufp_mode mode;
+ u8 tc;
+};
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
- #define ECORE_PATH_ID(_p_hwfn) \
- (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+#define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
u8 port_id;
bool b_active;
@@ -556,10 +625,6 @@ struct ecore_hwfn {
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
- /* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
-
struct ecore_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
@@ -573,6 +638,7 @@ struct ecore_hwfn {
struct ecore_pf_iov *pf_iov_info;
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
+ struct ecore_ufp_info ufp_info;
struct ecore_dmae_info dmae_info;
@@ -603,17 +669,19 @@ struct ecore_hwfn {
/* L2-related */
struct ecore_l2_info *p_l2_info;
+ /* Mechanism for recovering from doorbell drop */
+ struct ecore_db_recovery_info db_recovery_info;
+
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
};
-#ifndef __EXTRACT__LINUX__
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
+ ECORE_MF_UFP,
};
-#endif
/* @DPDK */
struct ecore_dbg_feature {
@@ -632,15 +700,18 @@ enum qed_dbg_features {
DBG_FEATURE_NUM
};
+enum ecore_dev_type {
+ ECORE_DEV_TYPE_BB,
+ ECORE_DEV_TYPE_AH,
+};
+
struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
- u8 type;
-#define ECORE_DEV_TYPE_BB (0 << 0)
-#define ECORE_DEV_TYPE_AH (1 << 0)
+ enum ecore_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
@@ -653,66 +724,68 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+ u16 vendor_id;
+ u16 device_id;
#define ECORE_DEV_ID_MASK 0xff00
#define ECORE_DEV_ID_MASK_BB 0x1600
#define ECORE_DEV_ID_MASK_AH 0x8000
- u16 vendor_id;
- u16 device_id;
-
u16 chip_num;
- #define CHIP_NUM_MASK 0xffff
- #define CHIP_NUM_SHIFT 16
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 0
- u16 chip_rev;
- #define CHIP_REV_MASK 0xf
- #define CHIP_REV_SHIFT 12
+ u8 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 0
#ifndef ASIC_ONLY
- #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
- #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
- #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
- #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
- CHIP_REV_IS_EMUL_B0(_p_dev))
- #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
- #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
- #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
- CHIP_REV_IS_FPGA_B0(_p_dev))
- #define CHIP_REV_IS_SLOW(_p_dev) \
- (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
- #define CHIP_REV_IS_A0(_p_dev) \
- (CHIP_REV_IS_EMUL_A0(_p_dev) || \
- CHIP_REV_IS_FPGA_A0(_p_dev) || \
- !(_p_dev)->chip_rev)
- #define CHIP_REV_IS_B0(_p_dev) \
- (CHIP_REV_IS_EMUL_B0(_p_dev) || \
- CHIP_REV_IS_FPGA_B0(_p_dev) || \
- (_p_dev)->chip_rev == 1)
- #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) \
+ (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_B0(_p_dev) \
+ (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
- #define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
- #define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_A0(_p_dev) \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)
+#define CHIP_REV_IS_B0(_p_dev) \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)
#endif
- u16 chip_metal;
- #define CHIP_METAL_MASK 0xff
- #define CHIP_METAL_SHIFT 4
+ u8 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 0
- u16 chip_bond_id;
- #define CHIP_BOND_ID_MASK 0xf
- #define CHIP_BOND_ID_SHIFT 0
+ u8 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xff
+#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
- u8 num_ports_in_engines;
+ u8 num_ports;
+ u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
+
+ unsigned long mf_bits;
enum ecore_mf_mode mf_mode;
- #define IS_MF_DEFAULT(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
- #define IS_MF_SI(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
- #define IS_MF_SD(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
int pcie_width;
int pcie_speed;
@@ -744,12 +817,14 @@ struct ecore_dev {
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
struct ecore_tunnel_info tunnel;
bool b_is_vf;
+ bool b_dont_override_vf_msix;
u32 drv_type;
@@ -800,6 +875,8 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
+#define CRC8_TABLE_SIZE 256
+
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@@ -808,8 +885,7 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8
-ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@@ -824,11 +900,12 @@ ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
return sw_fid;
}
-#define PURE_LB_TC 8
#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
u32 min_pf_rate);
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
@@ -854,6 +931,13 @@ u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
+/* doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec);
+
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
@@ -863,6 +947,4 @@ u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
-const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
-
#endif /* __ECORE_H */
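
The GET_MFW_FIELD/SET_MFW_FIELD pair introduced above differs from plain
GET_FIELD in that management-firmware fields carry a pre-shifted mask (the
_MASK already sits at the field's bit position) together with an _OFFSET,
so the mask is applied before shifting rather than after. A sketch with a
hypothetical field makes the contrast concrete:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MFW-style field occupying bits [15:8]. */
#define EX_MFW_FIELD_MASK	0x0000ff00
#define EX_MFW_FIELD_OFFSET	8

/* Same shape as the GET_MFW_FIELD/SET_MFW_FIELD macros above. */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field##_MASK)) >> (field##_OFFSET))
#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~((field##_MASK)); \
		(name) |= (((value) << (field##_OFFSET)) & (field##_MASK)); \
	} while (0)

int main(void)
{
	uint32_t reg = 0;

	SET_MFW_FIELD(reg, EX_MFW_FIELD, 0x5aU);
	printf("reg=0x%08x field=0x%02x\n", (unsigned)reg,
	       (unsigned)GET_MFW_FIELD(reg, EX_MFW_FIELD));
	return 0;
}
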
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 688118bb..50bd66da 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -41,10 +41,7 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
-/* If for some reason, HW P size is modified to be less than 32K,
- * special handling needs to be made for CDU initialization
- */
-#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
@@ -59,8 +56,8 @@
/* connection context union */
union conn_context {
- struct core_conn_context core_ctx;
- struct eth_conn_context eth_ctx;
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
@@ -69,6 +66,7 @@ union type0_task_context {
/* TYPE-1 task context - ROCE */
union type1_task_context {
+ struct regpair reserved; /* @DPDK */
};
struct src_ent {
@@ -230,13 +228,6 @@ struct ecore_cxt_mngr {
/* TODO - VF arfs filters ? */
};
-/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
- enum protocol_type type)
-{
- return type == PROTOCOLID_TOE;
-}
-
static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
{
return type == PROTOCOLID_TOE;
@@ -270,16 +261,12 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_cxt_mngr *p_mngr,
- struct ecore_src_iids *iids)
+static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- if (!src_proto(p_hwfn, i))
- continue;
-
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
@@ -297,8 +284,8 @@ struct ecore_tm_iids {
u32 per_vf_tids;
};
-static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
- struct ecore_tm_iids *iids)
+static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
{
bool tm_vf_required = false;
bool tm_required = false;
@@ -397,6 +384,20 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
+}
+
/* set the iids (cid/tid) count per protocol */
static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
@@ -687,7 +688,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_blk = &p_cli->pf_blks[0];
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
- total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = ecore_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@@ -706,7 +707,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* SRC */
p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@@ -820,7 +821,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@@ -1156,7 +1157,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
@@ -1170,7 +1171,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
/* Initialize the dynamic ILT allocation mutex */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
/* Set the cxt manager pointer prior to further allocations */
@@ -1219,7 +1222,9 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
}
@@ -1422,29 +1427,32 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_mcp_link_state *p_link;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
- ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
- p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
qm_info->num_vf_pqs,
qm_info->start_vport,
qm_info->num_vports, qm_info->pf_wfq,
- qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+ qm_info->pf_rl, p_link->speed,
+ p_hwfn->qm_info.qm_pq_params,
p_hwfn->qm_info.qm_vport_params);
}
/* CM PF */
-void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
@@ -1639,7 +1647,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@@ -1766,9 +1774,11 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+ struct ecore_conn_type_cfg *p_fcoe;
struct ecore_tid_seg *p_tid;
+ p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
if (!p_fcoe->cid_count)
return;
@@ -1785,9 +1795,9 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
ecore_cdu_init_common(p_hwfn);
}
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
@@ -1969,20 +1979,6 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
-{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- p_mgr->srq_count = num_srqs;
-}
-
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
-{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- return p_mgr->srq_count;
-}
-
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
{
/* Set the number of required CORE connections */
@@ -1993,19 +1989,24 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
{
- struct ecore_eth_pf_params *p_params =
+ u32 count = 0;
+
+ struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- /* TODO - we probably want to add VF number to the PF
- * params;
- * As of now, allocates 16 * 2 per-VF [to retain regular
- * functionality].
- */
- ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 32);
- p_hwfn->p_cxt_mngr->arfs_count =
- p_params->num_arfs_filters;
- break;
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
+
+ count = p_params->num_arfs_filters;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+ &p_hwfn->p_dev->mf_bits))
+ p_hwfn->p_cxt_mngr->arfs_count = count;
+
+ break;
}
default:
return ECORE_INVAL;
@@ -2220,34 +2221,3 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-
-enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto)
-{
- enum _ecore_status_t rc;
- u32 cid;
-
- /* Free Connection CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
- ecore_cxt_get_proto_cid_start(p_hwfn,
- proto),
- ecore_cxt_get_proto_cid_count(p_hwfn,
- proto,
- &cid));
-
- if (rc)
- return rc;
-
- /* Free Task CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
- ecore_cxt_get_proto_tid_count(p_hwfn,
- proto));
- if (rc)
- return rc;
-
- /* Free TSDM CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
- ecore_cxt_get_srq_count(p_hwfn));
-
- return rc;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 6ff823a5..54761e4e 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -98,15 +98,17 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
* @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 4f1b0698..632297a7 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -28,13 +28,13 @@
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
- return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_ETHTYPE);
}
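The ECORE_MFW_GET_FIELD to GET_MFW_FIELD conversions throughout this file track the firmware headers' rename of *_SHIFT constants to *_OFFSET. A minimal sketch of what such an accessor pair typically looks like, assuming the conventional <FIELD>_MASK/<FIELD>_OFFSET naming (illustrative, not copied from the ecore headers):

/* Illustrative only -- assumes <FIELD>_MASK and <FIELD>_OFFSET pairs. */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= (((value) << (field ## _OFFSET)) & \
			   (field ## _MASK)); \
	} while (0)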
static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
{
- u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@@ -45,13 +45,13 @@ static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
- return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
- u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@@ -114,6 +114,21 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
}
}
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+ struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+ u8 i;
+
+ if (!dscp->enabled)
+ return ECORE_DCBX_DSCP_DISABLED;
+
+ for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+ if (pri == dscp->dscp_pri_map[i])
+ return i;
+
+ return ECORE_DCBX_DSCP_DISABLED;
+}
+
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
@@ -121,29 +136,18 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
- struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
-
/* PF update ramrod data */
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
- p_data->arr[type].dscp_enable = dscp->enabled;
- if (p_data->arr[type].dscp_enable) {
- u8 i;
-
- for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
- if (prio == dscp->dscp_pri_map[i]) {
- p_data->arr[type].dscp_val = i;
- break;
- }
+ p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio);
+ if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) {
+ p_data->arr[type].dscp_enable = false;
+ p_data->arr[type].dscp_val = 0;
+ } else {
+ p_data->arr[type].dscp_enable = true;
}
-
- if (enable && p_data->arr[type].dscp_enable)
- p_data->arr[type].update = UPDATE_DCB_DSCP;
- else if (enable)
- p_data->arr[type].update = UPDATE_DCB;
- else
- p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ p_data->arr[type].update = UPDATE_DCB_DSCP;
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
@@ -159,7 +163,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
- const char *name; /* @DPDK */
int i;
for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
@@ -169,7 +172,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
continue;
personality = ecore_dcbx_app_update[i].personality;
- name = ecore_dcbx_app_update[i].name;
ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
@@ -224,7 +226,7 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
return true;
}
-/* Parse app TLV's to update TC information in hw_info structure for
+/* Parse app TLV's to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
@@ -234,8 +236,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
int count, u8 dcbx_version)
{
enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
u8 tc, priority_map;
- bool enable, ieee;
u16 protocol_id;
u8 priority;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -246,12 +248,12 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
/* Parse APP TLV */
for (i = 0; i < count; i++) {
- protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PROTOCOL_ID);
- priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PRI_MAP);
+ protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
@@ -270,12 +272,23 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !(type == DCBX_PROTOCOL_ETH);
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
+
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
+ &p_hwfn->p_dev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
@@ -303,17 +316,17 @@ static enum _ecore_status_t
ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
{
struct dcbx_app_priority_feature *p_app;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_results data = { 0 };
struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_results data = { 0 };
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
u8 dcbx_version;
int num_entries;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_version = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+ dcbx_version = GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@@ -322,16 +335,15 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
pri_tc_tbl = p_ets->pri_tc_tbl[0];
p_info = &p_hwfn->hw_info;
- num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+ num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
- p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_MAX_TCS);
- p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
+ p_info->num_active_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = GET_MFW_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -349,9 +361,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_mib_meta_data *p_data,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* The data is considered to be valid only if both sequence numbers are
* the same.
@@ -362,6 +374,12 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
p_data->addr, p_data->size);
prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else if (type == ECORE_DCBX_LLDP_TLVS) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num;
+
} else {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
p_data->addr, p_data->size);
@@ -414,26 +432,24 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
u8 pri_map;
int i;
- p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_WILLING);
- p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_ENABLED);
- p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
- p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_NUM_ENTRIES);
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ p_params->app_willing = GET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING);
+ p_params->app_valid = GET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_params->app_entry[i];
if (ieee) {
u8 sf_ieee;
u32 val;
- sf_ieee = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF_IEEE);
+ sf_ieee = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
switch (sf_ieee) {
case DCBX_APP_SF_IEEE_RESERVED:
/* Old MFW */
- val = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF);
+ val = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
entry->sf_ieee = val ?
ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
ECORE_DCBX_SF_IEEE_ETHTYPE;
@@ -453,14 +469,14 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
break;
}
} else {
- entry->ethtype = !(ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF));
+ entry->ethtype = !(GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
}
- pri_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ pri_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
ecore_dcbx_get_app_priority(pri_map, &entry->prio);
- entry->proto_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PROTOCOL_ID);
+ entry->proto_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
&entry->proto_type, ieee);
@@ -478,10 +494,10 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
{
u8 pfc_map;
- p_params->pfc.willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
- p_params->pfc.max_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
- p_params->pfc.enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
- pfc_map = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
@@ -505,13 +521,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
u32 bw_map[2], tsa_map[2], pri_map;
int i;
- p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_WILLING);
- p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_ENABLED);
- p_params->ets_cbs = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
- p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_MAX_TCS);
+ p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING);
+ p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED);
+ p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
p_params->ets_willing, p_params->ets_enabled,
@@ -552,7 +565,6 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -566,7 +578,6 @@ ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -579,9 +590,33 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
params->remote.valid = true;
}
-static enum _ecore_status_t
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_dscp_params *p_dscp;
+ struct dcb_dscp_map *p_dscp_map;
+ int i, j, entry;
+ u32 pri_map;
+
+ p_dscp = &params->dscp;
+ p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
+
+ /* MFW encodes the 64 dscp entries into an 8-element array of u32 entries,
+ * where each u32 holds the 4-bit priority map for 8 dscp entries.
+ */
+ for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+ pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+ entry, pri_map);
+ for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+ p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+ (j * 4)) & 0xf;
+ }
+}
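As an aside, a standalone model of the packing described in the comment above (a sketch with illustrative names, not part of the patch; the OSAL_BE32_TO_CPU byte-swap is elided): eight u32 words, each holding eight 4-bit priority nibbles, expand into the 64-entry dscp-to-priority table.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dscp_pri_map[8] = { 0x76543210 }; /* word 0 covers dscp 0..7 */
	uint8_t pri[64];
	int i, j, entry = 0;

	for (i = 0; i < 8; i++)                  /* 8 u32 words */
		for (j = 0; j < 8; j++, entry++) /* 8 nibbles per word */
			pri[entry] = (dscp_pri_map[i] >> (j * 4)) & 0xf;

	printf("dscp 5 -> priority %u\n", pri[5]); /* prints: priority 5 */
	return 0;
}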
+
+static void
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
@@ -597,27 +632,27 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
* was successfully performed
*/
p_operational = &params->operational;
- enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) !=
DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
- return ECORE_INVAL;
+ return;
}
p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_IEEE);
p_operational->ieee = val;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_STATIC);
p_operational->local = val;
@@ -632,45 +667,14 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
- err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
-
- return ECORE_SUCCESS;
-}
-
-static void
-ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
-{
- struct ecore_dcbx_dscp_params *p_dscp;
- struct dcb_dscp_map *p_dscp_map;
- int i, j, entry;
- u32 pri_map;
-
- p_dscp = &params->dscp;
- p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
- p_dscp->enabled = ECORE_MFW_GET_FIELD(p_dscp_map->flags,
- DCB_DSCP_ENABLE);
- /* MFW encodes 64 dscp entries into 8 element array of u32 entries,
- * where each entry holds the 4bit priority map for 8 dscp entries.
- */
- for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
- pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
- entry, pri_map);
- for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
- p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
- (j * 4)) & 0xf;
- }
}
-static void
-ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@@ -678,15 +682,13 @@ ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_local.local_chassis_id,
p_local->local_chassis_id,
- OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ sizeof(params->lldp_local.local_chassis_id));
OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
- OSAL_ARRAY_SIZE(p_local->local_port_id));
+ sizeof(params->lldp_local.local_port_id));
}
-static void
-ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@@ -694,40 +696,38 @@ ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
p_remote->peer_chassis_id,
- OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ sizeof(params->lldp_remote.peer_chassis_id));
OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
- OSAL_ARRAY_SIZE(p_remote->peer_port_id));
+ sizeof(params->lldp_remote.peer_port_id));
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_params,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
- ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_MIB:
- ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_params(p_hwfn, p_params);
break;
case ECORE_DCBX_OPERATIONAL_MIB:
- ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_operational_params(p_hwfn, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
return ECORE_INVAL;
}
- return rc;
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t
@@ -857,7 +857,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
}
- return rc;
+ return ECORE_SUCCESS;
}
/*
@@ -876,33 +876,28 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
- ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
- &p_hwfn->p_dcbx_info->get);
+ ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
rc = ecore_dcbx_process_mib_info(p_hwfn);
if (!rc) {
- bool enabled;
-
/* reconfigure tcs of QM queues according
* to negotiation results
*/
ecore_qm_reconf(p_hwfn, p_ptt);
/* update storm FW with negotiation results */
- ecore_sp_pf_update(p_hwfn);
-
- /* set eagle enigne 1 flow control workaround
- * according to negotiation results
- */
- enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
+ ecore_sp_pf_update_dcbx(p_hwfn);
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
p_hwfn->p_dcbx_info->dscp_nig_update) {
- ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, 0x1);
+ u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
@@ -927,8 +922,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_info *p_dcbx_info)
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
}
@@ -951,8 +945,6 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct protocol_dcb_data *p_dcb_data;
u8 update_flag;
- p_dest->pf_id = p_src->pf_id;
-
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
p_dest->update_eth_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
@@ -975,17 +967,16 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- rc = ECORE_TIMEOUT;
- DP_ERR(p_hwfn, "rc = %d\n", rc);
- return rc;
- }
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ ecore_dcbx_get_dscp_params(p_hwfn, p_get);
+
+ rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
@@ -1010,13 +1001,13 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
*pfc &= ~DCBX_PFC_ENABLED_MASK;
*pfc &= ~DCBX_PFC_CAPS_MASK;
- *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
pfc_map |= (1 << i);
*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
- *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
}
@@ -1046,7 +1037,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
- p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
@@ -1092,9 +1083,9 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
p_app->flags |= (u32)p_params->num_app_entries <<
- DCBX_APP_NUM_ENTRIES_SHIFT;
+ DCBX_APP_NUM_ENTRIES_OFFSET;
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry = 0;
if (ieee) {
@@ -1102,50 +1093,50 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT;
+ DCBX_APP_SF_IEEE_OFFSET;
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
}
} else {
*entry &= ~DCBX_APP_SF_MASK;
if (p_params->app_entry[i].ethtype)
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
else
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
}
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
- DCBX_APP_PROTOCOL_ID_SHIFT);
+ DCBX_APP_PROTOCOL_ID_OFFSET);
*entry &= ~DCBX_APP_PRI_MAP_MASK;
*entry |= ((u32)(p_params->app_entry[i].prio) <<
- DCBX_APP_PRI_MAP_SHIFT);
+ DCBX_APP_PRI_MAP_OFFSET);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
-static enum _ecore_status_t
+static void
ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
struct dcbx_local_params *local_admin,
struct ecore_dcbx_set *params)
@@ -1164,6 +1155,9 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@@ -1175,8 +1169,6 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
&params->config.params, ieee);
-
- return ECORE_SUCCESS;
}
static enum _ecore_status_t
@@ -1206,6 +1198,12 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->dscp_nig_update = true;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
+ p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
+ p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
+ p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);
return ECORE_SUCCESS;
}
@@ -1221,15 +1219,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
u32 resp = 0, param = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!hw_commit) {
- OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
- sizeof(p_hwfn->p_dcbx_info->set));
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(p_hwfn->p_dcbx_info->set));
+ if (!hw_commit)
return ECORE_SUCCESS;
- }
-
- /* clear set-parmas cache */
- OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
- sizeof(struct ecore_dcbx_set));
OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
@@ -1253,12 +1246,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
- if (rc != ECORE_SUCCESS) {
+ 1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, &param);
+ if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX update request\n");
- return rc;
- }
return rc;
}
@@ -1277,10 +1268,8 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*dcbx_info));
- if (!dcbx_info) {
- DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
+ if (!dcbx_info)
return ECORE_NOMEM;
- }
OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
@@ -1300,9 +1289,12 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp,
+ &p_hwfn->p_dcbx_info->get.dscp,
+ sizeof(struct ecore_dcbx_dscp_params));
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct ecore_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@@ -1312,3 +1304,230 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_TLV_RX_TYPE, tlv_type);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to register TLV\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_received_tlvs_s tlvs;
+ int i;
+
+ for (i = 0; i < LLDP_MAX_LLDP_AGENTS; i++) {
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_received_tlvs[i]);
+ data.lldp_tlvs = &tlvs;
+ data.size = sizeof(tlvs);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data,
+ ECORE_DCBX_LLDP_TLVS);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "Failed to read lldp TLVs\n");
+ return rc;
+ }
+
+ if (!tlvs.length)
+ continue;
+
+ for (i = 0; i < MAX_TLV_BUFFER; i++)
+ tlvs.tlvs_buffer[i] =
+ OSAL_CPU_TO_BE32(tlvs.tlvs_buffer[i]);
+
+ OSAL_LLDP_RX_TLVS(p_hwfn, tlvs.tlvs_buffer, tlvs.length);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ struct lldp_config_params_s lldp_params;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &lldp_params, addr,
+ sizeof(lldp_params));
+
+ p_params->tx_interval = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_TX_INTERVAL);
+ p_params->tx_hold = GET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD);
+ p_params->tx_credit = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_MAX_CREDIT);
+ p_params->rx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_RX);
+ p_params->tx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_TX);
+
+ OSAL_MEMCPY(p_params->chassis_id_tlv, lldp_params.local_chassis_id,
+ sizeof(p_params->chassis_id_tlv));
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->chassis_id_tlv[i]);
+
+ OSAL_MEMCPY(p_params->port_id_tlv, lldp_params.local_port_id,
+ sizeof(p_params->port_id_tlv));
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->port_id_tlv[i]);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ struct lldp_config_params_s lldp_params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ OSAL_MEMSET(&lldp_params, 0, sizeof(lldp_params));
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_TX_INTERVAL,
+ p_params->tx_interval);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD, p_params->tx_hold);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_MAX_CREDIT,
+ p_params->tx_credit);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_RX,
+ !!p_params->rx_enable);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_TX,
+ !!p_params->tx_enable);
+
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->chassis_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_chassis_id, p_params->chassis_id_tlv,
+ sizeof(lldp_params.local_chassis_id));
+
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->port_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_port_id, p_params->port_id_tlv,
+ sizeof(lldp_params.local_port_id));
+
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lldp_params, sizeof(lldp_params));
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_system_tlvs_buffer_s lld_tlv_buf;
+ u32 addr, *p_val;
+ u8 len;
+ int i;
+
+ p_val = (u32 *)p_params->buf;
+ for (i = 0; i < ECORE_LLDP_SYS_TLV_SIZE / 4; i++)
+ p_val[i] = OSAL_CPU_TO_BE32(p_val[i]);
+
+ OSAL_MEMSET(&lld_tlv_buf, 0, sizeof(lld_tlv_buf));
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_VALID, 1);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_MANDATORY,
+ !!p_params->discard_mandatory_tlv);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_LENGTH,
+ p_params->buf_size);
+ len = ECORE_LLDP_SYS_TLV_SIZE / 2;
+ OSAL_MEMCPY(lld_tlv_buf.data, p_params->buf, len);
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lld_tlv_buf, sizeof(lld_tlv_buf));
+
+ if (p_params->buf_size > len) {
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf2);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &p_params->buf[len],
+ ECORE_LLDP_SYS_TLV_SIZE / 2);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
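Worked example for the two-buffer copy above (a sketch assuming ECORE_LLDP_SYS_TLV_SIZE stays 256): len is 128, so a 200-byte TLV buffer lands as bytes 0..127 in system_lldp_tlvs_buf, and the second 128-byte half of the caller buffer, covering bytes 128..199 plus padding, is copied into system_lldp_tlvs_buf2.

#include <assert.h>

int main(void)
{
	const unsigned int tlv_size = 256;     /* ECORE_LLDP_SYS_TLV_SIZE */
	const unsigned int len = tlv_size / 2; /* bytes in the first copy */
	const unsigned int buf_size = 200;     /* example caller buffer */

	assert(len == 128);
	assert(buf_size > len); /* triggers the copy into ..._buf2 */
	return 0;
}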
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index eba2d91b..469e42dd 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,6 +17,8 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
+#define ECORE_DCBX_DSCP_DISABLED 0XFF
+
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -35,6 +37,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_mib_meta_data {
struct lldp_config_params_s *lldp_local;
struct lldp_status_params_s *lldp_remote;
+ struct lldp_received_tlvs_s *lldp_tlvs;
struct dcbx_local_params *local_admin;
struct dcb_dscp_map *dscp_map;
struct dcbx_mib *mib;
@@ -47,11 +50,15 @@ enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
enum ecore_mib_read_type);
-enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
- struct ecore_ptt *);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
-void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+/* Returns TOS value for a given priority */
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 2dc76796..9ff4df4c 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -18,7 +18,8 @@ enum ecore_mib_read_type {
ECORE_DCBX_REMOTE_MIB,
ECORE_DCBX_LOCAL_MIB,
ECORE_DCBX_REMOTE_LLDP_MIB,
- ECORE_DCBX_LOCAL_LLDP_MIB
+ ECORE_DCBX_LOCAL_LLDP_MIB,
+ ECORE_DCBX_LLDP_TLVS
};
struct ecore_dcbx_app_data {
@@ -101,7 +102,6 @@ struct ecore_dcbx_params {
bool ets_willing;
bool ets_enabled;
bool ets_cbs;
- bool valid; /* Indicate validity of params */
u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
@@ -175,6 +175,31 @@ struct ecore_dcbx_app_metadata {
enum ecore_pci_personality personality;
};
+enum ecore_lldp_agent {
+ ECORE_LLDP_NEAREST_BRIDGE = 0,
+ ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE,
+ ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE,
+ ECORE_LLDP_MAX_AGENTS
+};
+
+struct ecore_lldp_config_params {
+ enum ecore_lldp_agent agent;
+ u8 tx_interval;
+ u8 tx_hold;
+ u8 tx_credit;
+ bool rx_enable;
+ bool tx_enable;
+ u32 chassis_id_tlv[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 port_id_tlv[ECORE_LLDP_PORT_ID_STAT_LEN];
+};
+
+#define ECORE_LLDP_SYS_TLV_SIZE 256
+struct ecore_lldp_sys_tlvs {
+ bool discard_mandatory_tlv;
+ u8 buf[ECORE_LLDP_SYS_TLV_SIZE];
+ u16 buf_size;
+};
+
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
struct ecore_dcbx_get *,
enum ecore_mib_read_type);
@@ -187,6 +212,23 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
struct ecore_dcbx_set *,
bool);
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type);
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params);
+
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 65b89b8f..da1830ce 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -28,7 +28,6 @@
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
-#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
#include "ecore_l2.h"
@@ -42,6 +41,318 @@
static osal_spinlock_t qm_lock;
static bool qm_lock_init;
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where the last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct ecore_db_recovery_entry {
+ osal_list_entry_t list_entry;
+ void OSAL_IOMEM *db_addr;
+ void *db_data;
+ enum ecore_db_rec_width db_width;
+ enum ecore_db_rec_space db_space;
+ u8 hwfn_idx;
+};
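As an aside, a user-space model of the flow these entries enable (a sketch, not the ecore implementation): each entity keeps a shadow copy of the last doorbell data it wrote, and recovery replays every registered shadow to its doorbell address.

#include <stdint.h>
#include <stdio.h>

struct db_entry {
	volatile uint32_t *db_addr; /* doorbell register (BAR) */
	uint32_t *db_data;          /* shadow of last written data */
};

static void db_recovery_execute(struct db_entry *list, int n)
{
	int i;

	for (i = 0; i < n; i++)
		*list[i].db_addr = *list[i].db_data; /* replay doorbell */
}

int main(void)
{
	uint32_t bar[2] = { 0, 0 };    /* stand-in for the doorbell bar */
	uint32_t shadow[2] = { 7, 9 }; /* last data each entity wrote */
	struct db_entry list[2] = {
		{ &bar[0], &shadow[0] },
		{ &bar[1], &shadow[1] },
	};

	db_recovery_execute(list, 2);
	printf("%u %u\n", bar[0], bar[1]); /* prints: 7 9 */
	return 0;
}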
+
+/* display a single doorbell recovery entry */
+void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ const char *action)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action, db_entry, db_entry->db_addr, db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ /* make sure doorbell address is within the doorbell bar */
+ if (db_addr < p_dev->doorbells || (u8 *)db_addr >
+ (u8 *)p_dev->doorbells + p_dev->db_size) {
+ OSAL_WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr, p_dev->doorbells,
+ (u8 *)p_dev->doorbells + p_dev->db_size);
+ return false;
+ }
+
+ /* make sure doorbell data pointer is not null */
+ if (!db_data) {
+ OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* find hwfn according to the doorbell address */
+struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr)
+{
+ struct ecore_hwfn *p_hwfn;
+
+ /* In CMT doorbell bar is split down the middle between engine 0 and
+ * engine 1
+ */
+ if (ECORE_IS_CMT(p_dev))
+ p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+ &p_dev->hwfns[0] : &p_dev->hwfns[1];
+ else
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+ return p_hwfn;
+}
+
+/* add a new entry to the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space)
+{
+ struct ecore_db_recovery_entry *db_entry;
+ struct ecore_hwfn *p_hwfn;
+
+ /* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* create entry */
+ db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
+ if (!db_entry) {
+ DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
+ return ECORE_NOMEM;
+ }
+
+ /* populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* display */
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ return ECORE_SUCCESS;
+}
+
+/* remove an entry from the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+
+ /* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ /* search according to db_data addr since db_addr is not unique
+ * (roce)
+ */
+ if (db_entry->db_data == db_data) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry,
+ "Deleting");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ rc = ECORE_SUCCESS;
+ break;
+ }
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == ECORE_INVAL)
+ /*OSAL_WARN(true,*/
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ OSAL_FREE(p_dev, db_entry);
+
+ return rc;
+}
+
+/* initialize the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
+
+ /* make sure db_size was set in p_dev */
+ if (!p_hwfn->p_dev->db_size) {
+ DP_ERR(p_hwfn->p_dev, "db_size not set\n");
+ return ECORE_INVAL;
+ }
+
+ OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock);
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return ECORE_SUCCESS;
+}
+
+/* destroy the doorbell recovery mechanism */
+void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
DP_VERBOSE(p_hwfn, false, "Doorbell recovery teardown found a non-empty doorbell recovery list (expected in a disorderly driver unload, e.g. recovery; otherwise some flow likely forgot to call db_recovery_del). Purging the doorbell recovery list...\n");
+ while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ db_entry = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->db_recovery_info.list,
+ struct ecore_db_recovery_entry,
+ list_entry);
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_FREE(p_hwfn->p_dev, db_entry);
+ }
+ }
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
+#endif
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* print the content of the doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_NOTICE(p_hwfn, false,
+ "Dispalying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+
+/* ring the doorbell of a single doorbell recovery entry */
+void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ enum ecore_db_rec_exec db_exec)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr, *(u32 *)db_entry->db_data);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(unsigned long *)(db_entry->db_data));
+
+ /* Sanity */
+ if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+}
+
+/* traverse the doorbell recovery entry list and ring all the doorbells */
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec db_exec)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+/******************** Doorbell Recovery end ****************/
+
/* Configurable */
#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
* load the driver. The number was
@@ -49,28 +360,20 @@ static bool qm_lock_init;
*/
/* Derived */
-#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
-
-enum BAR_ID {
- BAR_ID_0, /* used for GRC */
- BAR_ID_1 /* Used for doorbells */
-};
+#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
-static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val;
- if (IS_VF(p_hwfn->p_dev)) {
- /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
- * read from actual register, but we're currently not using
- * it for actual doorbelling.
- */
- return 1 << 17;
- }
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_hw_bar_size(p_hwfn, bar_id);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = ecore_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -78,15 +381,13 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 256kB"
- " for GRC and 512kB for DB\n");
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
 val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
} else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 512kB"
- " for GRC and 512kB for DB\n");
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
val = 512 * 1024;
}
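For readers decoding the register path above: when PGLUE_B_REG_PF_BARx_SIZE is populated, the BAR size is a power of two with the exponent biased by 15, and 0 means "not configured", falling through to the CMT/non-CMT defaults. A few illustrative decodes of `1 << (val + 15)`:

/*   val = 1  ->  1 << 16  =  64kB
 *   val = 4  ->  1 << 19  = 512kB
 *   val = 6  ->  1 << 21  =   2MB
 */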
@@ -121,7 +422,9 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->my_id = i;
p_hwfn->b_active = false;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+#endif
OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
}
@@ -168,8 +471,11 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_iov_free(p_hwfn);
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
- ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ ecore_dcbx_info_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
+
+ /* destroy doorbell recovery mechanism */
+ ecore_db_recovery_teardown(p_hwfn);
}
}
@@ -307,7 +613,7 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -336,7 +642,7 @@ static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
 /* indicate how out-of-order (ooo) and high-priority traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -348,7 +654,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
}
@@ -690,7 +996,7 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
port = &qm_info->qm_port_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"port idx %d, active %d, active_phys_tcs %d,"
@@ -777,7 +1083,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -819,7 +1125,7 @@ static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct init_qm_port_params) *
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -861,12 +1167,17 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
+ /* initialize the doorbell recovery mechanism */
+ rc = ecore_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = ecore_cxt_mngr_alloc(p_hwfn);
if (rc)
goto alloc_err;
- /* Set the HW cid/tid numbers (in the contest manager)
+ /* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
rc = ecore_cxt_set_pf_params(p_hwfn);
@@ -1036,7 +1347,7 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_l2_setup(p_hwfn);
- ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_iov_setup(p_hwfn);
}
}
@@ -1116,7 +1427,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
}
/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
- switch (p_hwfn->p_dev->num_ports_in_engines) {
+ switch (p_hwfn->p_dev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1129,23 +1440,15 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, true,
"num_ports_in_engine = %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
return ECORE_INVAL;
}
- switch (p_hwfn->p_dev->mf_mode) {
- case ECORE_MF_DEFAULT:
- case ECORE_MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
- case ECORE_MF_OVLAN:
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
+ &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Unsupported MF mode, init as DEFAULT\n");
+ else
hw_mode |= 1 << MODE_MF_SI;
- }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -1161,7 +1464,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
hw_mode |= 1 << MODE_100G;
p_hwfn->hw_info.hw_mode = hw_mode;
@@ -1203,10 +1506,10 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_AH(p_dev)) {
/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_dev->num_ports_in_engines >> 1));
+ (p_dev->num_ports_in_engine >> 1));
ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
}
}
@@ -1232,7 +1535,7 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, sb_id;
+ int i, igu_sb_id;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -1242,20 +1545,77 @@ static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
p_igu_info = p_hwfn->hw_info.p_igu_info;
- for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+ igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
if (!p_block->is_pf)
continue;
ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
p_block->function_id, 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
}
}
}
+static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (wr_mbs < OSAL_CACHE_LINE_SIZE)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ OSAL_CACHE_LINE_SIZE, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
+}
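To make the two switches above concrete, here is one worked configuration, assuming a 64-byte host cache line (a typical value; OSAL_CACHE_LINE_SIZE is platform-defined):

/*   PSWRQ2_REG_WR_MBS0 reads 1          -> wr_mbs = 256
 *   min(OSAL_CACHE_LINE_SIZE, wr_mbs)   -> cache_line_size = 64
 *   64 encodes as val = 1               -> PGLUE cache-line RT reg = 1,
 *                                          and since val > 0 the DRAM
 *                                          align rd/wr RT regs get 1 too
 * The "suboptimal for performance" notice only fires when wr_mbs is
 * smaller than the host cache line, which is not the case here.
 */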
+
static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
@@ -1270,11 +1630,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
- ecore_gtt_init(p_hwfn);
+ ecore_gtt_init(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_hw_init_chip(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
@@ -1288,7 +1648,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
}
ecore_qm_common_rt_init(p_hwfn,
- p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engine,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1296,6 +1656,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_cxt_hw_init_common(p_hwfn);
+ ecore_init_cache_line_size(p_hwfn, p_ptt);
+
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1511,9 +1873,9 @@ static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size
* ------------------
@@ -1536,12 +1898,11 @@ ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
 * 0 is 4kB, 1 is 8kB, and so on. Hence the minimum size is 4,096
* containing 4 WIDs.
*/
- dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
- dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
- dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+ n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
+ dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
+ dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
+ ~(OSAL_PAGE_SIZE - 1);
dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
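The reworked sizing is easier to follow with numbers. A sketch, assuming ECORE_WID_SIZE of 1kB, ECORE_MIN_WIDS of 4 and a 4kB OSAL_PAGE_SIZE (plausible values; the constants are defined outside this hunk):

/*   n_cpus = 6:
 *     n_wids        = max(4, 6)             = 6
 *     dpi_page_size = 1kB * roundup_pow2(6) = 8kB
 *     page-align    = already a 4kB multiple = 8kB
 *     dpi_bit_shift = log2(8192 / 4096)     = 1
 *     dpi_count     = pwm_region_size / 8kB
 */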
@@ -1578,8 +1939,8 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
- db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
- if (p_hwfn->p_dev->num_hwfns > 1)
+ db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
db_bar_size /= 2;
/* Calculate doorbell regions
@@ -1600,7 +1961,8 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
OSAL_NULL) +
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
- norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
@@ -1626,7 +1988,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
- n_cpus = OSAL_NUM_ACTIVE_CPU();
+ n_cpus = OSAL_NUM_CPUS();
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
@@ -1678,12 +2040,29 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
{
+ u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
+ u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 i;
+
+ /* In CMT for non-RoCE packets - use connection based classification */
+ val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
+ for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
+ ppf_to_eng_sel[i] = val;
+ STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
+ ppf_to_eng_sel);
+
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
+
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
return ECORE_SUCCESS;
@@ -1694,7 +2073,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
else if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
/* Activate OPTE in CMT */
u32 val;
@@ -1746,7 +2125,7 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* Update rate limit once we'll actually have a link */
p_hwfn->qm_info.pf_rl = 100000;
}
- ecore_cxt_hw_init_pf(p_hwfn);
+ ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
ecore_int_igu_init_rt(p_hwfn);
@@ -1756,6 +2135,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
}
/* Enable classification by MAC if needed */
@@ -1815,7 +2199,7 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
@@ -1898,6 +2282,37 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
+static void
+ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
+ struct ecore_drv_load_params *p_drv_load)
+{
+ /* Make sure that if ecore-client didn't provide inputs, all the
+ * expected defaults are indeed zero.
+ */
+ OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
+ OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
+ OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
+
+ OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
+
+ if (p_drv_load != OSAL_NULL) {
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load =
+ p_drv_load->override_force_load;
+ }
+}
+
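The OSAL_BUILD_BUG_ON trio above encodes a convention worth naming: zeroing the struct must be equivalent to choosing every default, so OSAL_MEM_ZERO() alone is a complete answer when the client passes OSAL_NULL. A self-contained C11 sketch of the same pattern (names hypothetical; ecore's OSAL_BUILD_BUG_ON achieves this without requiring C11):

#include <string.h>

enum drv_role { DRV_ROLE_OS = 0, DRV_ROLE_KDUMP };

struct load_req {
	enum drv_role role;
	int timeout;
};

/* If the enum is ever reordered so the default is no longer 0,
 * fail the build instead of letting memset() pick a wrong default.
 */
_Static_assert(DRV_ROLE_OS == 0, "zeroed struct must mean the OS role");

static void fill_defaults(struct load_req *req)
{
	memset(req, 0, sizeof(*req)); /* role == DRV_ROLE_OS, timeout == 0 */
}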
enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_hw_init_params *p_params)
{
@@ -1911,13 +2326,6 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
-}
-
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
@@ -1928,8 +2336,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
- (p_dev->num_hwfns > 1)) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
@@ -1959,12 +2366,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
- load_req_params.drv_role = p_params->is_crash_kernel ?
- ECORE_DRV_ROLE_KDUMP :
- ECORE_DRV_ROLE_OS;
- load_req_params.timeout_val = p_params->mfw_timeout_val;
- load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ ecore_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
@@ -1978,6 +2381,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
"Load request was sent. Load code: 0x%x\n",
load_code);
+ ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
+
/* CQ75580:
 * When coming back from hibernate state, the registers from
* which shadow is read initially are not initialized. It turns
@@ -2072,7 +2477,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
"sending phony dcbx set command to trigger DCBx attention handling\n");
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
&param);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
@@ -2255,6 +2660,13 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+ rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to return IGU CAM to default\n");
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
@@ -2303,18 +2715,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
return rc2;
}
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
{
int j;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
@@ -2336,15 +2751,22 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ ecore_ptt_release(p_hwfn, p_ptt);
}
+
+ return ECORE_SUCCESS;
}
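Since the fastpath stop/start paths now acquire their own PTT window per hwfn instead of borrowing p_main_ptt, and report ECORE_AGAIN when none is free, callers need a retry policy. A hypothetical caller-side sketch (the retry bound and sleep are illustrative; ecore does not mandate them):

static enum _ecore_status_t stop_fastpath_with_retry(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc;
	int retry;

	for (retry = 0; retry < 10; retry++) {
		rc = ecore_hw_stop_fastpath(p_dev);
		if (rc != ECORE_AGAIN)
			return rc;
		OSAL_MSLEEP(1); /* wait for a PTT window to free up */
	}

	return ECORE_AGAIN;
}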
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev))
- return;
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
@@ -2357,8 +2779,11 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
}
/* Re-open incoming traffic */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -2423,27 +2848,35 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
- struct ecore_sb_cnt_info sb_cnt_info;
- int num_features = 1;
+ struct ecore_sb_cnt_info sb_cnt;
+ u32 non_l2_sbs = 0;
+
+ OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
/* L2 Queues require each: 1 status block. 1 L2 queue */
- feat_num[ECORE_PF_L2_QUE] =
- OSAL_MIN_T(u32,
- RESC_NUM(p_hwfn, ECORE_SB) / num_features,
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
-
- OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
- ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- feat_num[ECORE_VF_L2_QUE] =
- OSAL_MIN_T(u32,
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
- FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
- sb_cnt_info.sb_iov_cnt);
-
- feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
- feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
+ /* Start by allocating VF queues, then PF's */
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+ sb_cnt.iov_cnt);
+ feat_num[ECORE_PF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ sb_cnt.cnt - non_l2_sbs,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
+ }
+
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ feat_num[ECORE_FCOE_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
+
+ if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ feat_num[ECORE_ISCSI_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -2452,14 +2885,12 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
(int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
- RESC_NUM(p_hwfn, ECORE_SB));
+ (int)sb_cnt.cnt);
}
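The VF-first split above is clearer with numbers; one illustrative allocation (all counts invented for the example, and non_l2_sbs stays 0 in this version of the code):

/*   RESC_NUM(ECORE_L2_QUEUE) = 128, sb_cnt.cnt = 64, sb_cnt.iov_cnt = 48
 *     VF_L2_QUE = min(128, 48)          = 48   (VFs served first)
 *     PF_L2_QUE = min(64 - 0, 128 - 48) = 64   (bounded by PF SBs)
 */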
const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
{
switch (res_id) {
- case ECORE_SB:
- return "SB";
case ECORE_L2_QUEUE:
return "L2_QUEUE";
case ECORE_VPORT:
@@ -2486,6 +2917,8 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
return "RDMA_STATS_QUEUE";
case ECORE_BDQ:
return "BDQ";
+ case ECORE_SB:
+ return "SB";
default:
return "UNKNOWN_RESOURCE";
}
@@ -2493,12 +2926,14 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
static enum _ecore_status_t
__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id, u32 resc_max_val,
+ struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id,
+ u32 resc_max_val,
u32 *p_mcp_resp)
{
enum _ecore_status_t rc;
- rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
@@ -2516,7 +2951,8 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
@@ -2536,7 +2972,7 @@ ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
continue;
}
- rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2561,14 +2997,8 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
- struct ecore_sb_cnt_info sb_cnt_info;
switch (res_id) {
- case ECORE_SB:
- OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
- ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.sb_cnt;
- break;
case ECORE_L2_QUEUE:
*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
@@ -2625,6 +3055,12 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
if (!*p_resc_num)
*p_resc_start = 0;
break;
+ case ECORE_SB:
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
+ break;
default:
*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
break;
@@ -2689,14 +3125,9 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
goto out;
}
- /* TBD - remove this when revising the handling of the SB resource */
- if (res_id == ECORE_SB) {
- /* Excluding the slowpath SB */
- *p_resc_num -= 1;
- *p_resc_start -= p_hwfn->enabled_func_idx;
- }
-
- if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
+ if ((*p_resc_num != dflt_resc_num ||
+ *p_resc_start != dflt_resc_start) &&
+ res_id != ECORE_SB) {
DP_INFO(p_hwfn,
"MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
@@ -2726,10 +3157,8 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
-#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
-
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
{
struct ecore_resc_unlock_params resc_unlock_params;
@@ -2759,15 +3188,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
* Old drivers that don't acquire the lock can run in parallel, and
* their allocation values won't be affected by the updated max values.
*/
- OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
- resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
- resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
- resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
- resc_lock_params.sleep_b4_retry = true;
- OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
- resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
-
- rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ ECORE_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
} else if (rc == ECORE_NOTIMPL) {
@@ -2779,7 +3203,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
rc = ECORE_BUSY;
goto unlock_and_exit;
} else {
- rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
DP_NOTICE(p_hwfn, false,
"Failed to set the max values of the soft resources\n");
@@ -2787,7 +3211,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
} else if (rc == ECORE_NOTIMPL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@@ -2800,7 +3224,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@@ -2846,6 +3270,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
+ /* This will also learn the number of SBs from MFW */
+ if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
+ return ECORE_INVAL;
+
ecore_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
@@ -2859,7 +3287,9 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
unlock_and_exit:
- ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
return rc;
}
@@ -2870,6 +3300,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ struct ecore_mcp_link_capabilities *p_caps;
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
@@ -2889,8 +3320,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
- core_cfg);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, core_cfg);
core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
@@ -2959,6 +3390,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
link_temp = ecore_rd(p_hwfn, p_ptt,
@@ -2966,13 +3398,11 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
link->speed.advertised_speeds = link_temp;
-
- link_temp = link->speed.advertised_speeds;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+ p_caps->speed_capabilities = link->speed.advertised_speeds;
link_temp = ecore_rd(p_hwfn, p_ptt,
- port_cfg_addr +
- OFFSETOF(struct nvm_cfg1_port, link_settings));
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
@@ -3000,10 +3430,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
}
- p_hwfn->mcp_info->link_capabilities.default_speed =
- link->speed.forced_speed;
- p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
- link->speed.autoneg;
+ p_caps->default_speed = link->speed.forced_speed;
+ p_caps->default_speed_autoneg = link->speed.autoneg;
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
@@ -3015,15 +3443,47 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
+ link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, ext_phy));
+ link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
+ link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
+ link->eee.enable = true;
+ switch (link_temp) {
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
+ p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
+ link->eee.enable = false;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
+ p_caps->eee_lpi_timer =
+ EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
+ break;
+ }
+
+ link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
+ link->eee.tx_lpi_enable = link->eee.enable;
+ link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
+ } else {
+ p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
+ }
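Summarizing the NVM-to-driver mapping the switch above implements (default_eee starts as ECORE_MCP_EEE_ENABLED before the switch runs):

/*   DISABLED    -> default_eee = ECORE_MCP_EEE_DISABLED, eee.enable = false
 *   BALANCED    -> eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME
 *   AGGRESSIVE  -> eee_lpi_timer = EEE_TX_TIMER_USEC_AGGRESSIVE_TIME
 *   LOW_LATENCY -> eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME
 * Any other value keeps EEE enabled with no timer override; when the
 * MFW lacks the EEE capability bit, default_eee is UNSUPPORTED instead.
 */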
+
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n EEE: %02x [%08x usec]",
link->speed.forced_speed, link->speed.advertised_speeds,
- link->speed.autoneg, link->pause.autoneg);
+ link->speed.autoneg, link->pause.autoneg,
+ p_caps->default_eee, p_caps->eee_lpi_timer);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
@@ -3032,6 +3492,41 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_UFP_SPECIFIC;
+ break;
+
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->p_dev->mf_bits);
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+
+ /* This second switch is redundant, but it keeps the legacy mf_mode
+ * path easy to strip out in Linux. Long term, it would be better to
+ * have getters for the needed ECORE_MF_* fields, convert client code
+ * and eliminate this.
+ */
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3040,31 +3535,32 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
+ break;
}
- DP_INFO(p_hwfn, "Multi function mode is %08x\n",
- p_hwfn->p_dev->mf_mode);
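With mf_bits populated, feature sites test individual capabilities rather than comparing whole modes, as the LLH filter changes later in this patch do. A hypothetical helper in the same style:

static bool my_arfs_allowed(struct ecore_hwfn *p_hwfn)
{
	/* e.g. NPAR1_0 and CMT both set ECORE_MF_DISABLE_ARFS above */
	return !OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
			      &p_hwfn->p_dev->mf_bits);
}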
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
@@ -3101,7 +3597,7 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
- if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
@@ -3151,14 +3647,14 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port_mode;
#ifndef ASIC_ONLY
/* Read the port mode */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (CHIP_REV_IS_FPGA(p_dev))
port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
- (p_hwfn->p_dev->num_hwfns > 1))
+ else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
/* In CMT on emulation, assume 1 port */
port_mode = 1;
else
@@ -3166,38 +3662,39 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ p_dev->num_ports_in_engine);
- /* Default num_ports_in_engines to something */
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to a sane fallback */
+ p_dev->num_ports_in_engine = 1;
}
}
static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port;
int i;
- p_hwfn->p_dev->num_ports_in_engines = 0;
+ p_dev->num_ports_in_engine = 0;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
switch ((port & 0xf000) >> 12) {
case 1:
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
break;
case 3:
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
break;
case 0xf:
- p_hwfn->p_dev->num_ports_in_engines = 4;
+ p_dev->num_ports_in_engine = 4;
break;
default:
DP_NOTICE(p_hwfn, false,
@@ -3211,17 +3708,68 @@ static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
CNIG_REG_NIG_PORT0_CONF_K2_E5 +
(i * 4));
if (port & 1)
- p_hwfn->p_dev->num_ports_in_engines++;
+ p_dev->num_ports_in_engine++;
}
+
+ if (!p_dev->num_ports_in_engine) {
+ DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engine to a sane fallback */
+ p_dev->num_ports_in_engine = 1;
+ }
}
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (ECORE_IS_BB(p_hwfn->p_dev))
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* Determine the number of ports per engine */
+ if (ECORE_IS_BB(p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+
+ /* Get the total number of ports of the device */
+ if (ECORE_IS_CMT(p_dev)) {
+ /* In CMT there is always only one port */
+ p_dev->num_ports = 1;
+#ifndef ASIC_ONLY
+ } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
+#endif
+ } else {
+ u32 addr, global_offsize, global_addr;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, max_ports);
+ p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+ }
+}
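The MFW path above dereferences shmem in two hops; spelled out for reference (illustrative, using the macros as called above):

/*   public_base --SECTION_OFFSIZE_ADDR--> offsize word of PUBLIC_GLOBAL
 *   SECTION_ADDR(offsize, 0)           -> base of the global section
 *   + OFFSETOF(struct public_global, max_ports) -> the max_ports field
 */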
+
+static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_capabilities *p_caps;
+ u32 eee_status;
+
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
+ return;
+
+ p_caps->eee_speed_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
+ EEE_SUPPORTED_SPEED_OFFSET;
+ if (eee_status & EEE_1G_SUPPORTED)
+ p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
+ if (eee_status & EEE_10G_ADV)
+ p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
}
static enum _ecore_status_t
@@ -3244,14 +3792,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
}
- /* TODO In get_hw_info, amoungst others:
- * Get MCP FW revision and determine according to it the supported
- * featrues (e.g. DCB)
- * Get boot mode
- * ecore_get_pcie_width_speed, WOL capability.
- * Number of global CQ-s (for storage
- */
- ecore_hw_info_port_num(p_hwfn, p_ptt);
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+ ecore_mcp_get_capabilities(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
@@ -3291,6 +3835,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
p_hwfn->mcp_info->func_info.ovlan;
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+
+ ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
+
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
}
if (personality != ECORE_PCI_DEFAULT) {
@@ -3336,7 +3884,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
rc = ECORE_SUCCESS;
p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
@@ -3345,9 +3893,11 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
-static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
@@ -3357,21 +3907,27 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
&p_dev->device_id);
/* Determine type */
- if ((p_dev->device_id & ECORE_DEV_ID_MASK) == ECORE_DEV_ID_MASK_AH)
- p_dev->type = ECORE_DEV_TYPE_AH;
- else
+ device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case ECORE_DEV_ID_MASK_BB:
p_dev->type = ECORE_DEV_TYPE_BB;
+ break;
+ case ECORE_DEV_ID_MASK_AH:
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
+ p_dev->device_id);
+ return ECORE_ABORTED;
+ }
- p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
-
- MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
/* Learn number of HW-functions */
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
@@ -3391,32 +3947,29 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
}
#endif
- p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_TEST_REG) >> 4;
- MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
- p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
- MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
+ p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+ p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
+
DP_INFO(p_dev->hwfns,
- "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
- if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+ if (ECORE_IS_BB_A0(p_dev)) {
DP_NOTICE(p_dev->hwfns, false,
"The chip type/rev (BB A0) is not supported!\n");
return ECORE_ABORTED;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
if (CHIP_REV_IS_EMUL(p_dev)) {
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_ECO_RESERVED);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
if (tmp & (1 << 29)) {
DP_NOTICE(p_hwfn, false,
"Emulation: Running on a FULL build\n");
@@ -3446,7 +3999,6 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
"Mark hw/fw uninitialized\n");
p_hwfn->hw_init_done = false;
- p_hwfn->first_on_engine = false;
ecore_ptt_invalidate(p_hwfn);
}
@@ -3459,6 +4011,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
+ struct ecore_mdump_retain_data mdump_retain;
struct ecore_dev *p_dev = p_hwfn->p_dev;
struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -3495,7 +4048,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = ecore_get_dev_info(p_dev);
+ rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
@@ -3526,24 +4079,37 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
* called, since among others it sets the ports number in an engine.
*/
- if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
!p_dev->recov_in_prog) {
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
}
- /* Check if mdump logs are present and update the epoch value */
- if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ /* Check if mdump logs/data are present and update the epoch value */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+#endif
rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
&mdump_info);
- if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
DP_NOTICE(p_hwfn, false,
"* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
- }
+
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid)
+ DP_NOTICE(p_hwfn, false,
+ "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
p_params->epoch);
+#ifndef ASIC_ONLY
+ }
+#endif
}
/* Allocate the init RT array and initialize the init-ops engine */
@@ -3605,17 +4171,21 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_params->personality = p_hwfn->hw_info.personality;
 /* initialize 2nd hwfn if necessary */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
/* adjust bar offset for second engine */
addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = (void OSAL_IOMEM *)addr;
addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
/* prepare second hw function */
@@ -3666,7 +4236,9 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+#endif
}
ecore_iov_free_hw_info(p_dev);
@@ -3844,11 +4416,11 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
struct ecore_chain *p_chain,
struct ecore_chain_ext_pbl *ext_pbl)
{
- void *p_virt = OSAL_NULL;
- u8 *p_pbl_virt = OSAL_NULL;
- void **pp_virt_addr_tbl = OSAL_NULL;
- dma_addr_t p_phys = 0, p_pbl_phys = 0;
u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void *p_virt = OSAL_NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
@@ -4061,9 +4633,10 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
@@ -4084,7 +4657,7 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
p_filter[0], p_filter[1], p_filter[2], p_filter[3],
p_filter[4], p_filter[5], entry_num);
- return ECORE_SUCCESS;
+ return rc;
}
static enum _ecore_status_t
@@ -4128,9 +4701,10 @@ void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = p_filter[1] | (p_filter[0] << 8);
@@ -4202,10 +4776,11 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return rc;
high = 0;
low = 0;
@@ -4278,7 +4853,7 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
break;
}
- return ECORE_SUCCESS;
+ return rc;
}
static enum _ecore_status_t
@@ -4345,9 +4920,10 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = 0;
@@ -4415,7 +4991,10 @@ static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
@@ -4426,7 +5005,7 @@ enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
1 << p_hwfn->abs_pf_id / 2);
@@ -4526,7 +5105,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
- p_cid->abs.sb_idx, false);
+ p_cid->sb_igu_id, false);
if (rc != ECORE_SUCCESS)
goto out;
@@ -4567,7 +5146,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
- p_cid->abs.sb_idx, true);
+ p_cid->sb_igu_id, true);
if (rc != ECORE_SUCCESS)
goto out;
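In both coalesce paths, timeset is the coalesce period expressed in units of 2^timer_res, truncated to 8 bits; assuming the usual microsecond units for coalesce:

/*   coalesce = 96, timer_res = 1  ->  timeset = 96 >> 1 = 48
 *   coalesce = 96, timer_res = 2  ->  timeset = 96 >> 2 = 24
 */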
@@ -4604,8 +5183,7 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
}
}
-static void
-ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
int i;
@@ -4614,8 +5192,7 @@ ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 min_pf_rate)
+ struct ecore_ptt *p_ptt)
{
struct init_qm_vport_params *vport_params;
int i;
@@ -4623,7 +5200,7 @@ static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
vport_params = p_hwfn->qm_info.qm_vport_params;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
- ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_wfq_default_param(p_hwfn);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
@@ -4664,13 +5241,6 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
non_requested_count = num_vports - req_count;
/* validate possible error cases */
- if (req_rate > min_pf_rate) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
- vport_id, req_rate, min_pf_rate);
- return ECORE_INVAL;
- }
-
if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
@@ -4777,7 +5347,7 @@ static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS && use_wfq)
ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
else
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
return rc;
}
@@ -4791,7 +5361,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
int i, rc = ECORE_INVAL;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"WFQ configuration is not supported for this device\n");
return rc;
@@ -4820,12 +5390,13 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
u32 min_pf_rate)
{
int i;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_VERBOSE(p_dev, ECORE_MSG_LINK,
"WFQ configuration is not supported for this device\n");
return;
@@ -4834,8 +5405,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- __ecore_configure_vp_wfq_on_link_change(p_hwfn,
- p_hwfn->p_dpc_ptt,
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
min_pf_rate);
}
}
@@ -4980,8 +5550,7 @@ void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
- p_link->min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) *
@@ -4995,11 +5564,7 @@ int ecore_device_num_engines(struct ecore_dev *p_dev)
int ecore_device_num_ports(struct ecore_dev *p_dev)
{
- /* in CMT always only one port */
- if (p_dev->num_hwfns > 1)
- return 1;
-
- return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+ return p_dev->num_ports;
}
void ecore_set_fw_mac_addr(__le16 *fw_msb,
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 9126cf95..98bcabe8 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -57,22 +57,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
-struct ecore_hw_init_params {
- /* Tunnelling parameters */
- struct ecore_tunnel_info *p_tunn;
-
- bool b_hw_start;
-
- /* Interrupt mode [msix, inta, etc.] to use */
- enum ecore_int_mode int_mode;
-
- /* NPAR tx switching to be used for vports configured for tx-switching
- */
- bool allow_npar_tx_switch;
-
- /* Binary fw data pointer in binary fw file */
- const u8 *bin_fw_data;
+enum ecore_override_force_load {
+ ECORE_OVERRIDE_FORCE_LOAD_NONE,
+ ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
+ ECORE_OVERRIDE_FORCE_LOAD_NEVER,
+};
+struct ecore_drv_load_params {
/* Indicates whether the driver is running over a crash kernel.
* As part of the load request, this will be used for providing the
* driver role to the MFW.
@@ -90,6 +81,29 @@ struct ecore_hw_init_params {
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum ecore_override_force_load override_force_load;
+};
+
+struct ecore_hw_init_params {
+ /* Tunneling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum ecore_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct ecore_drv_load_params *p_drv_load_params;
};
/**
@@ -128,8 +142,9 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*
* @param p_dev
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
#ifndef LINUX_REMOVE
/**
@@ -140,16 +155,62 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
*
*/
void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+enum ecore_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum ecore_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data);
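A hedged usage sketch (the queue object and its field names are placeholders, not from this patch): register each doorbell together with its shadow data on creation, and remove it with the same db_data pointer, which serves as the key:

	rc = ecore_db_recovery_add(p_dev, p_txq->doorbell_addr,
				   &p_txq->db_data, DB_REC_WIDTH_32B,
				   DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* On teardown - the same db_data pointer identifies the entry. */
	rc = ecore_db_recovery_del(p_dev, p_txq->doorbell_addr,
				   &p_txq->db_data);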
+
+static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
+}
+
#endif
/**
* @brief ecore_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
- * @param p_dev
+ * @param p_hwfn
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,
@@ -240,7 +301,6 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
-#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
@@ -331,7 +391,6 @@ struct ecore_eth_stats {
struct ecore_eth_stats_ah ah;
};
};
-#endif
enum ecore_dmae_address_type_t {
ECORE_DMAE_ADDRESS_HOST_VIRT,
@@ -580,6 +639,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
+
+/**
+ * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param coal - where to store the coalesce value read from the hardware.
+ * @param handle - opaque queue handle.
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
+ void *handle);
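A minimal usage sketch, assuming p_rxq_handle is the opaque handle the queue-start call returned:

	enum _ecore_status_t rc;
	u16 coal = 0;

	rc = ecore_get_queue_coalesce(p_hwfn, &coal, p_rxq_handle);
	if (rc == ECORE_SUCCESS)
		DP_INFO(p_hwfn, "queue coalesce = %u\n", coal);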
+
/**
* @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
* Tx queue. The fact that we can configure coalescing to up to 511, but on
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 5c2a08f9..d8abd604 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -618,7 +618,7 @@ struct ustorm_core_conn_st_ctx {
/*
* core connection context
*/
-struct core_conn_context {
+struct e4_core_conn_context {
/* ystorm storm context */
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2] /* padding */;
@@ -661,6 +661,7 @@ enum core_event_opcode {
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
@@ -745,6 +746,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
+ CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@@ -858,7 +860,8 @@ struct core_rx_gsi_offload_cqe {
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
/* These are the lower 16 bit of QP id in RoCE BTH header */
__le16 qp_id;
- __le32 gid_dst[4] /* Gid destination address */;
+ __le32 src_qp /* Source QP from DETH header */;
+ __le32 reserved[3];
};
/*
@@ -899,7 +902,10 @@ struct core_rx_start_ramrod_data {
u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
/* if set, 802.1q tags will be removed and copied to CQE */
- u8 inner_vlan_removal_en;
+ u8 inner_vlan_stripping_en;
+/* if set, outer tag won't be stripped, valid only in MF OVLAN. */
+ u8 outer_vlan_stripping_dis;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
@@ -916,7 +922,7 @@ struct core_rx_start_ramrod_data {
struct core_rx_action_on_error action_on_error;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 reserved[7];
+ u8 reserved[6];
};
@@ -938,48 +944,51 @@ struct core_rx_stop_ramrod_data {
struct core_tx_bd_data {
__le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_DATA_START_BD_MASK 0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
* 0-TCP, 1-UDP
*/
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
* when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
/* Number of BDs that make up one packet - width wide enough to present
* CORE_LL2_TX_MAX_BDS_PER_PACKET
*/
-#define CORE_TX_BD_DATA_NBDS_MASK 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
* connType is ROCE (use enum core_roce_flavor_type)
*/
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
/* Calculate ip length */
-#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
-#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+/* disables the STAG insertion, relevant only in MF OVLAN mode. */
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
};
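To show how these masks and shifts compose, a sketch that builds as_bitfield by hand (the HSI SET_FIELD helper is the usual route but its definition is outside this hunk; the enclosing BD layout and values are illustrative):

	u16 bd_data = 0;

	/* First BD of a single-BD packet with IP + L4 checksum offload. */
	bd_data |= (0x1 & CORE_TX_BD_DATA_START_BD_MASK) <<
		   CORE_TX_BD_DATA_START_BD_SHIFT;
	bd_data |= (0x1 & CORE_TX_BD_DATA_IP_CSUM_MASK) <<
		   CORE_TX_BD_DATA_IP_CSUM_SHIFT;
	bd_data |= (0x1 & CORE_TX_BD_DATA_L4_CSUM_MASK) <<
		   CORE_TX_BD_DATA_L4_CSUM_SHIFT;
	bd_data |= (0x1 & CORE_TX_BD_DATA_NBDS_MASK) <<
		   CORE_TX_BD_DATA_NBDS_SHIFT;
	p_bd->bd_data.as_bitfield = OSAL_CPU_TO_LE16(bd_data);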
/*
@@ -1046,6 +1055,17 @@ struct core_tx_stop_ramrod_data {
/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ u8 reserved0;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved1[1];
+};
+
+
+/*
* Enum flag for what type of dcb data to update
*/
enum dcb_dscp_update_mode {
@@ -1182,6 +1202,63 @@ struct eth_ustorm_per_queue_stat {
/*
+ * Event Ring VF-PF Channel data
+ */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+ u8 vf_id /* Malicious VF ID */;
+ u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
+ __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved[7];
+};
+
+/*
+ * Event Data Union
+ */
+union event_ring_data {
+ u8 bytes[8] /* Byte Array */;
+ struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+ struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+/* Dedicated fields to iscsi connect done results */
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+/* VF Initial Cleanup data */
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+
+/*
+ * Event Ring Entry
+ */
+struct event_ring_entry {
+ u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
+ u8 opcode /* Event Opcode */;
+ __le16 reserved0 /* Reserved */;
+ __le16 echo /* Echo value from ramrod data on the host */;
+ u8 fw_return_code /* FW return code for SP ramrods */;
+ u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
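A sketch of how an EQ consumer might branch on the new flags field (the two handlers named here are hypothetical):

	static void eq_dispatch(struct event_ring_entry *p_eqe)
	{
		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			/* Asynchronous EQE, e.g. a VF-PF channel message */
			if (p_eqe->protocol_id == PROTOCOLID_COMMON)
				handle_vf_pf_msg(&p_eqe->data.vf_pf_channel);
		} else {
			/* Synchronous EQE - completes a slowpath ramrod */
			complete_ramrod(p_eqe->echo, p_eqe->fw_return_code);
		}
	}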
+
+/*
* Event Ring Next Page Address
*/
struct event_ring_next_addr {
@@ -1211,6 +1288,18 @@ enum fw_flow_ctrl_mode {
/*
+ * GFT profile type.
+ */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */,
+/* L4 destination port, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination address and IP type. */,
+ MAX_GFT_PROFILE_TYPE
+};
+
+
+/*
* Major and Minor hsi Versions
*/
struct hsi_fp_ver_struct {
@@ -1311,6 +1400,34 @@ struct mstorm_vf_zone {
/*
+ * vlan header including TPID and TCI fields
+ */
+struct vlan_header {
+ __le16 tpid /* Tag Protocol Identifier */;
+ __le16 tci /* Tag Control Information */;
+};
+
+/*
+ * outer tag configurations
+ */
+struct outer_tag_config_struct {
+/* Enables the STAG priority change. Should be 1 for Bette Davis and UFP with
+ * Host Control mode, else 0.
+ */
+ u8 enable_stag_pri_change;
+/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
+ u8 pri_map_valid;
+ u8 reserved[2];
+/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
+ * identifier and outer tag control information
+ */
+ struct vlan_header outer_tag;
+/* Map from inner to outer priority. Set pri_map_valid when the map is set */
+ u8 inner_to_outer_pri_map[8];
+};
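An illustrative initialization for an MF_OVLAN function - the TPID, TCI source, and identity priority map are example choices, not requirements of this patch:

	struct outer_tag_config_struct cfg;
	u8 pri;

	OSAL_MEMSET(&cfg, 0, sizeof(cfg));
	cfg.enable_stag_pri_change = 0;	/* not Bette Davis / UFP host-control */
	cfg.outer_tag.tpid = OSAL_CPU_TO_LE16(0x8100);	/* 802.1Q */
	cfg.outer_tag.tci = OSAL_CPU_TO_LE16(mf_vlan);	/* mf_vlan: assumed input */
	cfg.pri_map_valid = 1;
	for (pri = 0; pri < 8; pri++)
		cfg.inner_to_outer_pri_map[pri] = pri;	/* identity map */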
+
+
+/*
* personality per PF
*/
enum personality_type {
@@ -1361,7 +1478,6 @@ struct pf_start_ramrod_data {
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
- __le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
@@ -1381,16 +1497,11 @@ struct pf_start_ramrod_data {
u8 integ_phase /* Integration phase */;
/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
u8 allow_npar_tx_switching;
-/* Map from inner to outer priority. Set pri_map_valid when init map */
- u8 inner_to_outer_pri_map[8];
-/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
- u8 pri_map_valid;
-/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
- * (lower 16 bits) and ethType to use (higher 16 bits)
- */
- __le32 outer_tag;
+ u8 reserved0;
/* FP HSI version to be used by FW */
struct hsi_fp_ver_struct hsi_fp_ver;
+/* Outer tag configurations */
+ struct outer_tag_config_struct outer_tag_config;
};
@@ -1441,15 +1552,19 @@ struct pf_update_tunnel_config {
* Data for port update ramrod
*/
struct pf_update_ramrod_data {
- u8 pf_id;
- u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
- u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
- u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+/* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_eth_dcb_data_mode;
+/* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_fcoe_dcb_data_mode;
+/* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_iscsi_dcb_data_mode;
u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+/* Update Enable STAG Priority Change indication */
+ u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
/* core iscsi related fields */
@@ -1460,7 +1575,11 @@ struct pf_update_ramrod_data {
/* core iwarp related fields */
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
- __le16 reserved;
+/* Enables the inner to outer TAG priority mapping. Should be 1 for Bette
+ * Davis and UFP with Host Control mode, else 0.
+ */
+ u8 enable_stag_pri_change;
+ u8 reserved;
/* tunnel configuration. */
struct pf_update_tunnel_config tunnel_config;
};
@@ -1745,6 +1864,7 @@ enum vf_zone_size_mode {
+
/*
* Attentions status block
*/
@@ -1758,17 +1878,6 @@ struct atten_status_block {
/*
- * Igu cleanup bit values to distinguish between clean or producer consumer
- * update.
- */
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
-};
-
-
-/*
* DMAE command
*/
struct dmae_cmd {
@@ -2200,23 +2309,23 @@ struct qm_rf_opportunistic_mask {
/*
* QM hardware structure of QM map memory
*/
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
/* the first PQ associated with the VPORT and VOQ of this PQ */
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 7443ff9d..ebb66482 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1053,7 +1053,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -1107,7 +1107,9 @@ struct dbg_tools_data {
u8 chip_id /* Chip ID (from enum chip_ids) */;
u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
- u8 reserved;
+ u8 use_dmae /* Indicates if DMAE should be used */;
+/* Number of registers that were read since the last log */
+ __le32 num_regs_read;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 397c408d..ffbf5c71 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -669,7 +669,7 @@ struct mstorm_eth_conn_st_ctx {
/*
* eth connection context
*/
-struct eth_conn_context {
+struct e4_eth_conn_context {
/* tstorm storm context */
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2] /* padding */;
@@ -765,6 +765,7 @@ enum eth_event_opcode {
ETH_EVENT_RX_DELETE_UDP_FILTER,
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@@ -882,6 +883,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
/* RX - Add/Delete a GFT Filter to the Searcher */
ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_ETH_RAMROD_CMD_ID
};
@@ -1092,7 +1094,7 @@ struct eth_vport_tx_mode {
/*
- * Ramrod data for rx create gft action
+ * GFT filter update action type.
*/
enum gft_filter_update_action {
GFT_ADD_FILTER,
@@ -1101,16 +1103,6 @@ enum gft_filter_update_action {
};
-/*
- * Ramrod data for rx create gft action
- */
-enum gft_logic_filter_type {
- GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
- RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
- MAX_GFT_LOGIC_FILTER_TYPE
-};
-
-
/*
@@ -1166,7 +1158,7 @@ struct rx_create_openflow_action_data {
*/
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id /* ID of RX queue */;
- __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+ __le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
 __le16 bd_max_bytes /* maximal bytes that can be placed on the bd */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* index of the protocol index */;
@@ -1254,26 +1246,34 @@ struct rx_udp_filter_data {
/*
- * Ramrod to add filter - filter is packet headr of type of packet wished to
- * pass certin FW flow
+ * add or delete a GFT filter - the filter is the packet header of the packet
+ * type that should pass a certain FW flow
*/
struct rx_update_gft_filter_data {
/* Pointer to Packet Header That Defines GFT Filter */
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length /* Packet Header Length */;
-/* If is_rfs flag is set: Queue Id to associate filter with else: action icid */
- __le16 rx_qid_or_action_icid;
-/* Field is used if is_rfs flag is set: vport Id of which to associate filter
- * with
- */
- u8 vport_id;
-/* Use enum to set type of flow using gft HW logic blocks */
- u8 filter_type;
+/* Action icid. Valid if action_icid_valid flag set. */
+ __le16 action_icid;
+ __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
+ __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
+ u8 vport_id /* RX vport Id. */;
+/* If set, action_icid will be used for the GFT filter update. */
+ u8 action_icid_valid;
+/* If set, rx_qid will be used for traffic steering, in addition to vport_id.
+ * flow_id_valid must be cleared. If cleared, the queue ID will be selected
+ * by RSS.
+ */
+ u8 rx_qid_valid;
+/* If set, flow_id will be reported by the CQE; rx_qid_valid must be cleared.
+ * If cleared, flow_id 0 will be reported by the CQE.
+ */
+ u8 flow_id_valid;
u8 filter_action /* Use to set type of action on filter */;
/* 0 - don't assert in case of error. Just return an error code. 1 - assert in
* case of error.
*/
u8 assert_on_error;
+ u8 reserved[2];
};
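A hedged fill-in for a queue-steering filter add (pkt_hdr_phys, hdr_len, rxq_id and vport are assumed inputs; p_ramrod is assumed to point at this structure inside an allocated ramrod):

	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, pkt_hdr_phys);
	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(hdr_len);
	p_ramrod->vport_id = vport;
	p_ramrod->rx_qid = OSAL_CPU_TO_LE16(rxq_id);
	p_ramrod->rx_qid_valid = 1;	/* steer to rx_qid ... */
	p_ramrod->flow_id_valid = 0;	/* ... so flow_id reporting stays off */
	p_ramrod->action_icid_valid = 0;
	p_ramrod->filter_action = GFT_ADD_FILTER;
	p_ramrod->assert_on_error = 0;	/* prefer an error code over an assert */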
@@ -1344,6 +1344,17 @@ struct tx_queue_stop_ramrod_data {
};
+/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
+
/*
* Ramrod data for vport update ramrod
@@ -1388,9 +1399,9 @@ struct vport_start_ramrod_data {
/* If set, ETH header padding will not be inserted. placement_offset will be zero.
*/
u8 zero_placement_offset;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
u8 reserved[5];
};
@@ -1456,9 +1467,9 @@ struct vport_update_ramrod_data_cmn {
* updated
*/
u8 update_ctl_frame_checks_en_flg;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
u8 reserved[15];
};
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 2bcc32d3..84f273b0 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -64,7 +64,9 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
}
p_hwfn->p_ptt_pool = p_pool;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
@@ -83,8 +85,10 @@ void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
+#ifdef CONFIG_ECORE_LOCK_ALLOC
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
}
@@ -132,7 +136,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
@@ -155,7 +159,7 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
{
u32 prev_hw_addr;
- prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
@@ -177,7 +181,7 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr)
{
- u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
@@ -740,10 +744,10 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, ECORE_MSG_HW,
- "ecore_dmae_host2grc: Wait Failed. source_addr"
- " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
+ "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
(unsigned long)src_addr, (unsigned long)dst_addr,
- length_dw);
+ length_dw,
+ (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
return ecore_status;
}
@@ -785,6 +789,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+ if (!cmd) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
+ (unsigned long)src_addr,
+ (unsigned long)dst_addr,
+ length_cur);
+ return ECORE_INVAL;
+ }
+
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 0750b2ed..0b9814f5 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -71,8 +71,10 @@ enum _dmae_cmd_crc_mask {
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
+* @param p_ptt
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
@@ -98,17 +100,6 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
- *
- * @param p_hwfn
- * @param p_ptt
- *
- * @return u32
- */
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
-
-/**
* @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
*
* @param p_hwfn
@@ -258,35 +249,6 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
-union ecore_qm_pq_params {
- struct {
- u8 q_idx;
- } iscsi;
-
- struct {
- u8 tc;
- } core;
-
- struct {
- u8 is_vf;
- u8 vf_id;
- u8 tc;
- } eth;
-
- struct {
- u8 dcqcn;
- u8 qpid; /* roce relative */
- } roce;
-
- struct {
- u8 qidx;
- } iwarp;
-};
-
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- union ecore_qm_pq_params *params);
-
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index b5ef173e..1da80a65 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -20,12 +20,12 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
-static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
{ 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@@ -43,6 +43,9 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
+
/* WFQ constants: */
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
@@ -52,18 +55,19 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-/* 0.7 * upper bound (62500000) */
-#define QM_WFQ_MAX_INC_VAL 43750000
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
-/* RL constants: */
+/* Number of VOQs in E5 QmWfqCrd register */
+#define QM_WFQ_CRD_E5_NUM_VOQS 16
-/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
-#define QM_RL_UPPER_BOUND 62500000
+/* RL constants: */
/* Period in us */
#define QM_RL_PERIOD 5
@@ -71,18 +75,32 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-/* 0.7 * upper bound (62500000) */
-#define QM_RL_MAX_INC_VAL 43750000
-
/* RL increment value - rate is specified in mbps. the factor of 1.01 was
- * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
- * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
- * although the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
- * this point.
- */
-#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
- QM_RL_PERIOD * 101) / (8 * 100)), 1)
+* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+* although the credit increment value was the correct one and FW calculated
+* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+* this point.
+*/
+#define QM_RL_INC_VAL(rate) \
+ OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), 1)
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) \
+ ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
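To make these constants concrete (rates in Mbps, QM_RL_PERIOD = 5 us): QM_RL_INC_VAL(25000) = max(25000 * 5 * 101 / 800, 1) = 15781 for a 25 Gbps limit, while QM_VP_RL_UPPER_BOUND(10000) = max(QM_RL_INC_VAL(10000), 10700) = max(6312, 10700) = 10700, so QM_VP_RL_BYPASS_THRESH_SPEED evaluates to 10699 at that speed.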
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -94,13 +112,17 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ ext_voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
- (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ ext_voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
@@ -140,25 +162,58 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
- ((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phys_tcs_per_port) \
- ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
- LB_VOQ(port))
-
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
+ vp_pq_id, rl_id, ext_voq, wrr) \
+ do { \
+ OSAL_MEMSET(&map, 0, sizeof(map)); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
+ SET_FIELD(map.reg, \
+ QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
+ *((u32 *)&map)); \
+ } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
+ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
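A worked packing example with purely illustrative field values: PQ_INFO_ELEMENT(0x12, 3, 2, 1, 1, 5) = 0x12 | (3 << 12) | (2 << 16) | (1 << 20) | (1 << 22) | (5 << 24) = 0x05523012, and the RAM slot for pq_id 7 is XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + 7 * 4.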
/******************** INTERNAL IMPLEMENTATION *********************/
+/* Returns the external VOQ number */
+static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 tc,
+ u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
+ else
+ return port_id * (max_phys_tcs_per_port) + tc;
+}
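For example, with max_phys_tcs_per_port = 4, port 1 / TC 2 maps to external VOQ 1 * 4 + 2 = 6, while the pure-LB TC of any port maps past all physical VOQs, to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id.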
+
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (1 << MAX_NUM_VOQS) - 1);
+ (u32)voq_bit_mask);
+#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+#endif
/* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
@@ -169,7 +224,7 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_PF_RL_UPPER_BOUND);
}
}
@@ -200,7 +255,7 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@@ -220,17 +275,19 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
* the specified VOQ
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 voq, u16 cmdq_lines)
+ u8 ext_voq,
+ u16 cmdq_lines)
{
u32 qm_line_crd;
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
- qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
@@ -240,11 +297,12 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
- /* Clear PBF lines for all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u16 phys_lines, phys_lines_per_tc;
@@ -252,31 +310,35 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
if (!port_params[port_id].active)
continue;
- /* Find #lines to divide between the active physical TCs */
- phys_lines = port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
+ /* Find number of command queue lines to divide between the
+ * active physical TCs. In E5, 1/8 of the lines are reserved.
+ * The lines for the pure LB TC are subtracted.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
/* Find #lines per active physical TC */
num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
if (((port_params[port_id].active_phys_tcs >> tc) &
0x1) == 1)
num_tcs_in_port++;
phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* Init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
phys_lines_per_tc);
- }
}
/* Init registers for pure LB TC */
- ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -308,7 +370,7 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (!port_params[port_id].active)
@@ -339,18 +401,19 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
+ 0x1) == 1) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ phys_blocks);
}
}
/* Init pure LB TC */
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
pure_lb_blocks);
}
}
@@ -361,7 +424,6 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
@@ -401,12 +463,12 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- struct qm_rf_pq_map tx_pq_map;
+ u8 ext_voq, vport_id_in_pf;
bool is_vf_pq, rl_valid;
- u8 voq, vport_id_in_pf;
u16 first_tx_pq_id;
- voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
max_qm_global_rls;
@@ -416,16 +478,17 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
first_tx_pq_id =
vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
+
/* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
- (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
- QM_WFQ_VP_PQ_PF_SHIFT));
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id, map_val);
}
/* Check RL ID */
@@ -434,26 +497,29 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config\n");
- /* Fill PQ map entry */
- OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- rl_valid ? 1 : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- rl_valid ? pq_params[i].vport_id : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
- pq_params[i].wrr_group);
-
- /* Write PQ map entry to CAM */
- STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
- *((u32 *)&tx_pq_map));
+ /* Prepare PQ map entry */
+ struct qm_rf_pq_map_e4 tx_pq_map;
+ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
+ 1 : 0,
+ first_tx_pq_id, rl_valid ?
+ pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
/* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+ pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
+ pq_params[i].tc_id, port_id,
+ rl_valid ? 1 : 0, rl_valid ?
+ pq_params[i].vport_id : 0);
+ ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
@@ -473,10 +539,10 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids, u32 base_mem_addr_4kb)
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
u16 i, pq_id, pq_group;
@@ -518,13 +584,9 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params)
{
u32 inc_val, crd_reg_offset;
- u8 voq;
+ u8 ext_voq;
u16 i;
- crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
- (pf_id % MAX_NUM_PFS_BB);
-
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
@@ -533,14 +595,21 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
- OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (pf_id % MAX_NUM_PFS_BB);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
+ inc_val);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
@@ -552,7 +621,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid PF rate limit configuration\n");
return -1;
@@ -561,7 +630,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
@@ -612,6 +681,7 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
+ u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
@@ -625,8 +695,9 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration\n");
return -1;
@@ -636,7 +707,8 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -667,7 +739,9 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -684,10 +758,11 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -748,7 +823,6 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -759,6 +833,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
+ u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
@@ -775,16 +850,14 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
- num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif
/* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, is_first_pf, num_pf_cids,
- num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
- start_vport, other_mem_size_4kb, pq_params,
- vport_params);
+ max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ start_pq, num_pf_pqs, num_vf_pqs, start_vport,
+ other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
@@ -803,7 +876,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Set VPORT RL */
if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, vport_params))
+ (p_hwfn, start_vport, num_vports, link_speed, vport_params))
return -1;
return 0;
@@ -832,7 +905,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid PF rate limit configuration\n");
return -1;
@@ -872,7 +945,9 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+ struct ecore_ptt *p_ptt, u8 vport_id,
+ u32 vport_rl,
+ u32 link_speed)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
@@ -882,8 +957,8 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration\n");
return -1;
@@ -1335,23 +1410,8 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
}
-/* In MF should be called once per engine to set EtherType of OuterTag */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
-{
- /* Update PRS register */
- STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update NIG register */
- STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update PBF register */
- STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-}
-
/* In MF should be called once per port to set EtherType of OuterTag */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
@@ -1497,35 +1557,23 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id)
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
{
- union gft_cam_line_union cam_line;
- struct gft_ram_line ram_line;
- u32 i, *ram_line_ptr;
-
- ram_line_ptr = (u32 *)&ram_line;
-
- /* Stop using gft logic, disable gft search */
+ /* disable gft search for PF */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
- /* Clean ram & cam for next rfs/gft session*/
+ /* Clean ram & cam for next gft session */
/* Zero camline */
- OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- cam_line.cam_line_mapped.camline);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
-
- /* Each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(ram_line_ptr + i));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}
@@ -1543,115 +1591,110 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
- bool ipv6)
+ bool ipv6,
+ enum gft_profile_type profile_type)
{
- u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- union gft_cam_line_union camLine;
- struct gft_ram_line ramLine;
- u32 *ramLinePointer = (u32 *)&ramLine;
- int i;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
if (!ipv6 && !ipv4)
- DP_NOTICE(p_hwfn, true,
- "set_rfs_mode_enable: must accept at "
- "least on of - ipv4 or ipv6");
-
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
if (!tcp && !udp)
- DP_NOTICE(p_hwfn, true,
- "set_rfs_mode_enable: must accept at "
- "least on of - udp or tcp");
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
/* Set RFS event ID to be awakened in Tstorm by PRS */
- rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
- /* Configure Registers for RFS mode */
+ /* Do not load context, only cid, in PRS on match. */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
- /* Enable gft search */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
- * context only cid
- * in PRS on match
- */
- camLine.cam_line_mapped.camline = 0;
+ /* Do not use tenant ID exist bit for gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
- /* Cam line is now valid!! */
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_VALID, 1);
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
/* Filters are per PF!! */
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camLine.cam_line_mapped.camline);
- camLine.cam_line_mapped.camline =
- ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+ cam_line);
+ cam_line = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ramLine.lo = 0;
- ramLine.hi = 0;
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
-
- /* Each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(ramLinePointer + i));
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ram_line_lo);
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ REG_SIZE, ram_line_hi);
/* Set default profile so that no filter match will happen */
- ramLine.lo = 0xffffffff;
- ramLine.hi = 0x3ff;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
- i * REG_SIZE, *(ramLinePointer + i));
+ /* Enable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
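A usage sketch showing how a caller of the old ecore_set_rfs_mode_enable() might migrate (assumes a PTT was already acquired):

	/* Enable 4-tuple TCP/IPv4 GFT matching for this PF. */
	ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			 true /* tcp */, false /* udp */,
			 true /* ipv4 */, false /* ipv6 */,
			 GFT_PROFILE_TYPE_4_TUPLE);

	/* And later, when flow steering is turned off: */
	ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);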
/* Configure VF zone size mode */
@@ -1726,16 +1769,13 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
-#define CRC8_TABLE_SIZE 256
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
- u8 conn_type,
- u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
@@ -1794,9 +1834,8 @@ static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
}
/* Calculate and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@@ -1807,14 +1846,14 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+ u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@@ -1823,8 +1862,7 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
- 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 488dc005..ab560e59 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -18,7 +18,6 @@ struct init_qm_pq_params;
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -27,8 +26,7 @@ struct init_qm_pq_params;
*
* @return The required host memory size in 4KB units.
*/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
@@ -66,7 +64,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -80,6 +77,7 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* be 0; otherwise, the weight must be non-zero.
* @param pf_rl - rate limit in Mb/sec units. A value of 0 means don't
* configure. Ignored if PF RL is globally disabled.
+ * @param link_speed - link speed in Mbps.
* @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
* each Tx PQ associated with the specified PF.
* @param vport_params - array of size num_vports with parameters for each
@@ -88,23 +86,23 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 port_id,
- u8 pf_id,
- u8 max_phys_tcs_per_port,
- bool is_first_pf,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 start_pq,
- u16 num_pf_pqs,
- u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
- u16 pf_wfq,
- u32 pf_rl,
- struct init_qm_pq_params *pq_params,
- struct init_qm_vport_params *vport_params);
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ u32 link_speed,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
@@ -157,17 +155,19 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
* VPORT.
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
- u32 vport_rl);
+ u32 vport_rl,
+ u32 link_speed);
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
@@ -261,28 +261,15 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#ifndef UNUSED_HSI_FUNC
/**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
- * ethType Regs to input ethType
- * should Be called once per engine
- * if engine
- * is in BD mode.
- *
- * @param p_ptt - ptt window used for writing the registers.
- * @param ethType - etherType to configure
- */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
-
-/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
* input ethType. Should be called
* once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
+ u32 ethType);
#endif /* UNUSED_HSI_FUNC */
/**
@@ -351,33 +338,35 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ * @brief ecore_gft_disable - Disable GFT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable RFS.
+ * @param pf_id - pf on which to disable GFT.
*/
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id);
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
/**
-* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
+ * @brief ecore_gft_config - Enable and configure HW for GFT
*
* @param p_ptt - ptt window used for writing the registers.
-* @param pf_id - pf on which to enable RFS.
+ * @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
* @param udp - set profile udp packet.
* @param ipv4 - set profile ipv4 packet.
* @param ipv6 - set profile ipv6 packet.
+ * @param profile_type - defines which packet fields to match. Use enum gft_profile_type.
*/
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
- bool ipv6);
+ bool ipv6,
+ enum gft_profile_type profile_type);
#endif /* UNUSED_HSI_FUNC */
/**
@@ -431,26 +420,25 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
* @param ctx_type - context type.
* @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
+void ecore_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
+
/**
* @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
* context.
*
- * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
+void ecore_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 tid);
+
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index b907a95e..91633c11 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -40,6 +40,13 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
+ val, rt_offset, RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
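/* Editor's note: a minimal sketch of the bounds check just added to
 * ecore_init_store_rt_reg(), using stand-in storage instead of the hwfn
 * runtime arrays. Out-of-range offsets are rejected loudly rather than
 * corrupting adjacent state.
 */
#include <stdbool.h>
#include <stdio.h>

#define RUNTIME_ARRAY_SIZE 8u	/* stand-in; the real value comes from ecore_rt_defs.h */

static unsigned init_val[RUNTIME_ARRAY_SIZE];
static bool b_valid[RUNTIME_ARRAY_SIZE];

static void store_rt_reg(unsigned rt_offset, unsigned val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		fprintf(stderr,
			"Avoid storing %u at index %u since the array size is %u!\n",
			val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}
	init_val[rt_offset] = val;
	b_valid[rt_offset] = true;
}

int main(void)
{
	store_rt_reg(3, 0xdead);	/* accepted */
	store_rt_reg(100, 0xbeef);	/* rejected with a diagnostic */
	return b_valid[3] ? 0 : 1;
}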
@@ -49,6 +56,14 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
{
osal_size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
+ rt_offset, (u32)(rt_offset + size - 1),
+ RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@@ -161,8 +176,7 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 addr, u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -294,7 +308,7 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -303,10 +317,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
- ecore_init_rt(p_hwfn, p_ptt, addr,
- OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
- OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
- b_must_dmae);
+ rc = ecore_init_rt(p_hwfn, p_ptt, addr,
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+ b_must_dmae);
break;
}
@@ -382,10 +396,13 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
OSAL_LE32_TO_CPU(cmd->op_data));
}
-/* init_ops callbacks entry point */
+/* init_ops callbacks entry point.
+ * OSAL_UNUSED is temporarily used to avoid unused-parameter compilation
+ * warnings. It should be removed once the parameters are actually used.
+ */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ struct init_callback_op OSAL_UNUSED * p_cmd)
{
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
@@ -429,17 +446,16 @@ static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+ u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
- return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
- INIT_IF_PHASE_OP_CMD_OFFSET);
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
@@ -485,8 +501,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
- phase, phase_id);
+ cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
+ phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
@@ -510,7 +526,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
return rc;
}
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 gtt_base;
u32 i;
@@ -528,7 +545,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
@@ -537,7 +554,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
@@ -557,7 +574,11 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
}
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
- const u8 *data)
+#ifdef CONFIG_ECORE_BINARY_FW
+ const u8 *fw_data)
+#else
+ const u8 OSAL_UNUSED * fw_data)
+#endif
{
struct ecore_fw_data *fw = p_dev->fw_data;
@@ -565,24 +586,24 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
- if (!data) {
+ if (!fw_data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
- fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
- fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+ fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
- fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+ fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
- fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index d58c7d6a..e293a4a3 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -107,5 +107,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index b57c510c..e6cef85b 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -9,7 +9,6 @@
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
-#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
@@ -30,7 +29,7 @@ struct ecore_pi_info {
struct ecore_sb_sp_info {
struct ecore_sb_info sb_info;
/* per protocol index data */
- struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+ struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum ecore_attention_type {
@@ -248,21 +247,21 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
- DP_INFO(p_hwfn->p_dev,
- "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
- " [PF: %02x %s %02x]\n",
- tmp2, tmp,
- (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
- (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
- grc_timeout_attn_master_to_str((tmp &
- ECORE_GRC_ATTENTION_MASTER_MASK) >>
- ECORE_GRC_ATTENTION_MASTER_SHIFT),
- (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
- (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
+ : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str(
+ (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
- ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
- (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
- ECORE_GRC_ATTENTION_VF_SHIFT);
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
out:
/* Regardless of anything else, clean the validity bit */
@@ -414,31 +413,136 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
-#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
-#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+#define ECORE_DB_REC_COUNT 10
+#define ECORE_DB_REC_INTERVAL 100
+
+/* assumes sticky overflow indication was set for this PF */
+static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 count = ECORE_DB_REC_COUNT;
+ u32 usage = 1;
+
+ /* Wait for usage to zero or count to run out. This is necessary since
+ * EDPM doorbell transactions can take multiple 64b cycles, and as such
+ * can "split" over the PCI. Possibly, the doorbell drop can happen with
+ * half an EDPM in the queue and the other half dropped. Another EDPM
+ * doorbell to the same address (from the doorbell recovery mechanism or
+ * from the doorbelling entity) could have its first half dropped and its
+ * second half interpreted as a continuation of the first. To prevent such
+ * malformed doorbells from reaching the device, flush the queue before
+ * releasing the overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
+ return ECORE_TIMEOUT;
+ }
+
+ /* flush any pending (e)dpm as they may never arrive */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* release overflow sticky indication (stop silently dropping
+ * everything)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* repeat all last doorbells (doorbell drop recovery) */
+ ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+ return ECORE_SUCCESS;
+}
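/* Editor's note: the drain-then-timeout shape of ecore_db_rec_attn() in
 * isolation - poll a usage counter until it hits zero or the retry budget
 * runs out. read_usage_cnt() is a stand-in for the DORQ_REG_PF_USAGE_CNT
 * register read; the constants mirror ECORE_DB_REC_COUNT/INTERVAL above.
 */
#include <stdio.h>
#include <unistd.h>

#define DB_REC_COUNT	10
#define DB_REC_INTERVAL	100	/* usec */

static unsigned read_usage_cnt(void)
{
	static unsigned pending = 3;	/* pretend 3 doorbells are queued */
	return pending ? pending-- : 0;
}

static int drain_doorbell_queue(void)
{
	int count = DB_REC_COUNT;
	unsigned usage = 1;

	while (count-- && usage) {
		usage = read_usage_cnt();
		usleep(DB_REC_INTERVAL);
	}

	if (usage) {	/* should have been depleted by now */
		fprintf(stderr, "usage failed to zero after %d usec\n",
			DB_REC_INTERVAL * DB_REC_COUNT);
		return -1;	/* the driver maps this case to ECORE_TIMEOUT */
	}
	return 0;
}

int main(void)
{
	return drain_doorbell_queue() ? 1 : 0;
}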
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 reason;
+ u32 int_sts, first_drop_reason, details, address, overflow,
+ all_drops_reason;
+ struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ enum _ecore_status_t rc;
- reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
- ECORE_DORQ_ATTENTION_REASON_MASK;
- if (reason) {
- u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS);
+ int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
+ int_sts);
- DP_INFO(p_hwfn->p_dev,
- "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
- " Size [bytes] 0x%08x Reason: 0x%08x\n",
- ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS_ADDRESS),
- (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
- ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
- ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
+ /* int_sts may be zero since all PFs were interrupted for doorbell
+ * overflow, but another one already handled it; we can abort here. If
+ * this PF also requires overflow recovery, we will be interrupted again.
+ */
+ if (!int_sts)
+ return ECORE_SUCCESS;
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* obtain data about db drop/overflow */
+ first_drop_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ ECORE_DORQ_ATTENTION_REASON_MASK;
+ details = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+ address = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ overflow = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_PF_OVFL_STICKY);
+ all_drops_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* log info */
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
+ "Overflow\t0x%x\t\t(a per PF indication)\n",
+ address,
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason, overflow);
+
+ /* if this PF caused overflow, initiate recovery */
+ if (overflow) {
+ rc = ecore_db_rec_attn(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ /* clear the doorbell drop details and prepare for next drop */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+ /* mark the interrupt as handled (note: even if the drop was due to a
+ * reason other than overflow, we mark it as handled)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+ /* if there are no indications other than the drop indications,
+ * report success
+ */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return ECORE_SUCCESS;
}
+ /* some other indication was present - non-recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
return ECORE_INVAL;
}
@@ -851,32 +955,38 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
* @brief ecore_int_deassertion_parity - handle a single parity AEU source
*
* @param p_hwfn
- * @param p_aeu - descriptor of an AEU bit which caused the
- * parity
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param aeu_en_reg - address of the AEU enable register
* @param bit_index
*/
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
- u8 bit_index)
+ u32 aeu_en_reg, u8 bit_index)
{
- u32 block_id = p_aeu->block_index;
+ u32 block_id = p_aeu->block_index, mask, val;
- DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
- p_aeu->bit_name, bit_index);
-
- if (block_id == MAX_BLOCK_ID)
- return;
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
- ecore_int_attn_print(p_hwfn, block_id,
- ATTN_TYPE_PARITY, false);
+ if (block_id != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
- /* In A0, there's a single parity bit for several blocks */
- if (block_id == BLOCK_BTB) {
- ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
- ATTN_TYPE_PARITY, false);
- ecore_int_attn_print(p_hwfn, BLOCK_MCP,
- ATTN_TYPE_PARITY, false);
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ ecore_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
+ }
}
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~(0x1 << bit_index);
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
}
/**
@@ -891,8 +1001,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
- bool b_parity = false;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
u8 i, j, k, bit_idx;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -908,11 +1017,11 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
/* Handle parity attentions first */
for (i = 0; i < NUM_ATTN_REGS; i++) {
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
- u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32));
+ u32 parities;
- u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
/* Skip register in which no parity bit is currently set */
if (!parities)
@@ -922,11 +1031,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
- !!(parities & (1 << bit_idx))) {
+ !!(parities & (1 << bit_idx)))
ecore_int_deassertion_parity(p_hwfn, p_bit,
- bit_idx);
- b_parity = true;
- }
+ aeu_en, bit_idx);
bit_idx += ATTENTION_LENGTH(p_bit->flags);
}
@@ -941,10 +1048,13 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
continue;
for (i = 0; i < NUM_ATTN_REGS; i++) {
- u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
- u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
- u32 bits = aeu_inv_arr[i] & en;
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
/* Skip if no bit from this group is currently set */
if (!bits)
@@ -1369,6 +1479,49 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
+static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return; /* @@@TBD MichalK - VF CAU... */
+
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
+ pi_index, coalescing_fsm, timeset);
+}
+
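/* Editor's note: the refactor above splits CAU PI configuration into an
 * internal helper keyed by the raw IGU index and a public wrapper taking
 * the client's SB descriptor. A stripped-down sketch of that shape, with
 * all ecore types replaced by stand-ins:
 */
#include <stdio.h>

struct sb_info {
	unsigned igu_sb_id;
};

/* internal: callers inside the IGU code already hold the raw index */
static void conf_pi_by_igu_id(unsigned igu_sb_id, unsigned pi_index)
{
	printf("configuring PI %u of IGU SB %u\n", pi_index, igu_sb_id);
}

/* public: clients hand in their descriptor; the index is resolved once */
static void conf_pi(struct sb_info *p_sb, unsigned pi_index)
{
	conf_pi_by_igu_id(p_sb->igu_sb_id, pi_index);
}

int main(void)
{
	struct sb_info sb = { .igu_sb_id = 7 };

	conf_pi(&sb, 0);
	return 0;
}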
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t sb_phys, u16 igu_sb_id,
@@ -1420,8 +1573,9 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
else
timer_res = 2;
timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
- ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- ECORE_COAL_RX_STATE_MACHINE, timeset);
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ ECORE_COAL_RX_STATE_MACHINE,
+ timeset);
if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
@@ -1431,46 +1585,14 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
- ecore_int_cau_conf_pi(p_hwfn, p_ptt,
- igu_sb_id, TX_PI(i),
- ECORE_COAL_TX_STATE_MACHINE,
- timeset);
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ ECORE_COAL_TX_STATE_MACHINE,
+ timeset);
}
}
}
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 igu_sb_id, u32 pi_index,
- enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
-{
- struct cau_pi_entry pi_entry;
- u32 sb_offset, pi_offset;
-
- if (IS_VF(p_hwfn->p_dev))
- return; /* @@@TBD MichalK- VF CAU... */
-
- sb_offset = igu_sb_id * PIS_PER_SB;
- OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
- if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
- else
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
-
- pi_offset = sb_offset + pi_index;
- if (p_hwfn->hw_init_done) {
- ecore_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
- *((u32 *)&(pi_entry)));
- } else {
- STORE_RT_REG(p_hwfn,
- CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
- *((u32 *)&(pi_entry)));
- }
-}
-
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
@@ -1483,16 +1605,50 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
sb_info->igu_sb_id, 0, 0);
}
-/**
- * @brief ecore_get_igu_sb_id - given a sw sb_id return the
- * igu_sb_id
- *
- * @param p_hwfn
- * @param sb_id
- *
- * @return u16
- */
-static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
+ b_is_pf)
+ return p_block;
+ }
+
+ return OSAL_NULL;
+}
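/* Editor's note: the IGU rework tracks every CAM entry with status flags
 * (VALID/FREE/PF/...), so allocation becomes a linear scan. A minimal
 * standalone version of the ecore_get_igu_free_sb() loop above, with
 * stand-in flag values and a tiny table:
 */
#include <stddef.h>
#include <stdio.h>

#define STATUS_FREE	0x01
#define STATUS_VALID	0x02
#define STATUS_PF	0x04
#define NUM_ENTRIES	4

struct igu_block {
	unsigned char status;
};

static struct igu_block *find_free_sb(struct igu_block *tbl, int b_is_pf)
{
	size_t i;

	for (i = 0; i < NUM_ENTRIES; i++) {
		struct igu_block *p_block = &tbl[i];

		/* skip entries that are invalid or already in use */
		if (!(p_block->status & STATUS_VALID) ||
		    !(p_block->status & STATUS_FREE))
			continue;

		/* keep PF and VF pools apart */
		if (!!(p_block->status & STATUS_PF) == b_is_pf)
			return p_block;
	}

	return NULL;
}

int main(void)
{
	struct igu_block tbl[NUM_ENTRIES] = {
		{ STATUS_VALID },				/* valid but in use */
		{ STATUS_VALID | STATUS_FREE | STATUS_PF },	/* free PF entry */
	};
	struct igu_block *p = find_free_sb(tbl, 1);

	if (p)
		printf("free PF entry at index %td\n", p - tbl);
	return 0;
}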
+
+static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 vector_id)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return ECORE_SB_INVALID_IDX;
+}
+
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -1500,11 +1656,15 @@ static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
if (sb_id == ECORE_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else if (IS_PF(p_hwfn->p_dev))
- igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
else
igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
- if (sb_id == ECORE_SP_SB_ID)
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath SB vector %04x doesn't exist\n",
+ sb_id);
+ else if (sb_id == ECORE_SP_SB_ID)
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
else
@@ -1525,9 +1685,24 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+ if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+
+ /* Let the igu info reference the client's SB info */
if (sb_id != ECORE_SP_SB_ID) {
- p_hwfn->sbs_info[sb_id] = sb_info;
- p_hwfn->num_sbs++;
+ if (IS_PF(p_hwfn->p_dev)) {
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
sb_info->p_hwfn = p_hwfn;
@@ -1559,20 +1734,35 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *sb_info,
u16 sb_id)
{
- if (sb_id == ECORE_SP_SB_ID) {
- DP_ERR(p_hwfn, "Do Not free sp sb using this function");
- return ECORE_INVAL;
- }
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ if (sb_info == OSAL_NULL)
+ return ECORE_SUCCESS;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
- p_hwfn->sbs_info[sb_id] = OSAL_NULL;
- p_hwfn->num_sbs--;
+ if (IS_VF(p_hwfn->p_dev)) {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
+ return ECORE_SUCCESS;
}
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved to Default SB */
+ if (p_block->vector_number == 0) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return ECORE_INVAL;
+ }
+
+ /* Lose reference to client's SB info, and fix counters */
+ p_block->sb_info = OSAL_NULL;
+ p_block->status |= ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
return ECORE_SUCCESS;
}
@@ -1735,15 +1925,6 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 tmp;
-
- /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
- * attentions. Since we're waiting for BRCM answer regarding this
- * attention, in the meanwhile we simply mask it.
- */
- tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
- tmp &= ~0x800;
- ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
ecore_int_igu_enable_attn(p_hwfn, p_ptt);
@@ -1778,11 +1959,13 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 sb_id, bool cleanup_set, u16 opaque_fid)
+ struct ecore_ptt *p_ptt,
+ u32 igu_sb_id,
+ bool cleanup_set,
+ u16 opaque_fid)
{
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
- u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
u8 type = 0; /* FIXME MichalS type??? */
@@ -1813,8 +1996,8 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
OSAL_MMIOWB(p_hwfn->p_dev);
/* calculate where to read the status bit from */
- sb_bit = 1 << (sb_id % 32);
- sb_bit_addr = sb_id / 32 * sizeof(u32);
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
@@ -1829,21 +2012,28 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
if (!sleep_cnt)
DP_NOTICE(p_hwfn, true,
"Timeout waiting for clear status 0x%08x [for sb %d]\n",
- val, sb_id);
+ val, igu_sb_id);
}
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set)
+ u16 igu_sb_id, u16 opaque, bool b_set)
{
+ struct ecore_igu_block *p_block;
int pi, i;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id, p_block->function_id, p_block->is_pf,
+ p_block->vector_number);
+
/* Set */
if (b_set)
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
/* Clear */
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
/* Wait for the IGU SB to cleanup */
for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
@@ -1851,8 +2041,8 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
val = ecore_rd(p_hwfn, p_ptt,
IGU_REG_WRITE_DONE_PENDING +
- ((sb_id / 32) * 4));
- if (val & (1 << (sb_id % 32)))
+ ((igu_sb_id / 32) * 4));
+ if (val & (1 << (igu_sb_id % 32)))
OSAL_UDELAY(10);
else
break;
@@ -1860,21 +2050,22 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
if (i == IGU_CLEANUP_SLEEP_LENGTH)
DP_NOTICE(p_hwfn, true,
"Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
- sb_id);
+ igu_sb_id);
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
ecore_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_set, bool b_slowpath)
{
- u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
- u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0, val = 0;
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
/* @@@TBD MichalK temporary... should be moved to init-tool... */
val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
@@ -1883,53 +2074,204 @@ void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
/* end temporary */
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU cleaning SBs [%d,...,%d]\n",
- igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
- for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
- ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & ECORE_IGU_STATUS_DSB))
+ continue;
+
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
+ }
- if (!b_slowpath)
- return;
+ if (b_slowpath)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+}
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid, b_set);
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, ECORE_SB)) {
+ /* We're using an old MFW - have to prevent any switching of SBs
+ * between PF and VFs, as the driver wouldn't later be able to tell
+ * which belongs to which.
+ */
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW has provided -
+ * don't forget the MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, ECORE_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
+ }
+
+ /* TODO - how do we learn about VF SBs from MFW? */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know how many SBs we have totally
+ * in IGU + number of PF SBs. So we can validate that
+ * we'd have sufficient for VF.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov -
+ p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn, true,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return ECORE_INVAL;
+ }
+ }
+ }
+
+ /* Cap the number of VFs SBs by the number of VFs */
+ if (IS_PF_SRIOV(p_hwfn))
+ p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU cam to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & ECORE_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries will be enabled when the VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id,
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number,
+ rval, val);
+ }
+ }
+
+ return 0;
+}
+
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
+
+ /* Return all the usage indications to default prior to the reset;
+ * The reset expects the non-orig fields to reflect the initial status
+ * of the SBs, and will re-calculate the originals based on those.
+ */
+ p_cnt->cnt = p_cnt->orig;
+ p_cnt->free_cnt = p_cnt->orig;
+ p_cnt->iov_cnt = p_cnt->iov_orig;
+ p_cnt->free_cnt_iov = p_cnt->iov_orig;
+ p_cnt->orig = 0;
+ p_cnt->iov_orig = 0;
+
+ /* TODO - we probably need to re-configure the CAU as well... */
+ return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}
-static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 sb_id)
+static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id)
{
u32 val = ecore_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
struct ecore_igu_block *p_block;
- p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- goto out;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
/* Fill the block information */
- p_block->status = ECORE_IGU_STATUS_VALID;
p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
- " is_pf = %d vector_num = 0x%x\n",
- sb_id, val, p_block->function_id, p_block->is_pf,
- p_block->vector_number);
-
-out:
- return val;
+ p_block->igu_sb_id = igu_sb_id;
}
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
@@ -1937,140 +2279,217 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
{
struct ecore_igu_info *p_igu_info;
struct ecore_igu_block *p_block;
- u32 min_vf = 0, max_vf = 0, val;
- u16 sb_id, last_iov_sb_id = 0;
- u16 prev_sb_id = 0xFF;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
- p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
- GFP_KERNEL,
- sizeof(*p_igu_info));
+ p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_igu_info));
if (!p_hwfn->hw_info.p_igu_info)
return ECORE_NOMEM;
-
- OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
-
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs and VFs */
- p_igu_info->igu_base_sb = 0xffff;
- p_igu_info->igu_sb_cnt = 0;
- p_igu_info->igu_dsb_id = 0xffff;
- p_igu_info->igu_base_sb_iov = 0xffff;
+ /* Distinguish between an existent and a non-existent default SB */
+ p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
+ /* Find the range of VF ids whose SB belong to this PF */
if (p_hwfn->p_dev->p_iov_info) {
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
min_vf = p_iov->first_vf_in_pf;
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
- for (sb_id = 0;
- sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
- val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- break;
- if (p_block->is_pf) {
- if (p_block->function_id == p_hwfn->rel_pf_id) {
- p_block->status |= ECORE_IGU_STATUS_PF;
-
- if (p_block->vector_number == 0) {
- if (p_igu_info->igu_dsb_id == 0xffff)
- p_igu_info->igu_dsb_id = sb_id;
- } else {
- if (p_igu_info->igu_base_sb == 0xffff) {
- p_igu_info->igu_base_sb = sb_id;
- } else if (prev_sb_id != sb_id - 1) {
- DP_NOTICE(p_hwfn->p_dev, false,
- "consecutive igu"
- " vectors for HWFN"
- " %x broken",
- p_hwfn->rel_pf_id);
- break;
- }
- prev_sb_id = sb_id;
- /* we don't count the default */
- (p_igu_info->igu_sb_cnt)++;
- }
- }
- } else {
- if ((p_block->function_id >= min_vf) &&
- (p_block->function_id < max_vf)) {
- /* Available for VFs of this PF */
- if (p_igu_info->igu_base_sb_iov == 0xffff) {
- p_igu_info->igu_base_sb_iov = sb_id;
- } else if (last_iov_sb_id != sb_id - 1) {
- if (!val)
- DP_VERBOSE(p_hwfn->p_dev,
- ECORE_MSG_INTR,
- "First uninited IGU"
- " CAM entry at"
- " index 0x%04x\n",
- sb_id);
- else
- DP_NOTICE(p_hwfn->p_dev, false,
- "Consecutive igu"
- " vectors for HWFN"
- " %x vfs is broken"
- " [jumps from %04x"
- " to %04x]\n",
- p_hwfn->rel_pf_id,
- last_iov_sb_id,
- sb_id);
- break;
- }
- p_block->status |= ECORE_IGU_STATUS_FREE;
- p_hwfn->hw_info.p_igu_info->free_blks++;
- last_iov_sb_id = sb_id;
- }
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ /* Read current entry; Notice it might not belong to this PF */
+ ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
+ }
+
+ /* Mark the first entry belonging to the PF or its VFs
+ * as the default SB [we'll reset IGU prior to first usage].
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= ECORE_IGU_STATUS_DSB;
}
+
+ /* While this isn't suitable for all clients, limit the number of
+ * prints by having each PF print only its own entries, with the
+ * exception of PF0, which prints everything.
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+ }
+
+ if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
+ return ECORE_INVAL;
}
- /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
- * the number of VF SBs [especially for first VF on engine, as we can't
- * diffrentiate between empty entries and its entries].
- * Since we don't really support more SBs than VFs today, prevent any
- * such configuration by sanitizing the number of SBs to equal the
- * number of VFs.
+ /* All non default SB are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
+ p_igu_info->usage.iov_cnt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block = OSAL_NULL;
+ u16 igu_sb_id = 0, vf_num = 0;
+ u32 val = 0;
+
+ if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
+ return ECORE_INVAL;
+
+ if (sb_id == ECORE_SP_SB_ID)
+ return ECORE_INVAL;
+
+ if (!p_info->b_allow_pf_vf_change) {
+ DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
+ return ECORE_INVAL;
+ }
+
+ /* If we're moving a SB from PF to VF, the client had to specify
+ * which vector it wants to move.
*/
- if (IS_PF_SRIOV(p_hwfn)) {
- u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-
- if (total_vfs < p_igu_info->free_blks) {
- DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
- "Limiting number of SBs for IOV - %04x --> %04x\n",
- p_igu_info->free_blks,
- p_hwfn->p_dev->p_iov_info->total_vfs);
- p_igu_info->free_blks = total_vfs;
- } else if (total_vfs > p_igu_info->free_blks) {
- DP_NOTICE(p_hwfn, true,
- "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
- p_igu_info->free_blks, total_vfs);
+ if (b_to_vf) {
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
return ECORE_INVAL;
- }
}
- p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+ /* If we're moving a SB from VF to PF, need to validate there isn't
+ * already a line configured for that vector.
+ */
+ if (!b_to_vf) {
+ if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
+ ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
- "igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
- p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
- p_igu_info->igu_dsb_id);
-
- if (p_igu_info->igu_base_sb == 0xffff ||
- p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
- DP_NOTICE(p_hwfn, true,
- "IGU CAM returned invalid values igu_base_sb=0x%x "
- "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ /* We need to validate that the SB can actually be relocated.
+ * This would also handle the previous case where we've explicitly
+ * stated which IGU SB needs to move.
+ */
+ for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE) ||
+ (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
+ if (b_to_vf)
+ return ECORE_INVAL;
+ else
+ continue;
+ }
+
+ break;
+ }
+
+ if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Failed to find a free SB to move\n");
return ECORE_INVAL;
}
+ /* At this point, p_block points to the SB we want to relocate */
+ if (b_to_vf) {
+ p_block->status &= ~ECORE_IGU_STATUS_PF;
+
+ /* It doesn't matter which VF number we choose, since we're
+ * going to disable the line; But let's keep it in range.
+ */
+ vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+ p_block->function_id = (u8)vf_num;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+
+ p_info->usage.cnt--;
+ p_info->usage.free_cnt--;
+ p_info->usage.iov_cnt++;
+ p_info->usage.free_cnt_iov++;
+
+ /* TODO - if SBs aren't really the limiting factor,
+ * then it might not be accurate [in the sense that
+ * we might not need to decrement the feature].
+ */
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
+ } else {
+ p_block->status |= ECORE_IGU_STATUS_PF;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = sb_id + 1;
+
+ p_info->usage.cnt++;
+ p_info->usage.free_cnt++;
+ p_info->usage.iov_cnt--;
+ p_info->usage.free_cnt_iov--;
+
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
+ }
+
+ /* Update the IGU and CAU with the new configuration */
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
+ val);
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
+ igu_sb_id, vf_num,
+ p_block->is_pf ? 0 : 1);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
return ECORE_SUCCESS;
}
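/* Editor's note: relocating an SB between the PF and VF pools updates four
 * counters in lockstep, as in ecore_int_igu_relocate_sb() above. A stand-in
 * sketch of just that bookkeeping:
 */
#include <stdio.h>

struct sb_usage {
	int cnt, free_cnt;		/* PF pool: total / free */
	int iov_cnt, free_cnt_iov;	/* VF pool: total / free */
};

static void move_sb(struct sb_usage *u, int b_to_vf)
{
	int d = b_to_vf ? -1 : 1;	/* the SB leaves one pool, joins the other */

	u->cnt += d;
	u->free_cnt += d;
	u->iov_cnt -= d;
	u->free_cnt_iov -= d;
}

int main(void)
{
	struct sb_usage u = { 8, 3, 16, 16 };

	move_sb(&u, 1);	/* PF -> VF */
	printf("PF: %d free of %d; VF: %d free of %d\n",
	       u.free_cnt, u.cnt, u.free_cnt_iov, u.iov_cnt);
	return 0;
}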
@@ -2170,14 +2589,13 @@ void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info)
{
- struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
- if (!info || !p_sb_cnt_info)
+ if (!p_igu_info || !p_sb_cnt_info)
return;
- p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
- p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
- p_sb_cnt_info->sb_free_blk = info->free_blks;
+ OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
+ sizeof(*p_sb_cnt_info));
}
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
@@ -2249,10 +2667,11 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
IGU_REG_CONSUMER_MEM + sbid * 4);
- for (i = 0; i < PIS_PER_SB; i++)
+ for (i = 0; i < PIS_PER_SB_E4; i++)
p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY +
- sbid * 4 * PIS_PER_SB + i * 4);
+ sbid * 4 * PIS_PER_SB_E4 +
+ i * 4);
return ECORE_SUCCESS;
}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 067ed605..563051c3 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -19,33 +19,78 @@
#define ECORE_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+
+#define ECORE_SB_INVALID_IDX 0xffff
struct ecore_igu_block {
u8 status;
#define ECORE_IGU_STATUS_FREE 0x01
#define ECORE_IGU_STATUS_VALID 0x02
#define ECORE_IGU_STATUS_PF 0x04
+#define ECORE_IGU_STATUS_DSB 0x08
u8 vector_number;
u8 function_id;
u8 is_pf;
-};
-struct ecore_igu_map {
- struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
+
+ struct ecore_sb_info *sb_info;
};
struct ecore_igu_info {
- struct ecore_igu_map igu_map;
+ struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH];
u16 igu_dsb_id;
- u16 igu_base_sb;
- u16 igu_base_sb_iov;
- u16 igu_sb_cnt;
- u16 igu_sb_cnt_iov;
- u16 free_blks;
+
+ /* The numbers can shift when using APIs to switch SBs between PF and
+ * VF.
+ */
+ struct ecore_sb_cnt_info usage;
+
+ /* Determine whether we can shift SBs between VFs and PFs */
+ bool b_allow_pf_vf_change;
};
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Make sure IGU CAM reflects the default resources once again,
+ * starting with a 'dirty' SW database.
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief Return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return pointer to an igu_block, OSAL_NULL if none is available
+ */
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
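+
+/* Illustrative usage [a sketch, not part of this patch; p_blk and
+ * igu_sb_id are placeholder locals]: claim a free PF-owned SB and
+ * note its CAM index -
+ *
+ *	struct ecore_igu_block *p_blk;
+ *
+ *	p_blk = ecore_get_igu_free_sb(p_hwfn, true);
+ *	if (p_blk != OSAL_NULL)
+ *		igu_sb_id = p_blk->igu_sb_id;
+ */
+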
/* TODO Names of function may change... */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -125,9 +170,11 @@ u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
* @param opaque - opaque fid of the sb owner.
* @param cleanup_set - set(1) / clear(0)
*/
-void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set);
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 sb_id,
+ u16 opaque,
+ bool b_set);
/**
* @brief ecore_int_cau_conf - configure cau for a given status
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 799fbe82..24cdf5ed 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -26,7 +26,7 @@ enum ecore_int_mode {
#endif
struct ecore_sb_info {
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -44,13 +44,19 @@ struct ecore_sb_info {
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
- u16 pi[PIS_PER_SB];
+ u16 pi[PIS_PER_SB_E4];
};
struct ecore_sb_cnt_info {
- int sb_cnt;
- int sb_iov_cnt;
- int sb_free_blk;
+ /* Original, current, and free SBs for PF */
+ int orig;
+ int cnt;
+ int free_cnt;
+
+ /* Original, current, and free SBs for child VFs */
+ int iov_orig;
+ int iov_cnt;
+ int free_cnt_iov;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
@@ -61,7 +67,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_PROD_INDEX_MASK;
+ STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
@@ -173,17 +179,17 @@ enum ecore_coalescing_fsm {
*
* @param p_hwfn
* @param p_ptt
- * @param igu_sb_id
+ * @param p_sb
* @param pi_index
* @param state
* @param timeset
*/
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum ecore_coalescing_fsm coalescing_fsm,
- u8 timeset);
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset);
/**
*
@@ -219,6 +225,7 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
#define ECORE_SP_SB_ID 0xffff
+
/**
* @brief ecore_int_sb_init - Initializes the sb_info structure.
*
@@ -324,4 +331,18 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *p_sb,
struct ecore_sb_info_dbg *p_info);
+/**
+ * @brief - Move a free Status block between PF and child VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
+ * from VF, given-up if moving to VF]
+ * @param b_to_vf - PF->VF == true, VF->PF == false
+ *
+ * @return ECORE_SUCCESS if SB successfully moved.
+ */
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf);
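+
+/* Illustrative usage [a sketch, not part of this patch]: hand PF
+ * fastpath vector sb_id over to one of its child VFs -
+ *
+ *	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, sb_id, true);
+ *	if (rc != ECORE_SUCCESS)
+ *		DP_NOTICE(p_hwfn, false, "SB relocation failed\n");
+ */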
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 50cb3f2b..218ef50b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -345,21 +345,13 @@ ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
u16 vfid, bool b_enabled_only);
/**
- * @brief Set pending events bitmap for given @vfid
- *
- * @param p_hwfn
- * @param vfid
- */
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
-
-/**
- * @brief Copy pending events bitmap in @events and clear
- * original copy of events
+ * @brief Fills a bitmask of all VFs which have pending unhandled
+ * messages.
*
* @param p_hwfn
*/
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events);
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
/**
* @brief Copy VF's message to PF's buffer
@@ -693,25 +685,60 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
* @return - rate in Mbps
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
#endif
/**
+ * @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce
+ * parameters of VFs for Rx and Tx queue.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
+ * otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx coalesce value in microseconds.
+ * @param tx_coal - Tx coalesce value in microseconds.
+ * @param vf_id
+ * @param qid
+ *
+ * @return int
+ **/
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid);
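+
+/* Illustrative usage [a sketch, not part of this patch]: 64us Rx and
+ * 128us Tx coalescing for qid 0 of VF 2. Per the range rule above,
+ * other queues sharing that SB should also use qids in 0-0x7f -
+ *
+ *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128,
+ *						      2, 0);
+ */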
+
+/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
* @param rel_vf_id
*
- * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return MAX_NUM_VFS_E4 in case there are no further active VFs, otherwise the index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief Set whether PF should communicate with VF using SW/HW channel
+ * Needs to be called for an enabled VF before acquire is over
+ * [latest good point for doing that is OSAL_IOV_VF_ACQUIRE()]
+ *
+ * @param p_hwfn
+ * @param vfid - relative vf index
+ * @param b_is_hw - true iff PF is to use HW channel for communication
+ */
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw);
+#endif
#endif /* CONFIG_ECORE_SRIOV */
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < E4_MAX_NUM_VFS; \
+ _i < MAX_NUM_VFS_E4; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
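
/* Illustrative usage [a sketch, not part of this patch; `i' is a
 * caller-provided u16 iterator]:
 *
 *	u16 i;
 *
 *	ecore_for_each_vf(p_hwfn, i)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "VF %d is active\n", i);
 */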
#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index b4bfe89f..360d7f88 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -193,5 +193,13 @@
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
((roce_pf_id) * IRO[48].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
+ ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
+ ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index bc8df8f8..41532eeb 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[49] = {
+static const struct iro iro_arr[51] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
{ 0x4cb0, 0x80, 0x0, 0x0, 0x80},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6518, 0x20, 0x0, 0x0, 0x20},
+ { 0x6508, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -29,9 +29,9 @@ static const struct iro iro_arr[49] = {
/* XSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x3df0, 0x0, 0x0, 0x0, 0x78},
+ { 0x3e10, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x29b0, 0x0, 0x0, 0x0, 0x78},
+ { 0x2b50, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c38, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
@@ -41,11 +41,11 @@ static const struct iro iro_arr[49] = {
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x61f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x61e8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0xbd20, 0x30, 0x0, 0x0, 0x30},
+ { 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x95b8, 0x30, 0x0, 0x0, 0x30},
+ { 0x96b8, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x4b60, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
@@ -59,11 +59,11 @@ static const struct iro iro_arr[49] = {
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x8150, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xec70, 0x60, 0x0, 0x0, 0x60},
+ { 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2b48, 0x80, 0x0, 0x0, 0x38},
+ { 0x2ce8, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
+ { 0xf2b0, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
@@ -81,33 +81,37 @@ static const struct iro iro_arr[49] = {
/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8},
/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0x200, 0x10, 0x8, 0x0, 0x8},
+ { 0x200, 0x18, 0x8, 0x0, 0x8},
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0xb78, 0x10, 0x8, 0x0, 0x2},
+ { 0xb78, 0x18, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
+ { 0xd878, 0x50, 0x0, 0x0, 0x3c},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x12988, 0x10, 0x0, 0x0, 0x8},
+ { 0x12908, 0x18, 0x0, 0x0, 0x10},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x11fa0, 0x38, 0x0, 0x0, 0x18},
+ { 0x11aa8, 0x40, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ { 0xa580, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x30, 0x0, 0x0, 0x18},
+ { 0x86f8, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x101f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x102f8, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0xde28, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10660, 0x20, 0x0, 0x0, 0x20},
+ { 0x10760, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2b80, 0x80, 0x0, 0x0, 0x10},
+ { 0x2d20, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5020, 0x10, 0x0, 0x0, 0x10},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xc9b0, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
{ 0xeec0, 0x10, 0x0, 0x0, 0x10},
+/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
+ { 0xa398, 0x10, 0x0, 0x0, 0x10},
+/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
+ { 0x13100, 0x8, 0x0, 0x0, 0x8},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e58b8fa0..e3afc8a3 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -173,16 +173,19 @@ static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid)
{
- /* For VF-queues, stuff is a bit complicated as:
- * - They always maintain the qid_usage on their own.
- * - In legacy mode, they also maintain their CIDs.
- */
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID);
- /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
- if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
+ * For legacy vf-queues, the CID doesn't go through here.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
- if (!p_cid->b_legacy_vf)
+
+ /* VFs maintain the index inside queue-zone on their own */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF)
ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
OSAL_VFREE(p_hwfn->p_dev, p_cid);
}
@@ -193,6 +196,7 @@ static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
u16 opaque_fid, u32 cid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -204,14 +208,21 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
- p_cid->rel = *p_params;
p_cid->p_owner = p_hwfn;
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
/* Fill-in bits related to VFs' queues if information was provided */
if (p_vf_params != OSAL_NULL) {
p_cid->vfid = p_vf_params->vfid;
p_cid->vf_qid = p_vf_params->vf_qid;
- p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
} else {
p_cid->vfid = ECORE_QUEUE_CID_PF;
}
@@ -224,7 +235,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
}
/* Calculate the engine-absolute indices of the resources.
- * The would guarantee they're valid later on.
+ * This would guarantee they're valid later on.
* In some cases [SBs] we already have the right values.
*/
rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
@@ -248,10 +259,6 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
- /* SBs relevant information was already provided as absolute */
- p_cid->abs.sb = p_cid->rel.sb;
- p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
out:
/* VF-images have provided the qid_usage_idx on their own.
* Otherwise, we need to allocate a unique one.
@@ -270,7 +277,7 @@ out:
p_cid->rel.queue_id, p_cid->qid_usage_idx,
p_cid->abs.queue_id,
p_cid->rel.stats_id, p_cid->abs.stats_id,
- p_cid->abs.sb, p_cid->abs.sb_idx);
+ p_cid->sb_igu_id, p_cid->sb_idx);
return p_cid;
@@ -282,6 +289,7 @@ fail:
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -296,7 +304,8 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
if (p_vf_params) {
vfid = p_vf_params->vfid;
- if (p_vf_params->b_legacy) {
+ if (p_vf_params->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID) {
b_legacy_vf = true;
cid = p_vf_params->vf_qid;
}
@@ -315,7 +324,7 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
}
p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
- p_params, p_vf_params);
+ p_params, b_is_rx, p_vf_params);
if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, cid, vfid);
@@ -324,9 +333,11 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ bool b_is_rx,
struct ecore_queue_start_common_params *p_params)
{
- return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ OSAL_NULL);
}
enum _ecore_status_t
@@ -336,6 +347,7 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ struct eth_vport_tpa_param *p_tpa;
u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
@@ -360,8 +372,8 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->vport_id = abs_vport_id;
p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
- p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
p_ramrod->untagged = p_params->only_untagged;
p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
@@ -396,22 +408,22 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
/* TPA related fields */
- OSAL_MEMSET(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
- p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa = &p_ramrod->tpa_param;
+ OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
switch (p_params->tpa_mode) {
case ECORE_TPA_MODE_GRO:
- p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
- p_ramrod->tpa_param.tpa_max_size = (u16)-1;
- p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
- p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
- p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
- p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
- p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_tpa->tpa_max_size = (u16)-1;
+ p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
+ p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
+ p_tpa->tpa_ipv4_en_flg = 1;
+ p_tpa->tpa_ipv6_en_flg = 1;
+ p_tpa->tpa_ipv4_tunn_en_flg = 1;
+ p_tpa->tpa_ipv6_tunn_en_flg = 1;
+ p_tpa->tpa_pkt_split_flg = 1;
+ p_tpa->tpa_gro_consistent_flg = 1;
break;
default:
break;
@@ -427,8 +439,7 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
- p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
- p_params->concrete_fid);
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
@@ -454,6 +465,7 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct ecore_rss_params *p_rss)
{
struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
int i, table_size;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -480,26 +492,26 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities = 0;
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
p_config->tbl_size = p_rss->rss_table_size_log;
- p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+ p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
"update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
@@ -627,11 +639,11 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
}
static void
-ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
+ u16 val;
if (!p_params) {
p_ramrod->common.update_tpa_param_flg = 0;
@@ -653,14 +665,16 @@ ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
- p_tpa->tpa_max_size = p_params->tpa_max_size;
- p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
- p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+ val = p_params->tpa_max_size;
+ p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_cont;
+ p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}
static void
-ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sp_vport_update_params *p_params)
{
int i;
@@ -769,11 +783,10 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
}
/* Update mcast bins for VFs, PF doesn't use this functionality */
- ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+ ecore_sp_update_mcast_bin(p_ramrod, p_params);
ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
- ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
- p_params->sge_tpa_params);
+ ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
if (p_params->mtu) {
p_ramrod->common.update_mtu_flg = 1;
p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
@@ -897,7 +910,7 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
- p_cid->abs.vport_id, p_cid->abs.sb);
+ p_cid->abs.vport_id, p_cid->sb_igu_id);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -913,8 +926,8 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
@@ -928,12 +941,15 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD);
+
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ b_legacy_vf ? " [legacy]" : "",
p_cid->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
@@ -979,7 +995,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
/* Allocate a CID for the queue */
- p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (p_cid == OSAL_NULL)
return ECORE_NOMEM;
@@ -1146,8 +1162,8 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
@@ -1195,7 +1211,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc;
- p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (p_cid == OSAL_NULL)
return ECORE_INVAL;
@@ -1494,8 +1510,7 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* Note: crc32_length MUST be aligned to 8
* Return:
******************************************************************************/
-static u32 ecore_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length, u32 crc32_seed, u8 complement)
+static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
u32 byte = 0, bit = 0, crc32_result = crc32_seed;
u8 msb = 0, current_byte = 0;
@@ -1520,25 +1535,23 @@ static u32 ecore_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
u32 packet_buf[2] = { 0 };
OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
- return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
- u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
- mac, ETH_ALEN);
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
return crc & 0xff;
}
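
/* The multicast bin is simply the low byte of the CRC32c of the 6-byte
 * MAC [zero-padded to 8 bytes], i.e. one of 256 hash buckets; MACs
 * whose CRCs share their last byte land in the same bin.
 */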
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
struct ecore_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@@ -1633,16 +1646,13 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- u16 opaque_fid;
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
- opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_mcast(p_hwfn,
- opaque_fid,
p_filter_cmd,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS)
@@ -1732,8 +1742,7 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_eth_stats *p_stats,
- u16 statistics_bin)
+ struct ecore_eth_stats *p_stats)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1945,7 +1954,7 @@ void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
{
__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
- __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
#ifndef ASIC_ONLY
@@ -1970,6 +1979,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ bool b_get_port_stats;
if (IS_PF(p_dev)) {
/* The main vport index is relative first */
@@ -1984,8 +1994,9 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
continue;
}
+ b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
- IS_PF(p_dev) ? true : false);
+ b_get_port_stats);
out:
if (IS_PF(p_dev) && p_ptt)
@@ -2061,12 +2072,16 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
{
+ if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
+ return;
+
if (p_cfg_params->arfs_enable) {
- ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_cfg_params->tcp,
- p_cfg_params->udp,
- p_cfg_params->ipv4,
- p_cfg_params->ipv6);
+ ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ GFT_PROFILE_TYPE_4_TUPLE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2074,7 +2089,7 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
p_cfg_params->ipv4 ? "Enable" : "Disable",
p_cfg_params->ipv6 ? "Enable" : "Disable");
} else {
- ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
p_cfg_params->arfs_enable ? "Enable" : "Disable");
@@ -2082,7 +2097,6 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
@@ -2126,9 +2140,17 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
- p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ p_ramrod->action_icid_valid = 0;
+ p_ramrod->action_icid = 0;
+
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->filter_type = RFS_FILTER_TYPE;
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
@@ -2140,3 +2162,108 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_rx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
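+
+ /* The timeset is stored right-shifted by the CAU timer resolution;
+ * shifting back restores an approximate value in microseconds
+ * [e.g., a timeset of 25 with timer_res 2 reads back as 100us].
+ */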
+ *p_rx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_tx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_tx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
+ void *handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Unable to read queue calescing\n");
+
+ return rc;
+ }
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (p_cid->b_is_rx) {
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ } else {
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
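+
+/* Illustrative usage [a sketch, not part of this patch]: `handle' is
+ * the opaque queue-cid returned when the queue was started -
+ *
+ *	u16 coal;
+ *
+ *	if (ecore_get_queue_coalesce(p_hwfn, &coal, p_cid) ==
+ *	    ECORE_SUCCESS)
+ *		DP_INFO(p_hwfn, "coalescing: %uus\n", coal);
+ */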
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 7fe4cbcb..f4212cf2 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -18,7 +18,16 @@
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define ECORE_QUEUE_CID_PF (0xff)
-/* Additional parameters required for initialization of the queue_cid
+/* Almost identical to the ecore_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct ecore_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+ /* Additional parameters required for initialization of the queue_cid
* and are relevant only for a PF initializing one for its VFs.
*/
struct ecore_queue_cid_vf_params {
@@ -34,7 +43,7 @@ struct ecore_queue_cid_vf_params {
* - Producers would be placed elsewhere.
* - Makes assumptions regarding the CIDs.
*/
- bool b_legacy;
+ u8 vf_legacy;
/* For VFs, this index arrives via TLV to differentiate between
* different queues opened on the same qzone, and is passed
@@ -44,16 +53,19 @@ struct ecore_queue_cid_vf_params {
};
struct ecore_queue_cid {
- /* 'Relative' is a relative term ;-). Usually the indices [not counting
- * SBs] would be PF-relative, but there are some cases where that isn't
- * the case - specifically for a PF configuring its VF indices it's
- * possible some fields [E.g., stats-id] in 'rel' would already be abs.
- */
- struct ecore_queue_start_common_params rel;
- struct ecore_queue_start_common_params abs;
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct ecore_queue_cid_params rel;
+ struct ecore_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
u32 cid;
u16 opaque_fid;
+ bool b_is_rx;
+
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
@@ -69,7 +81,9 @@ struct ecore_queue_cid {
u8 qid_usage_idx;
/* Legacy VFs might have Rx producer located elsewhere */
- bool b_legacy_vf;
+ u8 vf_legacy;
+#define ECORE_QCID_LEGACY_VF_RX_PROD (1 << 0)
+#define ECORE_QCID_LEGACY_VF_CID (1 << 1)
struct ecore_hwfn *p_owner;
};
@@ -84,6 +98,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
@@ -129,31 +144,24 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u8 ecore_mcast_bin_from_mac(u8 *mac);
-/**
- * @brief - ecore_configure_rfs_ntuple_filter
- *
- * This ramrod should be used to add or remove arfs hw filter
- *
- * @params p_hwfn
- * @params p_ptt
- * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
- it with cookie and callback function address, if not
- using this mode then client must pass NULL.
- * @params p_addr p_addr is an actual packet header that needs to be
- * filter. It has to mapped with IO to read prior to
- * calling this, [contains 4 tuples- src ip, dest ip,
- * src port, dest port].
- * @params length length of p_addr header up to past the transport header.
- * @params qid receive packet will be directed to this queue.
- * @params vport_id
- * @params b_is_add flag to add or remove filter.
- *
- */
-enum _ecore_status_t
-ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length,
- u16 qid, u8 vport_id,
- bool b_is_add);
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index d09f3c4a..ed9837bf 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -11,6 +11,7 @@
#include "ecore_status.h"
#include "ecore_sp_api.h"
+#include "ecore_int_api.h"
#ifndef __EXTRACT__LINUX__
enum ecore_rss_caps {
@@ -35,8 +36,7 @@ struct ecore_queue_start_common_params {
/* Relative, but relevant only for PFs */
u8 stats_id;
- /* These are always absolute */
- u16 sb;
+ struct ecore_sb_info *p_sb;
u8 sb_idx;
};
@@ -436,4 +436,30 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev);
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would
+ * initialize it with a cookie and callback function address;
+ * if not using this mode, the client must pass NULL.
+ * @params p_addr p_addr is the actual packet header that needs to be
+ * filtered. It has to be mapped with IO to be read prior to
+ * calling this [contains the 4 tuples - src ip, dest ip,
+ * src port, dest port].
+ * @params length length of the p_addr header, up to past the transport header.
+ * @params qid received packets will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove the filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
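+
+/* Illustrative usage [a sketch, not part of this patch; hdr_phys and
+ * hdr_len are placeholders for a pre-mapped packet-header buffer]:
+ * add a 4-tuple filter steering matching packets to rx queue 2 -
+ *
+ *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, OSAL_NULL,
+ *					       hdr_phys, hdr_len,
+ *					       2, vport_id, true);
+ */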
#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 88c5ceb0..8edd2e96 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -20,6 +20,8 @@
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
+#include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
@@ -43,9 +45,9 @@
OFFSETOF(struct public_drv_mb, _field))
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
- DRV_ID_PDA_COMP_VER_SHIFT)
+ DRV_ID_PDA_COMP_VER_OFFSET)
-#define MCP_BYTES_PER_MBIT_SHIFT 17
+#define MCP_BYTES_PER_MBIT_OFFSET 17
#ifndef ASIC_ONLY
static int loaded;
@@ -96,13 +98,81 @@ void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
}
}
+struct ecore_mcp_cmd_elem {
+ osal_list_entry_t list;
+ struct ecore_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
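+
+/* Pending commands are tracked on mcp_info->cmd_list, keyed by the
+ * sequence number stamped into drv_mb_header; at most one command is
+ * outstanding at any given time, kept at the head of the list.
+ */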
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(*p_cmd_elem));
+ if (!p_cmd_elem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
+ goto out;
+ }
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_cmd_elem *p_cmd_elem)
+{
+ OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+ OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return OSAL_NULL;
+}
+
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+#endif
}
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_SUCCESS;
@@ -157,8 +227,7 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return ECORE_SUCCESS;
}
@@ -176,6 +245,16 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
goto err;
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+ OSAL_LIST_INIT(&p_info->cmd_list);
+
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +269,6 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
- OSAL_SPIN_LOCK_INIT(&p_info->lock);
-
return ECORE_SUCCESS;
err:
@@ -202,58 +277,28 @@ err:
return ECORE_NOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
- u32 cmd)
-{
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
-
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
- */
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
+static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn, false,
- "Trying to send a MFW mailbox command [0x%x]"
- " in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
- return ECORE_BUSY;
- }
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return ECORE_SUCCESS;
-}
-
-static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
@@ -261,15 +306,20 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
delay = EMUL_MCP_RESP_ITER_US;
#endif
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != ECORE_SUCCESS)
- return rc;
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return ECORE_ABORTED;
+ }
+
+ /* Ensure that only a single thread is accessing the mailbox */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
@@ -289,73 +339,238 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
rc = ECORE_AGAIN;
}
- ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 seq, cnt = 1, actual_mb_seq;
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ /* There is at most one pending command at any given time, and if it
+ * exists - it is placed at the HEAD of the list.
+ */
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
+ struct ecore_mcp_cmd_elem,
+ list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static enum _ecore_status_t
+ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params *p_mb_params;
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new unhandled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return ECORE_AGAIN;
+
+ p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb,
+ union_data);
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return ECORE_SUCCESS;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
+ bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+
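+ /* Sample the program counter three times, a polling interval apart;
+ * identical readings suggest the MCP CPU is stuck rather than merely
+ * busy.
+ */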
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn, false,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
+static enum _ecore_status_t
+_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
enum _ecore_status_t rc = ECORE_SUCCESS;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
-#endif
+ /* Wait until the mailbox is unoccupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
- */
- if (p_hwfn->mcp_info->mcp_hist !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
- ecore_load_mcp_offsets(p_hwfn, p_ptt);
- ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!ecore_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_UDELAY(delay);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_AGAIN;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ /* Send the mailbox command */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = ECORE_NOMEM;
+ goto err;
+ }
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+ __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ /* Wait for the MFW response */
do {
- /* Wait for MFW response */
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
OSAL_UDELAY(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < max_retries));
+ if (p_cmd_elem->b_is_completed)
+ break;
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = ECORE_AGAIN;
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ return ECORE_AGAIN;
}
+
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp, p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return ECORE_SUCCESS;
+
+err:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
@@ -364,9 +579,17 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
- union drv_union_data union_data;
- u32 union_data_addr;
- enum _ecore_status_t rc;
+ osal_size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
@@ -374,44 +597,24 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
- if (p_mb_params->data_src_size > sizeof(union_data) ||
- p_mb_params->data_dst_size > sizeof(union_data)) {
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
"The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
p_mb_params->data_src_size, p_mb_params->data_dst_size,
- sizeof(union_data));
+ union_data_size);
return ECORE_INVAL;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- OFFSETOF(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- OSAL_MEM_ZERO(&union_data, sizeof(union_data));
- if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
- OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
- p_mb_params->data_src_size);
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
- sizeof(union_data));
-
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != OSAL_NULL &&
- p_mb_params->data_dst_size)
- ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr, p_mb_params->data_dst_size);
-
- ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_ABORTED;
+ }
- return rc;
+ return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
@@ -520,7 +723,7 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* On CMT, always tell that it's engine */
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
*p_load_code = load_phase;
@@ -534,11 +737,28 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
}
#endif
-static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+static bool
+ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
+ enum ecore_override_force_load override_force_load)
{
- return (drv_role == DRV_ROLE_OS &&
- exist_drv_role == DRV_ROLE_PREBOOT) ||
- (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
}
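/* Editor's note: with the default override, the switch above reduces to two
 * permitted transitions:
 *	loading OS	over existing PREBOOT	-> force load allowed
 *	loading KDUMP	over existing OS	-> force load allowed
 * Every other combination is refused unless the override is
 * ECORE_OVERRIDE_FORCE_LOAD_ALWAYS.
 */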
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
@@ -631,18 +851,16 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
load_req.drv_ver_0 = p_in_params->drv_ver_0;
load_req.drv_ver_1 = p_in_params->drv_ver_1;
load_req.fw_ver = p_in_params->fw_ver;
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
- p_in_params->drv_role);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
- p_in_params->timeout_val);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
- p_in_params->force_cmd);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
- p_in_params->avoid_eng_reset);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
- (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
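/* Editor's sketch: the renamed GET_MFW_FIELD/SET_MFW_FIELD accessors follow
 * the usual mask/offset convention, where each field FOO is described by
 * FOO_MASK and FOO_OFFSET. Hypothetical definitions for illustration; the
 * real macros live in the ecore headers.
 */
#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= ((value) << (field ## _OFFSET)) & (field ## _MASK); \
	} while (0)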
@@ -655,22 +873,20 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
mb_params.param,
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
load_req.drv_ver_0, load_req.drv_ver_1,
load_req.fw_ver, load_req.misc0,
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_LOCK_TO),
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_FLAGS0));
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS) {
@@ -689,29 +905,27 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
load_rsp.drv_ver_0, load_rsp.drv_ver_1,
load_rsp.fw_ver, load_rsp.misc0,
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
- ECORE_MFW_GET_FIELD(load_rsp.misc0,
- LOAD_RSP_FLAGS0));
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
}
return ECORE_SUCCESS;
}
-static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
- enum ecore_drv_role drv_role,
- u8 *p_mfw_drv_role)
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
{
switch (drv_role) {
case ECORE_DRV_ROLE_OS:
@@ -720,12 +934,7 @@ static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
case ECORE_DRV_ROLE_KDUMP:
*p_mfw_drv_role = DRV_ROLE_KDUMP;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum ecore_load_req_force {
@@ -734,10 +943,8 @@ enum ecore_load_req_force {
ECORE_LOAD_REQ_FORCE_ALL,
};
-static enum _ecore_status_t
-ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
- enum ecore_load_req_force force_cmd,
- u8 *p_mfw_force_cmd)
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
case ECORE_LOAD_REQ_FORCE_NONE:
@@ -749,12 +956,7 @@ ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
case ECORE_LOAD_REQ_FORCE_ALL:
*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
@@ -763,7 +965,7 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
{
struct ecore_load_req_out_params out_params;
struct ecore_load_req_in_params in_params;
- u8 mfw_drv_role, mfw_force_cmd;
+ u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
@@ -778,17 +980,10 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
- rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
- rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
@@ -805,9 +1000,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
@@ -816,23 +1008,20 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
if (ecore_mcp_can_force_load(in_params.drv_role,
- out_params.exist_drv_role)) {
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
DP_INFO(p_hwfn,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
- rc = ecore_get_mfw_force_cmd(p_hwfn,
- ECORE_LOAD_REQ_FORCE_ALL,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
@@ -842,7 +1031,9 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
} else {
DP_NOTICE(p_hwfn, false,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
@@ -873,19 +1064,11 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
break;
- case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
- DP_NOTICE(p_hwfn, false,
- "MFW refused a load request [resp 0x%08x]. Aborting.\n",
- out_params.load_code);
- return ECORE_BUSY;
default:
DP_NOTICE(p_hwfn, false,
- "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
out_params.load_code);
- break;
+ return ECORE_BUSY;
}
p_params->load_code = out_params.load_code;
@@ -907,8 +1090,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
return rc;
}
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
-
/* Check if there is a DID mismatch between nvm-cfg/efuse */
if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
DP_NOTICE(p_hwfn, false,
@@ -1029,12 +1210,60 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port,
transceiver_data)));
- transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
+ transceiver_state = GET_MFW_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+ OSAL_TRANSCEIVER_UPDATE(p_hwfn);
+}
+
+static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link)
+{
+ u32 eee_status, val;
+
+ p_link->eee_adv_caps = 0;
+ p_link->eee_lp_adv_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
+}
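/* Editor's worked example (all bit positions are assumptions): with
 * eee_status == 0x0B, EEE_ACTIVE_BIT == BIT(0), the LD advertised field in
 * bits [2:1], and EEE_1G_ADV == 0x1 within that field, the decode above
 * yields:
 *	0x0B & BIT(0)		-> eee_active = true
 *	(0x0B & 0x6) >> 1 = 0x1	-> eee_adv_caps = ECORE_EEE_1G_ADV only
 */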
+
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
}
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
@@ -1045,6 +1274,9 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
+ /* Prevent SW/attentions from doing this at the same time */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
if (!b_reset) {
@@ -1060,13 +1292,27 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Resetting link indications\n");
- return;
+ goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1113,10 +1359,10 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
p_link, max_bw);
- /* Mintz bandwidth configuration */
+ /* Min bandwidth configuration */
__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
p_link, min_bw);
- ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
@@ -1171,7 +1417,12 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
+ ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+
OSAL_LINK_UPDATE(p_hwfn);
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
@@ -1198,12 +1449,32 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* Some MFWs advertise this capability even when it is not actually
+ * feasible. Since ecore would set adv_caps internally in any case,
+ * make sure LFA would still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
+ params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+ if (params->eee.tx_lpi_enable)
+ phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
+ if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
+ if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
+ phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
+ EEE_TX_TIMER_USEC_OFFSET) &
+ EEE_TX_TIMER_USEC_MASK;
+ }
+
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x\n",
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
else
@@ -1221,11 +1492,15 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
return rc;
}
- /* Reset the link status if needed */
- if (!b_up)
- ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
- return rc;
+ return ECORE_SUCCESS;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
@@ -1300,7 +1575,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Invalid protocol type %d\n", type);
return;
}
@@ -1331,7 +1607,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
*/
p_info->bandwidth_min = (p_shmem_info->config &
FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
+ FUNC_MF_CFG_MIN_BW_OFFSET;
if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
DP_INFO(p_hwfn,
"bandwidth minimum out of bounds [%02x]. Set to 1\n",
@@ -1341,7 +1617,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
p_info->bandwidth_max = (p_shmem_info->config &
FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
+ FUNC_MF_CFG_MAX_BW_OFFSET;
if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
DP_INFO(p_hwfn,
"bandwidth maximum out of bounds [%02x]. Set to 100\n",
@@ -1350,28 +1626,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
}
}
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
- size = OSAL_MIN_T(u32, sizeof(*p_data),
- SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
@@ -1394,8 +1648,7 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
&param);
}
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
@@ -1436,11 +1689,16 @@ ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
- DP_NOTICE(p_hwfn, false,
- "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- p_mdump_cmd_params->cmd);
- rc = ECORE_INVAL;
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = ECORE_NOTIMPL;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = ECORE_NOTIMPL;
}
return rc;
@@ -1498,16 +1756,10 @@ ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (rc != ECORE_SUCCESS)
return rc;
- if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
- DP_INFO(p_hwfn,
- "The mdump command is not supported by the MFW\n");
- return ECORE_NOTIMPL;
- }
-
if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
- DP_NOTICE(p_hwfn, false,
- "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mdump_cmd_params.mcp_resp);
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
@@ -1568,17 +1820,71 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ struct mdump_retain_data_stc mfw_mdump_retain;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mdump_retain->valid = mfw_mdump_retain.valid;
+ p_mdump_retain->epoch = mfw_mdump_retain.epoch;
+ p_mdump_retain->pf = mfw_mdump_retain.pf;
+ p_mdump_retain->status = mfw_mdump_retain.status;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
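/* Editor's usage sketch: a caller querying the retained mdump data and
 * clearing it afterwards. Error handling is elided; the function names
 * match the definitions above.
 */
static void example_mdump_retain_flow(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data retain;

	if (ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &retain) ==
	    ECORE_SUCCESS && retain.valid)
		ecore_mcp_mdump_clr_retain(p_hwfn, p_ptt);
}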
+
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_mdump_retain_data mdump_retain;
+ enum _ecore_status_t rc;
+
/* In CMT mode - no need for more than a single acknowledgment to the
* MFW, and no more than a single notification to the upper driver.
*/
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
return;
- DP_NOTICE(p_hwfn, false,
- "Received a critical error notification from the MFW!\n");
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device\n");
+ }
if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
@@ -1586,10 +1892,80 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
return;
}
+ DP_NOTICE(p_hwfn, false,
+ "Acknowledging the notification so the MFW will not take a crash dump [driver debug data collection is preferable]\n");
ecore_mcp_mdump_ack(p_hwfn, p_ptt);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ return;
+
+ OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, oem_cfg_port));
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
+ val);
+
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+ if (val == OEM_CFG_SCHED_TYPE_ETS)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+ else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+ val);
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+ OEM_CFG_FUNC_HOST_PRI_CTRL);
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+ else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ ecore_qm_reconf(p_hwfn, p_ptt);
+ } else {
+ /* Merge UFP TC with the dcbx TC data */
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ }
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update_ufp(p_hwfn);
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1632,6 +2008,15 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
ECORE_DCBX_OPERATIONAL_MIB);
+ /* clear the user-config cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+ break;
+ case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+ ecore_lldp_mib_update_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
@@ -1649,7 +2034,7 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_FAILURE_DETECTED:
- ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ ecore_mcp_handle_fan_failure(p_hwfn);
break;
case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
@@ -1732,14 +2117,13 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
- struct ecore_ptt *p_ptt;
/* TODO - Add support for VFs */
- if (IS_VF(p_dev))
+ if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
@@ -1747,16 +2131,15 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_BUSY;
}
- *p_media_type = MEDIA_UNSPECIFIED;
-
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return ECORE_BUSY;
-
- *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port, media_type));
-
- ecore_ptt_release(p_hwfn, p_ptt);
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ return ECORE_INVAL;
+ } else {
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ media_type));
+ }
return ECORE_SUCCESS;
}
@@ -1928,42 +2311,6 @@ const struct ecore_mcp_function_info
return &p_hwfn->mcp_info->func_info;
}
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params)
-{
- enum _ecore_status_t rc;
-
- switch (params->type) {
- case ECORE_MCP_NVM_RD:
- rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_rd.buf_size,
- params->nvm_rd.buf);
- break;
- case ECORE_MCP_CMD:
- rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param);
- break;
- case ECORE_MCP_NVM_WR:
- rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_wr.buf_size,
- params->nvm_wr.buf);
- break;
- default:
- rc = ECORE_NOTIMPL;
- break;
- }
- return rc;
-}
-
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 personalities)
{
@@ -2009,8 +2356,8 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
- MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
- flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
*p_flash_size = flash_size;
@@ -2035,9 +2382,10 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 vf_id, u8 num)
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
enum _ecore_status_t rc;
@@ -2048,9 +2396,9 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
num *= p_hwfn->p_dev->num_hwfns;
- param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
- param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
@@ -2069,6 +2417,39 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
return rc;
}
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n",
+ num);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
@@ -2106,33 +2487,68 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
+/* Wait up to 100 msec (10 retries x 10 msec) for the MCP to halt */
+#define ECORE_MCP_HALT_SLEEP_MS 10
+#define ECORE_MCP_HALT_MAX_RETRIES 10
+
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
enum _ecore_status_t rc;
- u32 resp = 0, param = 0;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return ECORE_SUCCESS;
}
+#define ECORE_MCP_RESUME_SLEEP_MS 10
+
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+
+ OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, false);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+ return ECORE_SUCCESS;
}
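/* Editor's usage sketch: halt and resume are meant to be paired, with
 * mailbox traffic blocked in between (see the ecore_mcp_cmd_set_blocking()
 * calls above). A hypothetical caller:
 */
static enum _ecore_status_t example_mcp_quiesce(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_halt(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;
	/* ... access MCP-owned state while the CPU is halted ... */
	return ecore_mcp_resume(p_hwfn, p_ptt);
}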
enum _ecore_status_t
@@ -2140,9 +2556,9 @@ ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_client client)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (client) {
case ECORE_OV_CLIENT_DRV:
@@ -2172,9 +2588,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_driver_state drv_state)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (drv_state) {
case ECORE_OV_DRIVER_STATE_NOT_LOADED:
@@ -2247,8 +2663,8 @@ enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 mask_parities)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
mask_parities, &resp, &param);
@@ -2270,7 +2686,7 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
u32 bytes_left, offset, bytes_to_copy, buf_size;
- struct ecore_mcp_nvm_params params;
+ u32 nvm_offset, resp, param;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -2278,22 +2694,29 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
bytes_left = len;
offset = 0;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = (addr + offset) |
- (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_OK)) {
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = (addr + offset) | (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_NVM_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm read failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
break;
}
@@ -2301,14 +2724,14 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
* isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
- (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ (bytes_left - buf_size) % 0x1000)
OSAL_MSLEEP(1);
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
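/* Editor's worked example (assuming MCP_DRV_NVM_BUF_LEN == 32 and
 * DRV_MB_PARAM_NVM_LEN_OFFSET == 24): reading 100 bytes from address
 * 0x1000 issues four commands whose nvm_offset values pack the chunk
 * length into the top byte and the address into the low bits:
 *	0x20001000, 0x20001020, 0x20001040, 0x04001060
 * (three 32-byte chunks, then the 4-byte remainder).
 */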
@@ -2318,26 +2741,23 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
- DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
- params.nvm_common.offset = addr;
- params.nvm_rd.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ :
+ DRV_MSG_CODE_PHY_RAW_READ,
+ addr, &resp, &param, &len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2346,14 +2766,12 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
ecore_ptt_release(p_hwfn, p_ptt);
@@ -2363,19 +2781,16 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2385,19 +2800,16 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2409,37 +2821,58 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
- u32 buf_idx, buf_size;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- if (cmd == ECORE_PUT_FILE_DATA)
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
- else
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ switch (cmd) {
+ case ECORE_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case ECORE_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ case ECORE_EXT_PHY_FW_UPGRADE:
+ nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
+ cmd);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = ((buf_size <<
- DRV_MB_PARAM_NVM_LEN_SHIFT)
- | addr) + buf_idx;
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS ||
- ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
- (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) +
+ buf_idx;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_write() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
/* This can be a lengthy process, and it's possible scheduler
* isn't preemptible. Sleep a bit to prevent CPU hogging.
@@ -2451,7 +2884,8 @@ enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
buf_idx += buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2461,25 +2895,21 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_wr.buf_size = len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
- DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
- params.nvm_common.offset = addr;
- params.nvm_wr.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
+ DRV_MSG_CODE_PHY_RAW_WRITE;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
+ &resp, &param, len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2489,20 +2919,17 @@ enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2513,42 +2940,42 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
+ u32 resp, param;
enum _ecore_status_t rc;
- u32 bytes_left, bytes_to_copy, buf_size;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
addr = offset;
offset = 0;
bytes_left = len;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((addr + offset) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
return ECORE_SUCCESS;
@@ -2559,36 +2986,35 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_idx, buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_idx, buf_size;
-
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((offset + buf_idx) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (buf_size <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_WRITE,
+ nvm_offset, &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
buf_idx += buf_size;
@@ -2604,7 +3030,7 @@ enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
drv_mb_param, &rsp, gpio_val);
@@ -2625,8 +3051,8 @@ enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, param, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
- (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
drv_mb_param, &rsp, &param);
@@ -2648,7 +3074,7 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
u32 drv_mb_param = 0, rsp, val = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
drv_mb_param, &rsp, &val);
@@ -2656,9 +3082,9 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
return rc;
*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
- DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
- DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+ DRV_MB_PARAM_GPIO_CTRL_OFFSET;
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
@@ -2673,7 +3099,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
@@ -2695,7 +3121,7 @@ enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
@@ -2717,7 +3143,7 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, num_images);
@@ -2735,26 +3161,20 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att, u32 image_index)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
-
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
- params.nvm_common.offset |= (image_index <<
- DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
- params.nvm_rd.buf = (u32 *)p_image_att;
-
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+ nvm_offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)p_image_att);
if (rc != ECORE_SUCCESS)
return rc;
- if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
(p_image_att->return_code != 1))
rc = ECORE_UNKNOWN_ERROR;
@@ -2788,13 +3208,13 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
- SENSOR_LOCATION_SHIFT;
+ SENSOR_LOCATION_OFFSET;
p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
- THRESHOLD_HIGH_SHIFT;
+ THRESHOLD_HIGH_OFFSET;
p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
- CRITICAL_TEMPERATURE_SHIFT;
+ CRITICAL_TEMPERATURE_OFFSET;
p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
- CURRENT_TEMP_SHIFT;
+ CURRENT_TEMP_OFFSET;
}
return ECORE_SUCCESS;
@@ -2805,23 +3225,17 @@ enum _ecore_status_t ecore_mcp_get_mba_versions(
struct ecore_ptt *p_ptt,
struct ecore_mba_vers *p_mba_vers)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEM_ZERO(&params, sizeof(params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
- params.nvm_common.offset = 0;
- params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
- params.nvm_rd.buf_size = &buf_size;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+ 0, &resp, &param, &buf_size,
+ &p_mba_vers->mba_vers[0]);
if (rc != ECORE_SUCCESS)
return rc;
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_NVM_OK)
+ if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
rc = ECORE_UNKNOWN_ERROR;
if (buf_size != MCP_DRV_NVM_BUF_LEN)
@@ -2897,9 +3311,9 @@ ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
(ECORE_RESC_ALLOC_VERSION_MINOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
struct ecore_resc_alloc_in_params {
u32 cmd;
@@ -2983,10 +3397,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
"Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
p_in_params->cmd, p_in_params->res_id,
ecore_hw_get_resc_name(p_in_params->res_id),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_in_params->resc_max_val);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
@@ -3003,10 +3417,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_out_params->resc_num, p_out_params->resc_start,
p_out_params->vf_resc_num, p_out_params->vf_resc_start,
p_out_params->flags);
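Throughout this patch, the ECORE_MFW_GET_FIELD/ECORE_MFW_SET_FIELD helpers
are renamed to GET_MFW_FIELD/SET_MFW_FIELD, and the *_SHIFT field suffixes
become *_OFFSET. For orientation, a minimal sketch of the mask/offset idiom
such macros conventionally follow; the bodies below are illustrative
assumptions, not the actual definitions from mcp_public.h:

    /* Illustrative only: each field FOO pairs FOO_MASK (the in-place bit
     * mask) with FOO_OFFSET (the bit position of the field's LSB).
     */
    #define GET_MFW_FIELD(name, field) \
            (((name) & (field##_MASK)) >> (field##_OFFSET))

    #define SET_MFW_FIELD(name, field, value) \
            do { \
                    (name) &= ~(field##_MASK); \
                    (name) |= ((value) << (field##_OFFSET)) & (field##_MASK); \
            } while (0)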
@@ -3094,7 +3508,7 @@ static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
}
if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
- u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+ u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
DP_NOTICE(p_hwfn, false,
"The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
@@ -3127,9 +3541,9 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
break;
}
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
@@ -3142,9 +3556,8 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
/* Analyze the response */
- p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
- RESOURCE_CMD_RSP_OWNER);
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
@@ -3199,6 +3612,36 @@ ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once, since it is
+ * not expected that another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params)
@@ -3209,8 +3652,8 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
: RESOURCE_OPCODE_RELEASE;
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
@@ -3223,7 +3666,7 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
/* Analyze the response */
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
@@ -3250,3 +3693,137 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
+ 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
+ if (rc == ECORE_SUCCESS)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
+ "MFW supported features: %08x\n",
+ p_hwfn->mcp_info->capabilities);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param, features;
+
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
+ features, &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr)
+{
+ struct attribute_cmd_write_stc attr_cmd_write;
+ enum _attribute_commands_e mfw_attr_cmd;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ switch (p_drv_attr->attr_cmd) {
+ case ECORE_MCP_DRV_ATTR_CMD_READ:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+ mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+ p_drv_attr->attr_cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+ p_drv_attr->attr_num);
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+ mfw_attr_cmd);
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+ OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+ attr_cmd_write.val = p_drv_attr->val;
+ attr_cmd_write.mask = p_drv_attr->mask;
+ attr_cmd_write.offset = p_drv_attr->offset;
+
+ mb_params.p_data_src = &attr_cmd_write;
+ mb_params.data_src_size = sizeof(attr_cmd_write);
+ }
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The attribute command is not supported by the MFW\n");
+ return ECORE_NOTIMPL;
+ } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+ mb_params.mcp_resp, p_drv_attr->attr_cmd,
+ p_drv_attr->attr_num);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+ p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+ p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+ mb_params.mcp_param);
+
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+ p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+ p_drv_attr->val = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val)
+{
+ struct ecore_mcp_mb_params mb_params = {0};
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 dword = val;
+
+ mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+ mb_params.param = offset;
+ mb_params.p_data_src = &dword;
+ mb_params.data_src_size = sizeof(dword);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send WoL write request, rc = %d\n", rc);
+ return;
+ }
+
+ if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+ val, offset, mb_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 77fb5a3c..6afaf7de 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -13,6 +13,7 @@
#include "mcp_public.h"
#include "ecore.h"
#include "ecore_mcp_api.h"
+#include "ecore_dev_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
@@ -24,17 +25,27 @@
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
-#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->p_dev->num_ports_in_engines * \
- ecore_device_num_engines((_p_hwfn)->p_dev)))
+ ecore_device_num_ports((_p_hwfn)->p_dev))
struct ecore_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- osal_spinlock_t lock;
- /* Flag to indicate whether sending a MFW mailbox is forbidden */
- bool block_mb_sending;
+ /* List of mailbox commands that were sent and are awaiting a response */
+ osal_list_t cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ osal_spinlock_t cmd_lock;
+
+ /* Flag to indicate whether sending an MFW mailbox command is blocked */
+ bool b_block_cmd;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ osal_spinlock_t link_lock;
/* Address of the MCP public area */
u32 public_base;
@@ -59,7 +70,10 @@ struct ecore_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
+
+ /* Capabilities negotiated with the MFW */
+ u32 capabilities;
};
struct ecore_mcp_mb_params {
@@ -97,7 +111,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * Can only be called after `num_ports_in_engines' is set
+ * Can only be called after `num_ports_in_engine' is set
*/
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
@@ -150,9 +164,13 @@ enum ecore_drv_role {
};
struct ecore_load_req_params {
+ /* Input params */
enum ecore_drv_role drv_role;
u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
bool avoid_eng_reset;
+ enum ecore_override_force_load override_force_load;
+
+ /* Output params */
u32 load_code;
};
@@ -247,56 +265,6 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Sends an NVM write command request to the MFW with
- * payload.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
- * DRV_MSG_CODE_NVM_PUT_FILE_DATA
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param i_txn_size - Buffer size
- * @param i_buf - Pointer to the buffer
- *
- * @param return ECORE_SUCCESS upon success.
- */
-enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param,
- u32 i_txn_size,
- u32 *i_buf);
-
-/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
- *
- * @param return ECORE_SUCCESS upon success.
- */
-enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param,
- u32 *o_txn_size,
- u32 *o_buf);
-
-/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
@@ -368,12 +336,33 @@ enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
*
 * @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+struct ecore_mdump_retain_data {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain);
+
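A short sketch of consuming the retained data, assuming the caller holds a
valid p_hwfn/p_ptt; field meanings follow the struct above:

    struct ecore_mdump_retain_data mdump_retain;

    if (ecore_mcp_mdump_get_retain(p_hwfn, p_ptt,
                                   &mdump_retain) == ECORE_SUCCESS &&
        mdump_retain.valid)
            DP_NOTICE(p_hwfn, false,
                      "mdump: epoch 0x%08x, pf %d, status 0x%08x\n",
                      mdump_retain.epoch, mdump_retain.pf,
                      mdump_retain.status);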
/**
* @brief - Sets the MFW's max value for the given resource
*
@@ -426,7 +415,12 @@ enum ecore_resc_lock {
/* Locks that the MFW is aware of should be added here downwards */
/* Ecore only locks should be added here upwards */
- ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL,
+
+ /* A dummy value to be used for auxiliary functions in need of
+ * returning an 'error' value.
+ */
+ ECORE_RESC_LOCK_RESC_INVALID,
};
struct ecore_resc_lock_params {
@@ -440,9 +434,11 @@ struct ecore_resc_lock_params {
/* Number of times to retry locking */
u8 retry_num;
+#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
/* The interval in usec between retries */
u16 retry_interval;
+#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
/* Use sleep or delay between retries */
bool sleep_b4_retry;
@@ -493,4 +489,83 @@ enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params);
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; can be OSAL_NULL
+ * @param p_unlock - unlock params struct to be initialized; can be OSAL_NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent);
+
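For reference, a minimal usage sketch pairing this initializer with the
existing lock/unlock calls; it assumes the lock params struct exposes a
b_granted output field, and the resource ID and error handling are
illustrative:

    struct ecore_resc_lock_params lock_params;
    struct ecore_resc_unlock_params unlock_params;
    enum _ecore_status_t rc;

    /* Fill both structs with the defaults for a non-permanent lock */
    ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
                                     ECORE_RESC_LOCK_RESC_ALLOC, false);

    rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
    if (rc != ECORE_SUCCESS || !lock_params.b_granted)
            return ECORE_BUSY;

    /* ... critical section guarded by the MFW resource lock ... */

    rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);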
+/**
+ * @brief Learn of supported MFW features; to be done during early init
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Inform the MFW of the set of features supported by the driver.
+ * Should be done inside the context of the LOAD_REQ.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
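As a sketch of the ordering implied by these two doc comments (an assumption,
not code from this patch), the driver first learns the MFW feature set during
early init and later reports its own within the LOAD_REQ flow:

    enum _ecore_status_t rc;

    /* Learn what the MFW supports */
    rc = ecore_mcp_get_capabilities(p_hwfn, p_ptt);
    if (rc != ECORE_SUCCESS)
            return rc;

    if (ecore_mcp_is_smart_an_supported(p_hwfn))
            DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "SmartLinQ supported\n");

    /* Report the driver's feature set back to the MFW */
    rc = ecore_mcp_set_capabilities(p_hwfn, p_ptt);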
+enum ecore_mcp_drv_attr_cmd {
+ ECORE_MCP_DRV_ATTR_CMD_READ,
+ ECORE_MCP_DRV_ATTR_CMD_WRITE,
+ ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR,
+ ECORE_MCP_DRV_ATTR_CMD_CLEAR,
+};
+
+struct ecore_mcp_drv_attr {
+ enum ecore_mcp_drv_attr_cmd attr_cmd;
+ u32 attr_num;
+
+ /* R/RC - will be set with the read value
+ * W - should hold the required value to be written
+ * C - DC (don't care)
+ */
+ u32 val;
+
+ /* W - mask/offset to be applied on the given value
+ * R/RC/C - DC (don't care)
+ */
+ u32 mask;
+ u32 offset;
+};
+
+/**
+ * @brief Handle the drivers' attributes that are kept by the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_drv_attr
+ */
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr);
+
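A hedged example of reading a single attribute through this API; the
attribute number is purely illustrative, and field usage follows the
R/RC/W/C notes in the struct above:

    struct ecore_mcp_drv_attr drv_attr;
    enum _ecore_status_t rc;

    OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
    drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
    drv_attr.attr_num = 0;      /* illustrative attribute number */

    rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
    if (rc == ECORE_SUCCESS)
            DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                       "attribute 0 reads 0x%08x\n", drv_attr.val);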
+/**
+ * @brief Read the UFP configuration from the shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val);
+
#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index abc190c9..be3e91f0 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -23,24 +23,51 @@ struct ecore_mcp_link_pause_params {
bool forced_tx;
};
+enum ecore_mcp_eee_mode {
+ ECORE_MCP_EEE_DISABLED,
+ ECORE_MCP_EEE_ENABLED,
+ ECORE_MCP_EEE_UNSUPPORTED
+};
+
+struct ecore_link_eee_params {
+ u32 tx_lpi_timer;
+#define ECORE_EEE_1G_ADV (1 << 0)
+#define ECORE_EEE_10G_ADV (1 << 1)
+ /* Capabilities are represented using ECORE_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
+};
+
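A brief sketch of requesting EEE advertisement through these params; it
assumes the ecore_mcp_get_link_params() accessor and that a subsequent link
(re)set request applies the settings:

    struct ecore_mcp_link_params *p_link;

    p_link = ecore_mcp_get_link_params(p_hwfn);

    /* Advertise EEE at 1G and 10G, with LPI on idle TX */
    p_link->eee.enable = true;
    p_link->eee.tx_lpi_enable = true;
    p_link->eee.tx_lpi_timer = 0;       /* firmware default */
    p_link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;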
struct ecore_mcp_link_params {
struct ecore_mcp_link_speed_params speed;
struct ecore_mcp_link_pause_params pause;
u32 loopback_mode; /* in PMM_LOOPBACK values */
+ struct ecore_link_eee_params eee;
};
struct ecore_mcp_link_capabilities {
u32 speed_capabilities;
 bool default_speed_autoneg;
u32 default_speed; /* In Mb/s */
+ enum ecore_mcp_eee_mode default_eee;
+ u32 eee_lpi_timer;
+ u8 eee_speed_caps;
};
struct ecore_mcp_link_state {
bool link_up;
- u32 line_speed; /* In Mb/s */
u32 min_pf_rate; /* In Mb/s */
- u32 speed; /* In Mb/s */
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -67,6 +94,10 @@ struct ecore_mcp_link_state {
u8 partner_adv_pause;
bool sfp_tx_fault;
+
+ bool eee_active;
+ u8 eee_adv_caps;
+ u8 eee_lp_adv_caps;
};
struct ecore_mcp_function_info {
@@ -88,37 +119,6 @@ struct ecore_mcp_function_info {
u16 mtu;
};
-struct ecore_mcp_nvm_common {
- u32 offset;
- u32 param;
- u32 resp;
- u32 cmd;
-};
-
-struct ecore_mcp_nvm_rd {
- u32 *buf_size;
- u32 *buf;
-};
-
-struct ecore_mcp_nvm_wr {
- u32 buf_size;
- u32 *buf;
-};
-
-struct ecore_mcp_nvm_params {
-#define ECORE_MCP_CMD (1 << 0)
-#define ECORE_MCP_NVM_RD (1 << 1)
-#define ECORE_MCP_NVM_WR (1 << 2)
- u8 type;
-
- struct ecore_mcp_nvm_common nvm_common;
-
- union {
- struct ecore_mcp_nvm_rd nvm_rd;
- struct ecore_mcp_nvm_wr nvm_wr;
- };
-};
-
#ifndef __EXTRACT__LINUX__
enum ecore_nvm_images {
ECORE_NVM_IMAGE_ISCSI_CFG,
@@ -583,14 +583,16 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
 * @param p_hwfn - hw function
+ * @param p_ptt
 * @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
- u32 *media_type);
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.
@@ -598,9 +600,9 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP
- * @param param - optional param
- * @param o_mcp_resp - the MCP response code (exclude sequence)
- * @param o_mcp_param - optional parameter provided by the MCP response
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence)
+ * @param o_mcp_param - Optional parameter provided by the MCP response
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - operation was successful
@@ -632,44 +634,6 @@ const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
#endif
-/**
- * @brief - Function for reading/manipulating the nvram. Following are supported
- * functionalities.
- * 1. Read: Read the specified nvram offset.
- * input values:
- * type - ECORE_MCP_NVM_RD
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
- * offset - nvm offset
- *
- * output values:
- * buf - buffer
- * buf_size - buffer size
- *
- * 2. Write: Write the data at the specified nvram offset
- * input values:
- * type - ECORE_MCP_NVM_WR
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
- * offset - nvm offset
- * buf - buffer
- * buf_size - buffer size
- *
- * 3. Command: Send the NVM command to MCP.
- * input values:
- * type - ECORE_MCP_CMD
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
- * offset - nvm offset
- *
- *
- * @param p_hwfn
- * @param p_ptt
- * @param params
- *
- * @return ECORE_SUCCESS - operation was successful.
- */
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params);
-
#ifndef LINUX_REMOVE
/**
* @brief - count number of function with a matching personality on engine.
@@ -891,7 +855,7 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
- * @param p_buf - nvm write buffer
+ * @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -904,7 +868,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
*
* @param p_dev
* @param addr - nvm offset
- * @param p_buf - nvm write buffer
+ * @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -913,6 +877,56 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
u8 *p_buf, u32 len);
/**
+ * @brief - Sends an NVM write command request to the MFW with
+ * payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ * DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size - Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size,
+ u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size,
+ u32 *o_buf);
+
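Since `param' packs both fields into one 32-bit word ([0:23] offset,
[24:31] size), here is a hedged sketch of a single-chunk NVRAM read built on
ecore_mcp_nvm_rd_cmd(); the offset value and logging are illustrative, and
p_hwfn/p_ptt are assumed to be valid:

    u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)];
    u32 resp, param, read_len;
    u32 nvm_offset = 0x1000;            /* illustrative NVM offset */
    u32 nvm_size = sizeof(buf);         /* must fit the 8-bit size field */
    enum _ecore_status_t rc;

    /* Pack the offset into bits [0:23] and the size into bits [24:31] */
    rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
                              (nvm_offset & 0x00ffffff) | (nvm_size << 24),
                              &resp, &param, &read_len, buf);
    if (rc == ECORE_SUCCESS &&
        (resp & FW_MSG_CODE_MASK) == FW_MSG_CODE_NVM_OK)
            DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                       "read %u NVRAM bytes from 0x%x\n",
                       read_len, nvm_offset);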
+/**
* @brief Read from sfp
*
* @param p_hwfn - hw function
@@ -1123,6 +1137,17 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief - Clear the mdump retained data.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
* @brief - Processes the TLV request from MFW i.e., get the required TLV info
* from the ecore client and send it to the MFW.
*
@@ -1134,4 +1159,13 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Return whether the management firmware supports smart AN
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff feature is supported.
+ */
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 0bf1be88..3a1de094 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1403,9 +1403,9 @@ ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
return -1;
}
-static enum _ecore_status_t
-ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf,
+ u32 size)
{
union ecore_mfw_tlv_data *p_tlv_data;
struct ecore_drv_tlv_hdr tlv;
@@ -1512,8 +1512,7 @@ ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
/* Update the TLV values in the local buffer */
for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
if (tlv_group & id) {
- if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
- size))
+ if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
goto drv_done;
}
}
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 226e3d2a..66622323 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -22,6 +22,10 @@ struct ecore_eth_pf_params {
*/
u16 num_cons;
+ /* per-VF number of CIDs */
+ u8 num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
+
/* To enable arfs, previous to HW-init a positive number needs to be
* set [as filters require allocated searcher ILT memory].
* This will set the maximal number of configured steering-filters.
@@ -67,6 +71,7 @@ struct ecore_iscsi_pf_params {
u8 is_target;
u8 bdq_pbl_num_entries[2];
+ u8 disable_stats_collection;
};
enum ecore_rdma_protocol {
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index c9c23096..1d085815 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -28,424 +28,506 @@
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34238
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34239
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34240
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34241
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34242
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34243
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34244
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34245
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34246
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34247
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34248
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34249
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34250
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34251
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34252
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34253
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34254
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34255
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34256
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34257
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34258
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34259
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34260
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34261
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34262
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34263
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34264
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34265
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34266
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34267
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34268
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34269
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34270
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34271
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34272
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34273
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34274
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34275
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34276
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34277
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34278
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34279
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34280
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34281
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34282
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34283
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34284
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34285
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34286
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34287
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34288
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34289
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34290
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34291
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34292
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34293
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34294
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34295
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34296
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34297
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34298
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34299
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34300
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34301
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
+#define QM_REG_RLGLBLCRD_RT_OFFSET 34842
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35099
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35101
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30855
+#define QM_REG_RLPFCRD_RT_OFFSET 35133
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
+#define QM_REG_RLPFENABLE_RT_OFFSET 35149
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30905
+#define QM_REG_WFQPFCRD_RT_OFFSET 35183
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35439
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35440
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31675
+#define QM_REG_TXPQMAP_RT_OFFSET 35953
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32699
+#define QM_REG_WFQVPCRD_RT_OFFSET 36977
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33211
+#define QM_REG_WFQVPMAP_RT_OFFSET 37489
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
+#define QM_REG_VOQCRDLINE_RT_OFFSET 38321
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742
-#define RUNTIME_ARRAY_SIZE 34309
+#define RUNTIME_ARRAY_SIZE 41743
#endif /* __RT_DEFS_H__ */
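The *_RT_OFFSET/*_RT_SIZE pairs above index a flat runtime-init staging array of RUNTIME_ARRAY_SIZE u32 entries; renumbering the offsets (and growing the array from 34309 to 41743) keeps each block's slice contiguous after the new QM, NIG and PBF entries. A minimal sketch of how such offsets are consumed — rt_data and store_rt_reg are illustrative stand-ins, not the driver's actual runtime-init helpers:

static u32 rt_data[RUNTIME_ARRAY_SIZE];

/* Stage a value; the whole array is flushed to the chip during init. */
static void store_rt_reg(u32 rt_offset, u32 val)
{
	rt_data[rt_offset] = val;
}

static void rt_array_example(void)
{
	/* A scalar entry occupies exactly its offset... */
	store_rt_reg(QM_REG_RLGLBLENABLE_RT_OFFSET, 1);
	/* ...while an array entry spans OFFSET..OFFSET + SIZE - 1. */
	store_rt_reg(QM_REG_RLGLBLINCVAL_RT_OFFSET + 5, 0);
}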
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index c8e564f9..86e84964 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -49,6 +49,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
* for a physical function (PF).
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
@@ -58,6 +59,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index d6e4b9e0..7598e7a6 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -232,6 +232,7 @@ static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
@@ -241,14 +242,14 @@ static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
}
if (p_tunn->vxlan_port.b_update_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@@ -293,9 +294,11 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
&p_tun->ip_gre);
}
+#define ETH_P_8021Q 0x8100
+
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
- enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
@@ -305,6 +308,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
+ int i;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
@@ -332,20 +336,26 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
- switch (mode) {
- case ECORE_MF_DEFAULT:
- case ECORE_MF_NPAR:
- p_ramrod->mf_mode = MF_NPAR;
- break;
- case ECORE_MF_OVLAN:
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
p_ramrod->mf_mode = MF_OVLAN;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Unsupported MF mode, init as DEFAULT\n");
+ else
p_ramrod->mf_mode = MF_NPAR;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid =
+ OSAL_CPU_TO_LE16(ETH_P_8021Q);
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ else
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < 8; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
+ (u8)i;
}
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -358,7 +368,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
- if (IS_MF_SI(p_hwfn))
+ if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
+ &p_hwfn->p_dev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
@@ -384,18 +395,20 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index, p_ramrod->outer_tag);
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
+ p_ramrod->outer_tag_config.outer_tag.tci);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->p_dev->tunnel);
return rc;
}
-enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
@@ -419,6 +432,50 @@ enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+
+/* QM rate limiter resolution is 1.6Mbps */
+#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
+
+/* FW uses 1/64k to express gd */
+#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))
+
+u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
+{
+ return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
+}
+
+u16 ecore_sp_rl_gd_denom(u32 gd)
+{
+ return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
+}
+
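A worked sketch of the two conversions above (values illustrative): QM_RL_RESOLUTION maps Mbps into 1.6 Mbps ticks, FW_GD_RESOLUTION maps a gain into a 1/64K denominator, and both helpers clamp the result to u16.

static void rl_units_example(void)
{
	u16 qm  = ecore_sp_rl_mb_to_qm(1600); /* 1600 * 10 / 16 = 1000 ticks */
	u16 gd  = ecore_sp_rl_gd_denom(64);   /* 64 * 1024 / 64 = 1024, i.e. 1024/64K */
	u16 nil = ecore_sp_rl_gd_denom(0);    /* guarded: returns 0, no div-by-zero */

	(void)qm; (void)gd; (void)nil;
}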
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params)
{
@@ -450,21 +507,37 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
- rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
- rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
- rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
- rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+ rl_update->rl_max_rate =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
+ rl_update->rl_r_ai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
+ rl_update->rl_r_hai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
+ rl_update->dcqcn_g =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
- rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
- params->dcqcn_timeuot_us);
+ rl_update->dcqcn_timeuot_us =
+ OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+ rl_update->qcn_update_param_flg,
+ rl_update->dcqcn_update_param_flg,
+ rl_update->rl_init_flg, rl_update->rl_start_flg,
+ rl_update->rl_stop_flg, rl_update->rl_id_first,
+ rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+ rl_update->rl_bc_rate, rl_update->rl_max_rate,
+ rl_update->rl_r_ai, rl_update->rl_r_hai,
+ rl_update->dcqcn_g, rl_update->dcqcn_k_us,
+ rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
+
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@@ -505,7 +578,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -551,3 +624,28 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
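A minimal caller sketch for the new STAG-update ramrod, assuming the surrounding code has already refreshed the cached ovlan (the helper itself only posts whatever hw_info holds):

static void stag_change_example(struct ecore_hwfn *p_hwfn, u16 new_ovlan)
{
	p_hwfn->hw_info.ovlan = new_ovlan;
	if (ecore_sp_pf_update_stag(p_hwfn) != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "STAG update ramrod failed\n");
}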
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 33e31e42..98009c65 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -59,8 +59,8 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf start tunneling configuration
- * @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
*
@@ -68,8 +68,8 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
- enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
@@ -85,7 +85,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_stop - PF Function Stop Ramrod
@@ -123,10 +123,10 @@ struct ecore_rl_update_params {
u8 rl_id_last;
u8 rl_dc_qcn_flg; /* If set, RL will be used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
- u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
- u16 rl_r_ai; /* Active increase rate */
- u16 rl_r_hai; /* Hyper active increase rate */
- u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
+ u32 rl_max_rate; /* Maximum rate in Mbps resolution */
+ u32 rl_r_ai; /* Active increase rate */
+ u32 rl_r_hai; /* Hyper active increase rate */
+ u32 dcqcn_gd; /* DCQCN Alpha update gain */
u32 dcqcn_k_us; /* DCQCN Alpha update interval */
u32 dcqcn_timeuot_us;
u32 qcn_timeuot_us;
@@ -143,4 +143,23 @@ struct ecore_rl_update_params {
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params);
+/**
+ * @brief ecore_sp_pf_update_stag - PF STAG value update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn);
+
#endif /*__ECORE_SP_COMMANDS_H__*/
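With the fields widened to u32, callers of ecore_sp_rl_update() now pass plain Mbps and gain values and the conversion to FW units happens inside the helper. A minimal sketch, assuming a valid p_hwfn and touching only fields shown in the hunk above:

static enum _ecore_status_t rl_update_example(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rl_update_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.rl_max_rate = 25000; /* Mbps; converted via ecore_sp_rl_mb_to_qm() */
	params.dcqcn_gd = 64;       /* gain; converted via ecore_sp_rl_gd_denom() */

	return ecore_sp_rl_update(p_hwfn, &params);
}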
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 3c1d05b3..70ffa8cd 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -36,9 +36,8 @@
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
- void *cookie,
- union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+ union event_ring_data OSAL_UNUSED * data,
u8 fw_return_code)
{
struct ecore_spq_comp_done *comp_done;
@@ -87,6 +86,7 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct ecore_spq_comp_done *comp_done;
+ struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@@ -103,8 +103,13 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_mcp_drain(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;
@@ -173,10 +178,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
+ struct e4_core_conn_context *p_cxt;
struct ecore_cxt_info cxt_info;
- struct core_conn_context *p_cxt;
- enum _ecore_status_t rc;
u16 physical_q;
+ enum _ecore_status_t rc;
cxt_info.iid = p_spq->cid;
@@ -225,9 +230,9 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent)
{
struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
u16 echo = ecore_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
- struct core_db_data db;
p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
elem = ecore_chain_produce(p_chain);
@@ -236,31 +241,24 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
- *elem = p_ent->elem; /* struct assignment */
+ *elem = p_ent->elem; /* Struct assignment */
- /* send a doorbell on the slow hwfn session */
- OSAL_MEMSET(&db, 0, sizeof(db));
- SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_SPQ_PROD_CMD);
- db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+ p_db_data->spq_prod =
+ OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
- /* make sure the SPQE is updated before the doorbell */
+ /* Make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
- DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
- *(u32 *)&db);
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
- /* make sure doorbell is rang */
+ /* Make sure doorbell is rung */
OSAL_WMB(p_hwfn->p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
" agg_params: %02x, prod: %04x\n",
- DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
- db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+ p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+ p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
return ECORE_SUCCESS;
}
@@ -273,12 +271,16 @@ static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
- case PROTOCOLID_COMMON:
- return ecore_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- default:
+ ecore_spq_async_comp_cb cb;
+
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
@@ -286,6 +288,28 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
}
}
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
+}
+
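The SR-IOV code below registers ecore_sriov_eqe_event for PROTOCOLID_COMMON through exactly this interface; a generic sketch with a hypothetical handler, matching the ecore_spq_async_comp_cb typedef added in ecore_spq.h:

static enum _ecore_status_t
example_eqe_cb(struct ecore_hwfn *p_hwfn, u8 opcode, u16 echo,
	       union event_ring_data *data, u8 fw_return_code)
{
	/* Dispatch on opcode/echo; data carries the per-protocol payload. */
	return ECORE_SUCCESS;
}

static void async_cb_example(struct ecore_hwfn *p_hwfn)
{
	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, example_eqe_cb);
	/* ... EQ completions for the protocol now land in example_eqe_cb ... */
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
}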
/***************************************************************************
* EQ API
***************************************************************************/
@@ -450,8 +474,11 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct core_db_data *p_db_data;
+ void OSAL_IOMEM *db_addr;
dma_addr_t p_phys = 0;
u32 i, capacity;
+ enum _ecore_status_t rc;
OSAL_LIST_INIT(&p_spq->pending);
OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -489,6 +516,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
/* reset the chain itself */
ecore_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -530,7 +575,9 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#endif
p_hwfn->p_spq = p_spq;
return ECORE_SUCCESS;
@@ -544,11 +591,16 @@ spq_allocate_fail:
void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
+ void OSAL_IOMEM *db_addr;
u32 capacity;
if (!p_spq)
return;
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
if (p_spq->p_virt) {
capacity = ecore_chain_get_capacity(&p_spq->chain);
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -559,7 +611,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
}
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
OSAL_FREE(p_hwfn->p_dev, p_spq);
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index e530f834..526cff08 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -86,6 +86,22 @@ struct ecore_consq {
struct ecore_chain chain;
};
+typedef enum _ecore_status_t
+(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb);
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
struct ecore_spq {
osal_spinlock_t lock;
@@ -124,6 +140,10 @@ struct ecore_spq {
u32 comp_count;
u32 cid;
+
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
struct ecore_port;
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index db2873e7..b1e26d6f 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -27,6 +27,12 @@
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
@@ -53,9 +59,26 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
"CHANNEL_TLV_UPDATE_TUNN_PARAM",
"CHANNEL_TLV_COALESCE_UPDATE",
+ "CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
"CHANNEL_TLV_MAX"
};
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
+
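A sketch of how the returned bitmask is meant to be consumed when building queue cids later in this file; the flag meanings are inferred from the two conditions above, not stated by the patch:

static void legacy_flags_example(struct ecore_vf_info *vf)
{
	u8 vf_legacy = ecore_vf_calculate_legacy(vf);

	if (vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD) {
		/* VF HSI pre-dates ETH_HSI_VER_NO_PKT_LEN_TUNN: keep the
		 * old Rx-producer placement.
		 */
	}
	if (vf_legacy & ECORE_QCID_LEGACY_VF_CID) {
		/* VF never advertised VFPF_ACQUIRE_CAP_QUEUE_QIDS: stay on
		 * the legacy one-cid-per-queue-zone scheme.
		 */
	}
}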
/* IOV ramrods */
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
@@ -193,9 +216,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
}
static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
int i;
@@ -214,8 +235,7 @@ enum ecore_iov_validate_q_mode {
ECORE_IOV_VALIDATE_Q_DISABLE,
};
-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
u16 qid,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
@@ -257,8 +277,7 @@ static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
- mode, false);
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
@@ -274,8 +293,7 @@ static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
- mode, true);
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -297,13 +315,12 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
}
/* Is there at least 1 queue open? */
-static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
false))
return true;
@@ -311,13 +328,12 @@ static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
return false;
}
-static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_txqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
true))
return true;
@@ -325,19 +341,6 @@ static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
return false;
}
-/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
-{
- int i;
-
- while (length--) {
- crc ^= *ptr++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
- }
- return crc;
-}
-
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
int vfid,
struct ecore_ptt *p_ptt)
@@ -359,8 +362,8 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
/* Increment bulletin board version and compute crc */
p_bulletin->version++;
- p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
- p_vf->bulletin.size - crc_size);
+ p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
@@ -442,33 +445,6 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
}
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- struct ecore_igu_block *p_sb;
- u16 sb_id;
- u32 val;
-
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
- return;
- }
-
- for (sb_id = 0;
- sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
- !(p_sb->status & ECORE_IGU_STATUS_PF)) {
- val = ecore_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
-}
-
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
@@ -621,20 +597,24 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->pf_iov_info = p_sriov;
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
return ecore_iov_allocate_vfdb(p_hwfn);
}
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
ecore_iov_setup_vfdb(p_hwfn);
- ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
@@ -843,11 +823,52 @@ static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vf_id,
+ u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* If client overrides this, don't do anything */
+ if (p_hwfn->p_dev->b_dont_override_vf_msix)
+ return ECORE_SUCCESS;
+
+ /* For AH onward, configuration is per-PF. Find maximum of all
+ * the currently enabled child VFs, and set the number to be that.
+ */
+ if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = OSAL_MAX_T(u8, current_max,
+ p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
if (vf->to_disable)
return ECORE_SUCCESS;
@@ -861,11 +882,8 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
- rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
- vf->abs_vf_id, vf->num_sbs);
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
return rc;
@@ -934,46 +952,38 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 num_rx_queues)
{
- struct ecore_igu_block *igu_blocks;
- int qid = 0, igu_id = 0;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
u32 val = 0;
- igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues =
+ (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
- while ((qid < num_rx_queues) &&
- (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
- if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
- struct cau_sb_entry sb_entry;
-
- vf->igu_sbs[qid] = (u16)igu_id;
- igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
- SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
- val);
-
- /* Configure igu sb in CAU which were marked valid */
- ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_hwfn->rel_pf_id,
- vf->abs_vf_id, 1);
- ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- igu_id * sizeof(u64), 2, 0);
- qid++;
- }
- igu_id++;
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = ecore_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, 0);
}
vf->num_sbs = (u8)num_rx_queues;
@@ -1009,10 +1019,8 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
ecore_wr(p_hwfn, p_ptt, addr, val);
- p_info->igu_map.igu_blocks[igu_id].status |=
- ECORE_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
@@ -1110,34 +1118,28 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
vf->vport_id = p_params->vport_id;
vf->rss_eng_id = p_params->rss_eng_id;
- /* Perform sanity checking on the requested queue_id */
+ /* Since it's possible to relocate SBs, it's a bit difficult to check
+ * things here. Simply check whether the index falls in the range
+ * belonging to the PF.
+ */
for (i = 0; i < p_params->num_queues; i++) {
- u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
- u16 max_vf_qzone = min_vf_qzone +
- FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
-
qid = p_params->req_rx_queue[i];
- if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
DP_NOTICE(p_hwfn, true,
- "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
qid, p_params->rel_vf_id,
- min_vf_qzone, max_vf_qzone);
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
return ECORE_INVAL;
}
qid = p_params->req_tx_queue[i];
- if (qid > max_vf_qzone) {
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
DP_NOTICE(p_hwfn, true,
- "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
- qid, p_params->rel_vf_id, max_vf_qzone);
+ "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
return ECORE_INVAL;
}
-
- /* If client *really* wants, Tx qid can be shared with PF */
- if (qid < min_vf_qzone)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
- p_params->rel_vf_id, qid, i);
}
/* Limit number of queues according to number of CIDs */
@@ -1307,8 +1309,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
@@ -1364,7 +1365,12 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct ecore_dmae_params params;
@@ -1378,7 +1384,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
mbx->sw_mbx.response_size =
length + sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel)
+ if (!p_vf->b_hw_channel)
return;
#endif
@@ -1394,17 +1400,22 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
+ /* Once PF copies the rc to the VF, the latter can continue and
+ * send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
sizeof(u64) / 4, &params);
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
switch (flag) {
case ECORE_IOV_VP_UPDATE_ACTIVATE:
@@ -1442,15 +1453,15 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
@@ -1460,12 +1471,13 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - vport_update resp: TLV %d, status %02x\n",
p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
total_len += size;
}
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
@@ -1480,13 +1492,11 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
mbx->offset = (u8 *)mbx->reply_virt;
- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
- OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
@@ -1535,6 +1545,60 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0, or log(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
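Worked numbers for the helper above, assuming a register value of 4: it returns val + 11 = 15 as log2 of the bar size in bytes (the +11 suggests the register counts in 2KB granules), and the caller expands it with a shift:

static u32 db_bar_bytes_example(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	u32 log_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);

	/* val = 4 -> log_size = 15 -> 1 << 15 = 32768 bytes (32KB) */
	return log_size ? (1U << log_size) : 0;
}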
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
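The final clamp keeps every CID's doorbell inside the mapped window: db_size is the stride between consecutive VF DEMS doorbells, so bar_size / db_size is how many doorbells fit. A numeric sketch with assumed values:

static u8 cid_clamp_example(u8 requested, u32 bar_size, u8 db_size)
{
	u32 fit = bar_size / db_size; /* doorbells that fit in the bar */

	/* e.g. 32768 / 128 = 256 -> no clamp; 16384 / 128 = 128 -> clamp */
	if (fit < 256)
		return OSAL_MIN_T(u8, requested, (u8)fit);
	return requested;
}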
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
@@ -1571,6 +1635,8 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
+ ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
@@ -1582,18 +1648,18 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
- p_resp->num_mc_filters < p_req->num_mc_filters) {
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
- " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
- " vlan [%02x/%02x] mc [%02x/%02x]\n",
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs, p_resp->num_rxqs,
p_req->num_rxqs, p_resp->num_txqs,
p_req->num_sbs, p_resp->num_sbs,
p_req->num_mac_filters, p_resp->num_mac_filters,
p_req->num_vlan_filters, p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
@@ -1610,8 +1676,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
return PFVF_STATUS_SUCCESS;
}
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
- struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
@@ -1693,7 +1758,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
}
/* On 100g PFs, prevent old VFs from loading */
- if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support"
@@ -1721,14 +1786,24 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+ p_ptt);
+
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
@@ -1969,8 +2044,7 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid = OSAL_NULL;
/* There can be at most 1 Rx queue on qzone. Find it */
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
- p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
if (p_cid == OSAL_NULL)
continue;
@@ -2106,16 +2180,19 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
u8 status = PFVF_STATUS_SUCCESS;
enum _ecore_status_t rc;
+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
vf->vport_instance--;
vf->spoof_chk = false;
- if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
- (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn, false,
"VF [%02x] - considered malicious;"
" Unable to stop RX/TX queuess\n",
vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
}
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
@@ -2129,6 +2206,7 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
vf->configured_features = 0;
OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
@@ -2154,9 +2232,8 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
@@ -2171,6 +2248,42 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+ struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Search for the qid the VF published, if it's going to provide one */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return ECORE_IOV_LEGACY_QID_TX;
+ else
+ return ECORE_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (p_qid_tlv == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
+
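
A compact way to read the qid negotiation above (an editorial sketch, not part of the patch; the constants are the ones this patch adds to ecore_sriov.h, while vf_supports_qids and tlv_qid are hypothetical locals standing in for the capability check and the parsed TLV): legacy VFs are pinned to fixed per-qzone slots, while QIDS-capable VFs choose their own slot and the PF only range-checks it.

/* Sketch of the slot selection performed by ecore_iov_vf_mbx_qid(). */
u8 slot;

if (!vf_supports_qids)			/* no VFPF_ACQUIRE_CAP_QUEUE_QIDS */
	slot = b_is_tx ? ECORE_IOV_LEGACY_QID_TX	/* 1 */
		       : ECORE_IOV_LEGACY_QID_RX;	/* 0 */
else if (tlv_qid < MAX_QUEUES_PER_QZONE)
	slot = tlv_qid;			/* VF-chosen qid_usage_idx */
else
	slot = ECORE_IOV_QID_INVALID;	/* 0xFE - request is rejected */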
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
@@ -2179,11 +2292,11 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ u8 qid_usage_idx, vf_legacy = 0;
struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
struct ecore_queue_cid *p_cid;
- bool b_legacy_vf = false;
- u8 qid_usage_idx;
+ struct ecore_sb_info sb_dummy;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
@@ -2193,45 +2306,43 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Legacy VFs made assumptions on the CID their queues connected to,
- * assuming queue X used CID X.
- * TODO - need to validate that there was no official release post
- * the current legacy scheme that still made that assumption.
- */
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN)
- b_legacy_vf = true;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
- /* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
OSAL_MEMSET(&params, 0, sizeof(params));
params.queue_id = (u8)p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
- params.sb_idx = req->sb_index;
- /* TODO - set qid_usage_idx according to extended TLV. For now, use
- * '0' for Rx.
- */
- qid_usage_idx = 0;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->rx_qid;
- vf_params.b_legacy = b_legacy_vf;
+ vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, true, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
- if (!b_legacy_vf)
+ if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
@@ -2254,7 +2365,8 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
out:
ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
- b_legacy_vf);
+ !!(vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD));
}
static void
@@ -2382,7 +2494,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
if (b_update_required) {
u16 geneve_port;
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
if (rc != ECORE_SUCCESS)
@@ -2397,11 +2509,11 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
}
send_resp:
- p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ p_resp = ecore_add_tlv(&mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
@@ -2433,9 +2545,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
@@ -2456,8 +2567,8 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
struct ecore_queue_cid *p_cid;
- bool b_legacy_vf = false;
- u8 qid_usage_idx;
+ struct ecore_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
u32 cid = 0;
enum _ecore_status_t rc;
u16 pq;
@@ -2470,39 +2581,35 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* In case this is a legacy VF - need to know to use the right cids.
- * TODO - need to validate that there was no official release post
- * the current legacy scheme that still made that assumption.
- */
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN)
- b_legacy_vf = true;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
- /* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
- params.sb_idx = req->sb_index;
-
- /* TODO - set qid_usage_idx according to extended TLV. For now, use
- * '1' for Tx.
- */
- qid_usage_idx = 1;
- if (p_queue->cids[qid_usage_idx].p_cid)
- goto out;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->tx_qid;
- vf_params.b_legacy = b_legacy_vf;
+ vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, false, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
@@ -2528,80 +2635,74 @@ out:
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 rxq_id,
- u8 num_rxqs,
+ u8 qid_usage_idx,
bool cqe_completion)
{
+ struct ecore_vf_queue *p_queue;
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid, i;
- /* TODO - improve validation [wrap around] */
- if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ ECORE_IOV_VALIDATE_Q_NA)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
return ECORE_INVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
- struct ecore_queue_cid **pp_cid = OSAL_NULL;
+ p_queue = &vf->vf_queues[rxq_id];
- /* There can be at most a single Rx per qzone. Find it */
- for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
- if (p_queue->cids[i].p_cid &&
- !p_queue->cids[i].b_is_tx) {
- pp_cid = &p_queue->cids[i].p_cid;
- break;
- }
- }
- if (pp_cid == OSAL_NULL) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
- vf->relative_vf_id, qid);
- continue;
- }
-
- rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
- false, cqe_completion);
- if (rc != ECORE_SUCCESS)
- return rc;
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct ecore_queue_cid *p_cid;
- *pp_cid = OSAL_NULL;
- vf->num_active_rxqs--;
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx,
+ rxq_id, p_cid->qid_usage_idx);
+ return ECORE_INVAL;
}
- return rc;
+ /* Now that we know we have a valid Rx-queue - close it */
+ rc = ecore_eth_rx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ vf->num_active_rxqs--;
+
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
- u16 txq_id, u8 num_txqs)
+ u16 txq_id,
+ u8 qid_usage_idx)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_vf_queue *p_queue;
- int qid, j;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
- ECORE_IOV_VALIDATE_Q_NA) ||
- !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- p_queue = &vf->vf_queues[qid];
- for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
- if (p_queue->cids[j].p_cid == OSAL_NULL)
- continue;
-
- if (!p_queue->cids[j].b_is_tx)
- continue;
-
- rc = ecore_eth_tx_queue_stop(p_hwfn,
- p_queue->cids[j].p_cid);
- if (rc != ECORE_SUCCESS)
- return rc;
+ p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return ECORE_INVAL;
- p_queue->cids[j].p_cid = OSAL_NULL;
- }
- }
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- return rc;
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ return ECORE_SUCCESS;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2610,20 +2711,34 @@ static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+	/* Starting with CHANNEL_TLV_QID, 'num_rxqs' is assumed to be one.
+	 * Since no older ecore passed multiple queues using this API,
+	 * sanity-check the value.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ qid_usage_idx, req->cqe_completion);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
@@ -2634,19 +2749,35 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+	/* Starting with CHANNEL_TLV_QID, 'num_txqs' is assumed to be one.
+	 * Since no older ecore passed multiple queues using this API,
+	 * sanity-check the value.
*/
req = &mbx->req_virt->stop_txqs;
- rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+ qid_usage_idx);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
@@ -2662,6 +2793,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
u16 i;
@@ -2669,10 +2801,30 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- /* Validate inputs */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ /* Starting with the addition of CHANNEL_TLV_QID, this API started
+ * expecting a single queue at a time. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+ req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
- ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
vf->relative_vf_id, req->rx_qid,
@@ -2682,12 +2834,9 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < req->num_rxqs; i++) {
- struct ecore_vf_queue *p_queue;
u16 qid = req->rx_qid + i;
- p_queue = &vf->vf_queues[qid];
- handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- p_queue);
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
}
rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2696,7 +2845,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
complete_event_flg,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
goto out;
status = PFVF_STATUS_SUCCESS;
@@ -2931,8 +3080,7 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
goto out;
}
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[q_idx]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
p_rss->rss_ind_table[i] = p_cid;
}
@@ -2945,7 +3093,6 @@ out:
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_sge_tpa_params *p_sge_tpa,
struct ecore_iov_vf_mbx *p_mbx,
@@ -3035,7 +3182,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
&sge_tpa_params, mbx, &tlvs_mask);
tlvs_accepted = tlvs_mask;
@@ -3066,8 +3213,8 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
"Upper-layer prevents said VF"
" configuration\n");
else
- DP_NOTICE(p_hwfn, true,
- "No feature tlvs found for vport update\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
@@ -3272,12 +3419,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
goto out;
}
- /* Update shadow copy of the VF configuration */
+	/* Update shadow copy of the VF configuration. In case the shadow
+	 * indicates the action should be blocked, return success to the VF
+	 * to imitate the firmware behaviour in that case.
+ */
if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
- ECORE_SUCCESS) {
- status = PFVF_STATUS_FAILURE;
+ ECORE_SUCCESS)
goto out;
- }
	/* Determine if the unicast filtering is acceptable to the PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -3382,6 +3530,76 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
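
One detail worth noting in the Tx branch above: reading the first active Tx cid is enough because the coalesce-update paths below write the same tx_coal to every Tx cid of a qzone. A sketch of the assumed invariant (editorial, inferred from the loops in the two set-coalesce functions that follow):

/* Invariant assumed by the read path:
 * for any two Tx cids i, j within one qzone,
 *	coalesce(cids[i].p_cid) == coalesce(cids[j].p_cid)
 * so the loop may break after the first active Tx cid.
 */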
static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
@@ -3422,8 +3640,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
@@ -3432,6 +3649,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
goto out;
}
+ vf->rx_coal = rx_coal;
}
/* TODO - in future, it might be possible to pass this in a per-cid
@@ -3456,6 +3674,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
goto out;
}
}
+ vf->tx_coal = tx_coal;
}
status = PFVF_STATUS_SUCCESS;
@@ -3464,6 +3683,92 @@ out:
sizeof(struct pfvf_def_resp_tlv), status);
}
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid)
+{
+ struct ecore_queue_cid *p_cid;
+ struct ecore_vf_info *vf;
+ struct ecore_ptt *p_ptt;
+ int i, rc = 0;
+
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] - Can not set coalescing: VF is not active\n",
+ vf_id);
+ return ECORE_INVAL;
+ }
+
+ vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
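
For orientation, a hedged usage sketch of the new PF-side API (the calling context is an assumption, as are the microsecond units, which merely follow the convention of the coalescing knobs elsewhere in the driver):

/* Sketch: program Rx=64 and Tx=128 for queue 0 of VF 3.
 * A zero value skips that direction, per the validation above.
 */
enum _ecore_status_t rc;

rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128, 3, 0);
if (rc != ECORE_SUCCESS)
	DP_NOTICE(p_hwfn, false, "Failed to configure VF coalescing\n");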
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3495,11 +3800,11 @@ static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = ecore_rd(p_hwfn, p_ptt,
@@ -3514,7 +3819,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = ecore_rd(p_hwfn, p_ptt,
@@ -3524,7 +3829,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
OSAL_MSLEEP(20);
@@ -3623,8 +3928,7 @@ cleanup:
ack_vfs[vfid / 32] |= (1 << (vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
- p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
- ~(1ULL << (rel_vf_id % 64));
+ p_vf->vf_mbx.b_pending_msg = false;
}
return rc;
@@ -3734,11 +4038,11 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
- __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ __ecore_vf_get_link_params(p_params, p_bulletin);
if (p_link)
- __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ __ecore_vf_get_link_state(p_link, p_bulletin);
if (p_caps)
- __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
@@ -3754,12 +4058,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* ecore_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
+ if (!mbx->b_pending_msg) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%02x]: Trying to process mailbox message when none is pending\n",
+ p_vf->abs_vf_id);
+ return;
+ }
+ mbx->b_pending_msg = false;
+#endif
mbx->first_tlv = mbx->req_virt->first_tlv;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Processing mailbox message [type %04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
OSAL_IOV_VF_MSG_TYPE(p_hwfn,
p_vf->relative_vf_id,
mbx->first_tlv.tl.type);
@@ -3820,6 +4134,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_COALESCE_READ:
+ ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
@@ -3884,26 +4201,20 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
#endif
}
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
{
- u64 add_bit = 1ULL << (vfid % 64);
+ int i;
- /* TODO - add locking mechanisms [no atomics in ecore, so we can't
- * add the lock inside the ecore_pf_iov struct].
- */
- p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+ OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events)
-{
- u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
- /* TODO - Take a lock */
- OSAL_MEMCPY(events, p_pending_events,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
- OSAL_MEMSET(p_pending_events, 0,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ if (p_vf->vf_mbx.b_pending_msg)
+ events[i / 64] |= 1ULL << (i % 64);
+ }
}
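
With the dedicated pending_events array gone, a consumer rebuilds the bitmask on every poll. A minimal sketch of that pattern (assumptions: the caller holds a valid p_ptt, and ecore_iov_process_mbx_req() takes the relative VF id as its third argument):

/* Sketch: poll-and-dispatch loop over the on-demand events bitmask. */
u64 events[ECORE_VF_ARRAY_LENGTH];
int i;

ecore_iov_pf_get_pending_events(p_hwfn, events);
ecore_for_each_vf(p_hwfn, i) {
	if (events[i / 64] & (1ULL << (i % 64)))
		ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
}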
static struct ecore_vf_info *
@@ -3937,6 +4248,8 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
*/
p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+ p_vf->vf_mbx.b_pending_msg = true;
+
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
@@ -3945,24 +4258,31 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
{
struct ecore_vf_info *p_vf;
- p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->errId);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn, false,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
@@ -4001,7 +4321,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
return i;
out:
- return E4_MAX_NUM_VFS;
+ return MAX_NUM_VFS_E4;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -4370,6 +4690,7 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid, int val)
{
+ struct ecore_mcp_link_state *p_link;
struct ecore_vf_info *vf;
u8 abs_vp_id = 0;
enum _ecore_status_t rc;
@@ -4383,7 +4704,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
@@ -4513,3 +4837,17 @@ ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
else
return 0;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ vf_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 3c2f58bd..850b1052 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -16,7 +16,7 @@
#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
- (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+ (MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@@ -45,6 +45,9 @@ struct ecore_iov_vf_mbx {
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
+ /* Message from VF awaits handling */
+ bool b_pending_msg;
+
u8 *offset;
#ifdef CONFIG_ECORE_SW_CHANNEL
@@ -63,6 +66,10 @@ struct ecore_iov_vf_mbx {
*/
};
+#define ECORE_IOV_LEGACY_QID_RX (0)
+#define ECORE_IOV_LEGACY_QID_TX (1)
+#define ECORE_IOV_QID_INVALID (0xFE)
+
struct ecore_vf_queue_cid {
bool b_is_tx;
struct ecore_queue_cid *p_cid;
@@ -110,6 +117,11 @@ struct ecore_vf_info {
struct ecore_bulletin bulletin;
dma_addr_t vf_bulletin;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+	/* Determine whether the PF communicates with the VF over the HW or SW channel */
+ bool b_hw_channel;
+#endif
+
/* PF saves a copy of the last VF acquire message */
struct vfpf_acquire_tlv acquire;
@@ -129,6 +141,9 @@ struct ecore_vf_info {
u8 num_rxqs;
u8 num_txqs;
+ u16 rx_coal;
+ u16 tx_coal;
+
u8 num_sbs;
u8 num_mac_filters;
@@ -160,8 +175,7 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
- u64 pending_events[ECORE_VF_ARRAY_LENGTH];
+ struct ecore_vf_info vfs_array[MAX_NUM_VFS_E4];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
#ifndef REMOVE_DBG
@@ -197,17 +211,13 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
+ * @param offset
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset,
- u16 type,
- u16 length);
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
@@ -231,10 +241,8 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
- * @param p_ptt
*/
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_free - free sriov related resources
@@ -251,38 +259,12 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn);
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
/**
- * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data);
-
-/**
- * @brief calculate CRC for bulletin board validation
- *
- * @param basic crc seed
- * @param ptr to beginning of buffer
- * @length in bytes of buffer
- *
- * @return calculated crc over buffer [with respect to seed].
- */
-u32 ecore_crc32(u32 crc,
- u8 *ptr,
- u32 length);
-
-/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
u32 *disabled_vfs);
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index f4d331cf..25109dbd 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -44,7 +44,7 @@ static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* Init type and length */
- p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+ p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
/* Init first tlv header */
((struct vfpf_first_tlv *)p_tlv)->reply_address =
@@ -65,6 +65,14 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* The SW channel implementation of Windows needs to know the 'exact'
+ * response size of any given message. That means that for future
+ * messages we'd be unable to send TLVs to the PF whenever
+ * |response| != |default response|, unless the PF can answer them.
+ * We'd need to handshake any such case in the acquire capabilities.
+ */
+#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
u8 *done, u32 resp_size)
@@ -122,35 +130,118 @@ ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
}
if (!*done) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF <-- PF Timeout [Type %d]\n",
- p_req->first_tlv.tl.type);
+ DP_NOTICE(p_hwfn, true,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
} else {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF response: %d [Type %d]\n",
- *done, p_req->first_tlv.tl.type);
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn, false,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+ return rc;
+}
+
+static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
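
On the wire, the qid rides as an optional TLV between the request and the list terminator. A sketch of the resulting message layout for a QIDS-capable VF (editorial; the struct names are those from ecore_vfpf_if.h used throughout this file):

/* vfpf_start_rxq_tlv	<- first tlv, e.g. CHANNEL_TLV_START_RXQ
 * vfpf_qid_tlv		<- CHANNEL_TLV_QID, appended only when negotiated
 * channel_list_end_tlv	<- CHANNEL_TLV_LIST_END terminator
 */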
+enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
+ bool b_final)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = ECORE_AGAIN;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+ if (p_iov->pf2vf_reply)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct ecore_bulletin_content);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ size);
}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = OSAL_NULL;
+
return rc;
}
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+ return _ecore_vf_pf_release(p_hwfn, true);
+}
+
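
The release path is split so that the 100g retry later in this file can send CHANNEL_TLV_RELEASE without freeing the DMA buffers it is about to reuse. A sketch of the two call shapes (editorial):

/* Mid-negotiation retry: keep the mailbox and bulletin buffers alive. */
rc = _ecore_vf_pf_release(p_hwfn, false);

/* Final teardown: also frees the vf2pf/pf2vf buffers and the bulletin. */
rc = ecore_vf_pf_release(p_hwfn);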
#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF unwilling to fullill resource request: rxq [%02x/%02x]"
- " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
- " vlan [%02x/%02x] mc [%02x/%02x]."
- " Try PF recommended amount\n",
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
p_req->num_sbs, p_resp->num_sbs,
p_req->num_mac_filters, p_resp->num_mac_filters,
p_req->num_vlan_filters, p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
@@ -159,6 +250,7 @@ static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
}
static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
@@ -185,6 +277,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
@@ -201,12 +294,17 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar)
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -221,10 +319,8 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* send acquire request */
rc = ecore_send_msg2pf(p_hwfn,
&resp->hdr.status, sizeof(*resp));
-
- /* PF timeout */
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS)
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
OSAL_MEMCPY(&p_iov->acquire_resp,
@@ -310,6 +406,15 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
VFPF_ACQUIRE_CAP_PRE_FP_HSI)
p_iov->b_pre_fp_hsi = true;
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs +
+ resp->resc.num_txqs;
+
rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
if (rc) {
DP_NOTICE(p_hwfn, true,
@@ -325,7 +430,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* get HW info */
p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
- p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+ p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
DP_INFO(p_hwfn, "Chip details - %s%d\n",
ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
@@ -357,10 +462,28 @@ exit:
return rc;
}
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
+
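
The helper above encodes the doorbell bar size as a power of two: the PF publishes the log2 of the size in pfdev_info.bar_size, and zero marks a legacy PF that never filled the field. A hedged sketch of the caller-side distinction (the fallback mirrors the override used in ecore_vf_hw_prepare() below):

/* Sketch: 0 from ecore_vf_hw_bar_size() signals a legacy PF. */
u32 db_size = ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1);

if (!db_size)
	/* Fall back to the fixed DQ window inside the regview bar */
	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;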
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
struct ecore_vf_iov *p_iov;
u32 reg;
+ enum _ecore_status_t rc;
/* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
@@ -368,10 +491,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->p_dev->num_hwfns = 1;
- /* Set the doorbell bar. Assumption: regview is set */
- p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
- PXP_VF_BAR0_START_DQ;
-
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
@@ -386,6 +505,31 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
+	/* Doorbells are tricky; the upper layer has already set the hwfn
+	 * doorbell value, but there are several incompatibility scenarios
+	 * where that would be incorrect and we'd need to override it.
+ */
+ if (p_hwfn->doorbells == OSAL_NULL) {
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+		/* For the leading hw-function the value is always correct,
+		 * but we need to handle the scenario where a legacy PF
+		 * would not support 100g mapped bars later.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+		/* Here, the value would be correct ONLY if the leading hwfn
+		 * received an indication that mapped bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ }
+
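
The three-way selection above condenses to the following table (an editorial sketch; 'pre-set' means the upper layer already assigned p_hwfn->doorbells):

/* doorbells pre-set? | hwfn        | outcome
 * -------------------+-------------+--------------------------------------
 * no                 | any         | regview + PXP_VF_BAR0_START_DQ
 * yes                | leading     | keep it; b_doorbell_bar = true
 * yes                | non-leading | keep it only if the leading hwfn
 *                    |             | negotiated the mapped bar, otherwise
 *                    |             | fall back to the fixed DQ window
 */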
/* Allocate vf2pf msg */
p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_iov->
@@ -428,14 +572,44 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
- return ecore_vf_pf_acquire(p_hwfn);
+ rc = ecore_vf_pf_acquire(p_hwfn);
+
+ /* If VF is 100g using a mapped bar and PF is too old to support that,
+	 * acquisition would succeed - but the VF would have no way of
+	 * knowing the size of the doorbell bar configured in HW, and thus
+	 * will not know how to split it for the 2nd hw-function.
+ * In this case we re-try without the indication of the mapped
+ * doorbell.
+ */
+ if (rc == ECORE_SUCCESS &&
+ p_iov->b_doorbell_bar &&
+ !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ ECORE_IS_CMT(p_hwfn->p_dev)) {
+ rc = _ecore_vf_pf_release(p_hwfn, false);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = ecore_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells,
+ p_hwfn->p_dev->doorbells);
+
+ return rc;
free_vf2pf_request:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
@@ -583,7 +757,7 @@ ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -627,8 +801,8 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
@@ -649,8 +823,10 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
}
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -704,8 +880,10 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -745,11 +923,13 @@ ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -799,8 +979,10 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -831,34 +1013,32 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
struct vfpf_update_rxq_tlv *req;
enum _ecore_status_t rc;
- /* TODO - API is limited to assuming continuous regions of queues,
- * but VF queues might not fullfil this requirement.
- * Need to consider whether we need new TLVs for this, or whether
- * simply doing it iteratively is good enough.
+ /* Starting with CHANNEL_TLV_QID and the need for additional queue
+ * information, this API stopped supporting multiple rxqs.
+ * TODO - remove this and change the API to accept a single queue-cid
+ * in a follow-up patch.
*/
- if (!num_rxqs)
+ if (num_rxqs != 1) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs can no longer update more than a single queue\n");
return ECORE_INVAL;
+ }
-again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- /* Find the length of the current contagious range of queues beginning
- * at first queue's index.
- */
req->rx_qid = (*pp_cid)->rel.queue_id;
- for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
- if (pp_cid[req->num_rxqs]->rel.queue_id !=
- req->rx_qid + req->num_rxqs)
- break;
+ req->num_rxqs = 1;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
if (comp_event_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+ ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -871,15 +1051,6 @@ again:
goto exit;
}
- /* Make sure we're done with all the queues */
- if (req->num_rxqs < num_rxqs) {
- num_rxqs -= req->num_rxqs;
- pp_cid += req->num_rxqs;
- /* TODO - should we give a non-locked variant instead? */
- ecore_vf_pf_req_end(p_hwfn, rc);
- goto again;
- }
-
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
@@ -908,12 +1079,15 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
req->only_untagged = only_untagged;
/* status blocks */
- for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
- if (p_hwfn->sbs_info[i])
- req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -944,7 +1118,7 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1051,7 +1225,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_activate_tlv *p_act_tlv;
size = sizeof(struct vfpf_vport_update_activate_tlv);
- p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_act_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1071,7 +1245,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
- p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1084,7 +1258,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
- p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1095,7 +1269,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
- p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1113,7 +1287,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
- p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
@@ -1135,7 +1309,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
- p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_rss_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1173,8 +1347,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
- p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
- tlv, size);
+ p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
@@ -1188,7 +1361,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
sge_tpa_params = p_params->sge_tpa_params;
size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
- p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1226,7 +1399,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1258,7 +1431,7 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1280,55 +1453,6 @@ exit:
return rc;
}
-enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
-{
- struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
- struct pfvf_def_resp_tlv *resp;
- struct vfpf_first_tlv *req;
- u32 size;
- enum _ecore_status_t rc;
-
- /* clear mailbox and prep first tlv */
- req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
- /* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
- CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- resp = &p_iov->pf2vf_reply->default_resp;
- rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
- if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
- rc = ECORE_AGAIN;
-
- ecore_vf_pf_req_end(p_hwfn, rc);
-
- p_hwfn->b_int_enabled = 0;
-
- if (p_iov->vf2pf_request)
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->vf2pf_request,
- p_iov->vf2pf_request_phys,
- sizeof(union vfpf_tlvs));
- if (p_iov->pf2vf_reply)
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->pf2vf_reply,
- p_iov->pf2vf_reply_phys,
- sizeof(union pfvf_tlvs));
-
- if (p_iov->bulletin.p_virt) {
- size = sizeof(struct ecore_bulletin_content);
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->bulletin.p_virt,
- p_iov->bulletin.phys, size);
- }
-
- OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
-
- return rc;
-}
-
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd)
{
@@ -1374,7 +1498,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
req->vlan = p_ucast->vlan;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1405,7 +1529,7 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1424,6 +1548,39 @@ exit:
return rc;
}
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_read_coal_resp_tlv *resp;
+ struct vfpf_read_coal_req_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*req));
+ req->qid = p_cid->rel.queue_id;
+ req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ *p_coal = resp->coal;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
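
A hedged usage sketch for the new VF-side read (assumptions: p_cid refers to an active queue, and the returned value follows the same units as the set path):

/* Sketch: read back the coalescing currently programmed for a queue. */
u16 coal = 0;

if (ecore_vf_pf_get_coalesce(p_hwfn, &coal, p_cid) == ECORE_SUCCESS)
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Queue[%d] coalesce: %u\n",
		   p_cid->rel.queue_id, coal);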
enum _ecore_status_t
ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
struct ecore_queue_cid *p_cid)
@@ -1446,7 +1603,7 @@ ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
rx_coal, tx_coal, req->qid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
@@ -1479,6 +1636,24 @@ u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change)
{
@@ -1497,8 +1672,8 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
/* Verify the bulletin we see is valid */
- crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
- p_iov->bulletin.size - crc_size);
+ crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
if (crc != shadow.crc)
return ECORE_AGAIN;
@@ -1513,8 +1688,7 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_params, 0, sizeof(*p_params));
@@ -1531,12 +1705,11 @@ void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params)
{
- __ecore_vf_get_link_params(p_hwfn, params,
+ __ecore_vf_get_link_params(params,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
@@ -1558,12 +1731,11 @@ void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link)
{
- __ecore_vf_get_link_state(p_hwfn, link,
+ __ecore_vf_get_link_state(link,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
@@ -1573,7 +1745,7 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps)
{
- __ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+ __ecore_vf_get_link_caps(p_link_caps,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
@@ -1703,3 +1875,10 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
*fw_rev = info->fw_rev;
*fw_eng = info->fw_eng;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
+{
+ p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index f4713884..de2758cb 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -14,6 +14,11 @@
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
+/* Default number of CIDs [total of both Rx and Tx] to be requested
+ * by a VF.
+ */
+#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32)
+
/* This data is held in the ecore_hwfn structure for VFs only. */
struct ecore_vf_iov {
union vfpf_tlvs *vf2pf_request;
@@ -36,25 +41,47 @@ struct ecore_vf_iov {
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
-};
+ /* Current day VFs are passing the SBs physical address on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+ struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ /* Would be set if the VF is to try communicating with its PF
+ * using a hw channel.
+ */
+ bool b_hw_channel;
+#endif
+
+ /* Determines whether VF utilizes doorbells via limited register
+ * bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
+};
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce,
- struct ecore_queue_cid *p_cid);
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce,
- struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Get coalesce per VF's relative queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - coalesce value in micro second for VF queues.
+ * @param p_cid - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- * Coalesce value '0' will omit the configuration.
+ * Coalesce value '0' will omit the configuration.
*
- * @param p_hwfn
- * @param rx_coal - coalesce value in micro second for rx queue
- * @param tx_coal - coalesce value in micro second for tx queue
- * @param queue_cid
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in micro second for rx queue
+ * @param tx_coal - coalesce value in micro second for tx queue
+ * @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
@@ -200,6 +227,15 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id);
+/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param p_sb - may be OSAL_NULL [during removal].
+ */
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb);
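+/* Usage sketch (illustrative, not part of the API): a VF would typically
+ * store the SB during init and clear it again on teardown, e.g.
+ *	ecore_vf_set_sb_info(p_hwfn, sb_id, p_sb_info);
+ *	...
+ *	ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
+ */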
/**
* @brief ecore_vf_pf_vport_start - perform vport start for VF.
@@ -251,34 +287,28 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
- * @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
enum _ecore_status_t
@@ -286,5 +316,8 @@ ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn);
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index be3a326b..9815cf8a 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -163,5 +163,18 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_eng);
void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
u16 *p_vxlan_port, u16 *p_geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief set the VF to use a SW/HW channel when communicating with the PF.
+ * NOTICE: today the likely first place to call this from the VF
+ * would be OSAL_VF_FILL_ACQUIRE_RESC_REQ(); might want to consider
+ * something a bit more appropriate.
+ *
+ * @param p_hwfn
+ * @param b_is_hw - true iff VF is to use a HW-channel
+ */
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw);
+#endif
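+/* Usage sketch (illustrative): per the NOTICE above, an
+ * OSAL_VF_FILL_ACQUIRE_RESC_REQ() implementation could select the channel
+ * before the acquire message goes out, e.g.
+ *	ecore_vf_set_hw_channel(p_hwfn, true);
+ */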
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 66184421..3ccc7665 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -19,13 +19,14 @@
*
**/
struct vf_pf_resc_request {
- u8 num_rxqs;
- u8 num_txqs;
- u8 num_sbs;
- u8 num_mac_filters;
- u8 num_vlan_filters;
- u8 num_mc_filters; /* No limit so superfluous */
- u16 padding;
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+ u8 num_cids;
+ u8 padding;
};
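
/* Sketch (illustrative values): an acquire flow would now also request
 * CIDs alongside queues, e.g. with the default from ecore_vf.h:
 *	p_resc->num_rxqs = 16;
 *	p_resc->num_txqs = 16;
 *	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
 */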
struct hw_sb_info {
@@ -92,6 +93,20 @@ struct vfpf_acquire_tlv {
/* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+
+ /* A requirement for supporting multi-Tx queues on a single queue-zone:
+ * the VF would pass qids as additional information whenever passing queue
+ * references.
+ * TODO - due to the CID limitations in Bar0, VFs currently don't pass
+ * this, and use the legacy CID scheme.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, it might affect the number of CIDs supported, assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR (1 << 3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
@@ -170,6 +185,9 @@ struct pfvf_acquire_resp_tlv {
#endif
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1 << 3)
+
u16 db_size;
u8 indices_per_sb;
u8 os_type;
@@ -178,7 +196,8 @@ struct pfvf_acquire_resp_tlv {
u16 chip_rev;
u8 dev_type;
- u8 padding;
+ /* Doorbell bar size configured in HW: log(size) or 0 */
+ u8 bar_size;
struct pfvf_stats_info stats_info;
@@ -210,7 +229,8 @@ struct pfvf_acquire_resp_tlv {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u8 padding[2];
+ u8 num_cids;
+ u8 padding;
} resc;
u32 bulletin_size;
@@ -223,6 +243,16 @@ struct pfvf_start_queue_resp_tlv {
u8 padding[4];
};
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
+
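+/* A hedged sketch of how a queue-related request might append the qid
+ * TLV above, assuming ecore_add_tlv() returns the newly placed TLV as
+ * in ecore_vf.c; the helper name here is illustrative, not taken from
+ * this patch.
+ */
+static void example_add_qid(struct ecore_vf_iov *p_iov, u8 qid)
+{
+	struct vfpf_qid_tlv *p_qid_tlv;
+
+	p_qid_tlv = ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_QID,
+				  sizeof(*p_qid_tlv));
+	p_qid_tlv->qid = qid;
+}
+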
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
@@ -265,7 +295,15 @@ struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
+
+ /* While the API supports multiple Rx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
u8 num_rxqs;
+
u8 cqe_completion;
u8 padding[4];
};
@@ -275,6 +313,13 @@ struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
+
+ /* While the API supports multiple Tx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
u8 num_txqs;
u8 padding[5];
};
@@ -465,6 +510,19 @@ struct vfpf_update_coalesce {
u8 padding[2];
};
+struct vfpf_read_coal_req_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 qid;
+ u8 is_rx;
+ u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+ struct pfvf_tlv hdr;
+ u16 coal;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -478,6 +536,7 @@ union vfpf_tlvs {
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
+ struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -487,6 +546,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
+ struct pfvf_read_coal_resp_tlv read_coal_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -605,6 +665,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
+ CHANNEL_TLV_QID,
+ CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 1ad8a962..81ca6634 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -28,19 +28,19 @@
typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_OFFSET 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_OFFSET 16
#define OFFSIZE_SIZE_MASK 0xffff0000
/* SECTION_OFFSET calculates the offset in bytes out of offsize */
#define SECTION_OFFSET(_offsize) \
- ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+ ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_OFFSET) << 2))
/* SECTION_SIZE calculates the size in bytes out of offsize */
#define SECTION_SIZE(_offsize) \
- (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+ (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_OFFSET) << 2)
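+
+/* Worked example (illustrative values): for _offsize = 0x00400123,
+ * SECTION_OFFSET() yields (0x0123 << 2) = 0x48c bytes into the
+ * scratchpad, and SECTION_SIZE() yields (0x0040 << 2) = 0x100 bytes per
+ * element.
+ */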
/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
* within section
@@ -59,7 +59,7 @@ struct eth_phy_cfg {
/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
u32 speed;
#define ETH_SPEED_AUTONEG 0
-#define ETH_SPEED_SMARTLINQ 0x8
+#define ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */
u32 pause; /* bitmask */
#define ETH_PAUSE_NONE 0x0
@@ -84,38 +84,28 @@ struct eth_phy_cfg {
/* Remote Serdes Loopback (RX to TX) */
#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
- /* Used to configure the EEE Tx LPI timer, has several modes of
- * operation, according to bits 29:28
- * 2'b00: Timer will be configured by nvram, output will be the value
- * from nvram.
- * 2'b01: Timer will be configured by nvram, output will be in
- * 16xmicroseconds.
- * 2'b10: bits 1:0 contain an nvram value which will be used instead
- * of the one located in the nvram. Output will be that value.
- * 2'b11: bits 19:0 contain the idle timer in microseconds; output
- * will be in 16xmicroseconds.
- * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
- */
- u32 eee_mode;
-#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
-#define EEE_MODE_TIMER_USEC_OFFSET (0)
-#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
-#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
-#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
-/* Set by the driver to request status timer will be in microseconds and and not
- * in EEE policy definition
+ u32 eee_cfg;
+/* EEE is enabled (configuration). Refer to eee_status->active for negotiated
+ * status
*/
-#define EEE_MODE_OUTPUT_TIME (1 << 28)
-/* Set by the driver to override default nvm timer */
-#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
-#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
-#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
+#define EEE_CFG_EEE_ENABLED (1 << 0)
+#define EEE_CFG_TX_LPI (1 << 1)
+#define EEE_CFG_ADV_SPEED_1G (1 << 2)
+#define EEE_CFG_ADV_SPEED_10G (1 << 3)
+#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000)
+
+ u32 link_modes; /* Additional link modes */
+#define LINK_MODE_SMARTLINQ_ENABLE 0x1 /* XXX Deprecate */
};
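
/* A minimal sketch (assumed helper, not driver code) of composing the
 * new eee_cfg layout: enable EEE with Tx LPI, advertise 1G/10G, and set
 * a balanced Tx LPI timer.
 */
static void example_fill_eee_cfg(struct eth_phy_cfg *p_cfg)
{
	p_cfg->eee_cfg = EEE_CFG_EEE_ENABLED | EEE_CFG_TX_LPI |
			 EEE_CFG_ADV_SPEED_1G | EEE_CFG_ADV_SPEED_10G |
			 ((EEE_TX_TIMER_USEC_BALANCED_TIME <<
			   EEE_TX_TIMER_USEC_OFFSET) &
			  EEE_TX_TIMER_USEC_MASK);
}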
struct port_mf_cfg {
u32 dynamic_cfg; /* device control channel */
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_OFFSET 0
#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
u32 reserved[1];
@@ -274,11 +264,11 @@ struct couple_mode_teaming {
/**************************************
* LLDP and DCBX HSI structures
**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
+#define MAX_SYSTEM_LLDP_TLV_DATA 32 /* In dwords. 128 in bytes */
+#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes */
typedef enum _lldp_agent_e {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
@@ -289,15 +279,15 @@ typedef enum _lldp_agent_e {
struct lldp_config_params_s {
u32 config;
#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_TX_INTERVAL_OFFSET 0
#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_HOLD_OFFSET 8
#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_MAX_CREDIT_OFFSET 12
#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_RX_OFFSET 30
#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+#define LLDP_CONFIG_ENABLE_TX_OFFSET 31
/* Holds local Chassis ID TLV header, subtype and 9B of payload.
 * If first byte is 0, then we will use default chassis ID
*/
@@ -321,17 +311,17 @@ struct lldp_status_params_s {
struct dcbx_ets_feature {
u32 flags;
#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_ENABLED_OFFSET 0
#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_WILLING_OFFSET 1
#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_ERROR_OFFSET 2
#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_CBS_OFFSET 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ETS_MAX_TCS_OFFSET 4
#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_OFFSET 8
/* Entries in tc table are organized so that the left most is pri 0, right
 * most is prio 7
 */
@@ -363,7 +353,7 @@ struct dcbx_ets_feature {
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_MAP_OFFSET 0
#define DCBX_APP_PRI_0 0x01
#define DCBX_APP_PRI_1 0x02
#define DCBX_APP_PRI_2 0x04
@@ -373,11 +363,11 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_PRI_6 0x40
#define DCBX_APP_PRI_7 0x80
#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_OFFSET 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_OFFSET 12
#define DCBX_APP_SF_IEEE_RESERVED 0
#define DCBX_APP_SF_IEEE_ETHTYPE 1
#define DCBX_APP_SF_IEEE_TCP_PORT 2
@@ -385,7 +375,7 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+#define DCBX_APP_PROTOCOL_ID_OFFSET 16
};
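
/* The _SHIFT -> _OFFSET rename throughout this file pairs the field
 * definitions with token-pasting accessors; a hypothetical sketch of the
 * shape such a helper takes (the driver defines its own variants):
 */
#define EXAMPLE_GET_FIELD(val, field) \
	(((val) & field ## _MASK) >> field ## _OFFSET)
/* e.g. EXAMPLE_GET_FIELD(entry, DCBX_APP_PROTOCOL_ID) extracts the
 * protocol id from dcbx_app_priority_entry::entry.
 */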
@@ -393,19 +383,19 @@ struct dcbx_app_priority_entry {
struct dcbx_app_priority_feature {
u32 flags;
#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_ENABLED_OFFSET 0
#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_WILLING_OFFSET 1
#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_ERROR_OFFSET 2
/* Not in use
#define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ #define DCBX_APP_DEFAULT_PRI_OFFSET 8
*/
#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_MAX_TCS_OFFSET 12
#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_NUM_ENTRIES_OFFSET 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
@@ -416,7 +406,7 @@ struct dcbx_features {
/* PFC feature */
u32 pfc;
#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_OFFSET 0
#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
@@ -427,17 +417,17 @@ struct dcbx_features {
#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_FLAGS_OFFSET 8
#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_CAPS_OFFSET 8
#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_MBC_OFFSET 14
#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_WILLING_OFFSET 15
#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ENABLED_OFFSET 16
#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
+#define DCBX_PFC_ERROR_OFFSET 17
/* APP feature */
struct dcbx_app_priority_feature app;
@@ -446,10 +436,12 @@ struct dcbx_features {
struct dcbx_local_params {
u32 config;
#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_DYNAMIC \
+ (DCBX_CONFIG_VERSION_IEEE | DCBX_CONFIG_VERSION_CEE)
#define DCBX_CONFIG_VERSION_STATIC 4
u32 flags;
@@ -461,7 +453,7 @@ struct dcbx_mib {
u32 flags;
/*
#define DCBX_CONFIG_VERSION_MASK 0x00000007
- #define DCBX_CONFIG_VERSION_SHIFT 0
+ #define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
@@ -472,19 +464,49 @@ struct dcbx_mib {
};
struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+/* This bit defines whether the system TLVs replace the mandatory TLVs or
+ * are in addition to them. Set to 1 to replace the mandatory TLVs.
+ */
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_OFFSET 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_OFFSET 16
u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
+/* Since this struct is written by the MFW and read by the driver, it needs
+ * sequence guards (as in the case of the DCBX MIB).
+ */
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
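+/* A minimal sketch of the read side the sequence guards imply: retry the
+ * copy until the prefix and suffix sequence numbers agree. Illustrative
+ * only - the driver actually reads shmem through its GRC helpers; plain
+ * pointers are used here just to show the retry pattern.
+ */
+static void example_read_lldp_tlvs(const struct lldp_received_tlvs_s *p_shmem,
+				   struct lldp_received_tlvs_s *p_out)
+{
+	do {
+		p_out->prefix_seq_num = p_shmem->prefix_seq_num;
+		p_out->length = p_shmem->length;
+		OSAL_MEMCPY(p_out->tlvs_buffer, p_shmem->tlvs_buffer,
+			    sizeof(p_out->tlvs_buffer));
+		p_out->suffix_seq_num = p_shmem->suffix_seq_num;
+	} while (p_out->prefix_seq_num != p_out->suffix_seq_num);
+}
+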
struct dcb_dscp_map {
u32 flags;
#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE_OFFSET 0
#define DCB_DSCP_ENABLE 1
u32 dscp_pri_map[8];
};
+/**************************************
+ * Attributes commands
+ **************************************/
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
/**************************************/
/* */
/* P U B L I C G L O B A L */
@@ -512,12 +534,12 @@ struct public_global {
#define MDUMP_REASON_DUMP_AGED (1 << 2)
u32 ext_phy_upgrade_fw;
#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
-#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_OFFSET (0)
#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
-#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
+#define EXT_PHY_FW_UPGRADE_TYPE_OFFSET (16)
};
/**************************************/
@@ -567,9 +589,9 @@ struct public_path {
/* Reset on mcp reset, and incremented for every process kill event. */
u32 process_kill;
#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_COUNTER_OFFSET 0
#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define PROCESS_KILL_GLOB_AEU_BIT_OFFSET 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
};
@@ -697,6 +719,8 @@ struct public_port {
#define LFA_SPEED_MISMATCH (1 << 3)
#define LFA_FLOW_CTRL_MISMATCH (1 << 4)
#define LFA_ADV_SPEED_MISMATCH (1 << 5)
+#define LFA_EEE_MISMATCH (1 << 6)
+#define LFA_LINK_MODES_MISMATCH (1 << 7)
#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
#define LINK_FLAP_COUNT_OFFSET 16
@@ -721,13 +745,13 @@ struct public_port {
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x00000008
#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
/* 1G Passive copper cable */
@@ -775,6 +799,7 @@ struct public_port {
#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
@@ -787,38 +812,55 @@ struct public_port {
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
- /* the status of EEE auto-negotiation
- * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
- * bits 23:20 the speeds advertised for EEE.
- * bits 27:24 the speeds the Link partner advertised for EEE.
- * The supported/adv. modes in bits 27:19 originate from the
- * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
- * bit 28 when 1'b1 EEE was requested.
- * bit 29 when 1'b1 tx lpi was requested.
- * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
- * are 2'b11.
- * bit 31 - When 1'b0 bits 15:0 contain
- * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
- * When 1'b1 those bits contains a value times 16 microseconds.
- */
u32 eee_status;
-#define EEE_TIMER_MASK 0x000fffff
-#define EEE_ADV_STATUS_MASK 0x00f00000
-#define EEE_1G_ADV (1 << 1)
-#define EEE_10G_ADV (1 << 2)
-#define EEE_ADV_STATUS_SHIFT 20
-#define EEE_LP_ADV_STATUS_MASK 0x0f000000
-#define EEE_LP_ADV_STATUS_SHIFT 24
-#define EEE_REQUESTED_BIT 0x10000000
-#define EEE_LPI_REQUESTED_BIT 0x20000000
-#define EEE_ACTIVE_BIT 0x40000000
-#define EEE_TIME_OUTPUT_BIT 0x80000000
+/* Set when EEE negotiation is complete. */
+#define EEE_ACTIVE_BIT (1 << 0)
+
+/* Shows the Local Device EEE capabilities */
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+ #define EEE_1G_ADV (1 << 1)
+ #define EEE_10G_ADV (1 << 2)
+/* Same values as in EEE_LD_ADV, but for the Link Partner */
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+
+/* Supported speeds for EEE */
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+ #define EEE_1G_SUPPORTED (1 << 1)
+ #define EEE_10G_SUPPORTED (1 << 2)
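+/* e.g. (illustrative) a completed negotiation with the link partner
+ * advertising 10G EEE reports EEE_ACTIVE_BIT set and
+ * ((eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET)
+ * == EEE_10G_ADV.
+ */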
u32 eee_remote; /* Used for EEE in LLDP */
#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_SHIFT 16
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+#define ETH_TRANSCEIVER_MONITORING_TYPE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_MONITORING_TYPE_OFFSET 0
+#define ETH_TRANSCEIVER_ADDR_CHNG_REQUIRED (1 << 2)
+#define ETH_TRANSCEIVER_RCV_PWR_MEASURE_TYPE (1 << 3)
+#define ETH_TRANSCEIVER_EXTERNALLY_CALIBRATED (1 << 4)
+#define ETH_TRANSCEIVER_INTERNALLY_CALIBRATED (1 << 5)
+#define ETH_TRANSCEIVER_HAS_DIAGNOSTIC (1 << 6)
+#define ETH_TRANSCEIVER_IDENT_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_IDENT_OFFSET 8
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
};
/**************************************/
@@ -857,11 +899,11 @@ struct public_func {
/* function 0 of each port cannot be hidden */
#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001
#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_OFFSET 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
@@ -871,18 +913,20 @@ struct public_func {
/* MINBW, MAXBW */
/* value range - 0..100, increments in 1 % */
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_OFFSET 8
#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_OFFSET 16
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+#define FUNC_STATUS_LOGICAL_LINK_UP 0x00000002
+#define FUNC_STATUS_FORCED_LINK 0x00000004
u32 mac_upper; /* MAC */
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_OFFSET 0
#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
u32 mac_lower;
#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
@@ -895,7 +939,7 @@ struct public_func {
u32 ovlan_stag; /* tags */
#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_OFFSET 0
#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
u32 pf_allocation; /* vf per pf */
@@ -912,29 +956,46 @@ struct public_func {
u32 drv_id;
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define DRV_ID_PDA_COMP_VER_OFFSET 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_OFFSET 16
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
+ DRV_ID_MCP_HSI_VER_OFFSET)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_OFFSET 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_OFFSET 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
};
/**************************************/
@@ -1019,13 +1080,13 @@ struct ocbb_data_stc {
#define MFW_SENSOR_LOCATION_EXTERNAL 2
#define MFW_SENSOR_LOCATION_SFP 3
-#define SENSOR_LOCATION_SHIFT 0
+#define SENSOR_LOCATION_OFFSET 0
#define SENSOR_LOCATION_MASK 0x000000ff
-#define THRESHOLD_HIGH_SHIFT 8
+#define THRESHOLD_HIGH_OFFSET 8
#define THRESHOLD_HIGH_MASK 0x0000ff00
-#define CRITICAL_TEMPERATURE_SHIFT 16
+#define CRITICAL_TEMPERATURE_OFFSET 16
#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
-#define CURRENT_TEMP_SHIFT 24
+#define CURRENT_TEMP_OFFSET 24
#define CURRENT_TEMP_MASK 0xff000000
struct temperature_status_stc {
u32 num_of_sensors;
@@ -1090,18 +1151,18 @@ struct load_req_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_ROLE_OFFSET 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_OFFSET 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_OFFSET 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_OFFSET 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
@@ -1111,14 +1172,27 @@ struct load_rsp_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_ROLE_OFFSET 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_HSI_OFFSET 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_OFFSET 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
union drv_union_data {
struct mcp_mac wol_mac; /* UNLOAD_DONE */
@@ -1149,6 +1223,8 @@ union drv_union_data {
struct load_req_stc load_req;
struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
/* ... */
};
@@ -1166,8 +1242,8 @@ struct public_drv_mb {
/* - DONT_CARE - Don't flap the link if up */
#define DRV_MSG_CODE_LINK_RESET 0x23000000
- /* Vitaly: LLDP commands */
#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX 0x24100000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
/* OneView feature driver HSI*/
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
@@ -1189,18 +1265,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
/* Param should be set to the transaction size (up to 64 bytes) */
#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
/* MFW will place the file offset and len in file_att struct */
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-/* Read 32bytes of nvram data. Param is [0:23] – Offset [24:31] –
- * Len in Bytes
+/* Read 32bytes of nvram data. Param is [0:23] - Offset [24:31] -
+ * Len in Bytes
*/
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-/* Writes up to 32Bytes to nvram. Param is [0:23] – Offset [24:31] –
- * Len in Bytes. In case this address is in the range of secured file in
+/* Writes up to 32Bytes to nvram. Param is [0:23] - Offset [24:31] -
+ * Len in Bytes. In case this address is in the range of secured file in
* secured mode, the operation will fail
*/
#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
@@ -1242,7 +1319,7 @@ struct public_drv_mb {
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_OFFSET 4
#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
@@ -1270,9 +1347,9 @@ struct public_drv_mb {
/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
#define BW_MAX_MASK 0x000000ff
-#define BW_MAX_SHIFT 0
+#define BW_MAX_OFFSET 0
#define BW_MIN_MASK 0x0000ff00
-#define BW_MIN_SHIFT 8
+#define BW_MIN_OFFSET 8
/* When param is set to 1, all parities will be masked(disabled). When params
* are set to 0, parities will be unmasked again.
@@ -1305,9 +1382,9 @@ struct public_drv_mb {
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_RESC_OFFSET 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5
/* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ 1
/* request resource ownership without aging */
@@ -1318,12 +1395,12 @@ struct public_drv_mb {
/* force resource release */
#define RESOURCE_OPCODE_FORCE_RELEASE 5
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+#define RESOURCE_CMD_REQ_AGE_OFFSET 8
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OWNER_OFFSET 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8
/* resource is free and granted to requester */
#define RESOURCE_OPCODE_GNT 1
/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
@@ -1361,6 +1438,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
/* Clear all logs */
#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 /* Get retained data */
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 /* Clear retain data */
#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_INFO 0x00270000
@@ -1368,14 +1447,28 @@ struct public_drv_mb {
#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
/* Value should be placed in union */
#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
-#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_OFFSET 0
#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
-#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_OFFSET 16
#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
-#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_OFFSET 21
#define DRV_MB_PARAM_PORT_MASK 0x00600000
#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
+#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 /* Param: None */
+/* Param: Set DRV_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
+/* return FW_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
+#define DRV_MSG_CODE_READ_WOL_REG 0X00320000
+#define DRV_MSG_CODE_WRITE_WOL_REG 0X00330000
+#define DRV_MSG_CODE_GET_WOL_BUFFER 0X00340000
+/* Param: [0:23] Attribute key, [24:31] Attribute sub command */
+#define DRV_MSG_CODE_ATTRIBUTE 0x00350000
+
+/* Param: Password len. Union: Plain Password */
+#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000
+
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
@@ -1393,45 +1486,56 @@ struct public_drv_mb {
#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
/* LLDP / DCBX params*/
+ /* To be used with SET_LLDP command */
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0
+ /* To be used with SET_LLDP and REGISTER_LLDP_TLVS_RX commands */
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1
+ /* To be used with REGISTER_LLDP_TLVS_RX command */
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_OFFSET 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_OFFSET 4
+ /* To be used with SET_DCBX command */
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3
#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_OFFSET 0
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
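/* e.g. (illustrative) reading 32 bytes at nvram offset 0x1000 packs as
 * param = 0x1000 | (32 << DRV_MB_PARAM_NVM_LEN_OFFSET) = 0x20001000.
 */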
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_OFFSET 0
#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_OFFSET 16
#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_OFFSET 29
#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_OFFSET 30
#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
-#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0
+#define DRV_MB_PARAM_PHYMOD_LANE_OFFSET 0
#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
-#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
+#define DRV_MB_PARAM_PHYMOD_SIZE_OFFSET 8
#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
- /* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+ /* configure vf MSIX params BB */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+ /* configure vf MSIX for PF params AH */
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF
/* OneView configuration parameters */
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OFFSET 0
#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
@@ -1442,7 +1546,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
-#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OFFSET 0
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
@@ -1455,17 +1559,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
-#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_OFFSET 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_OFFSET 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_OFFSET 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
/* Not Installed*/
@@ -1476,36 +1580,36 @@ struct public_drv_mb {
/* installed and active */
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
-#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0
+#define DRV_MB_PARAM_GPIO_NUMBER_OFFSET 0
#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
-#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
+#define DRV_MB_PARAM_GPIO_VALUE_OFFSET 16
#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
-#define DRV_MB_PARAM_GPIO_DIRECTION_SHIFT 16
+#define DRV_MB_PARAM_GPIO_DIRECTION_OFFSET 16
#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
-#define DRV_MB_PARAM_GPIO_CTRL_SHIFT 24
+#define DRV_MB_PARAM_GPIO_CTRL_OFFSET 24
#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
/* Resource Allocation params - Driver version support*/
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
@@ -1518,11 +1622,27 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_RC_FAILED 2
#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+/* driver supports SmartLinQ parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+/* driver supports EEE parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16
+/* driver supports virtual link parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
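+/* e.g. (illustrative) a driver advertising EEE and virtual link packs
+ * DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ * DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK = 0x00010002.
+ */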
+ /* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
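+/* e.g. (illustrative) ATTRIBUTE_CMD_WRITE (1) on attribute key 0x000102
+ * packs as (1 << DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET) | 0x000102 =
+ * 0x01000102.
+ */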
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
@@ -1545,6 +1665,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_REGISTER_LLDP_TLVS_RX_DONE 0x24100000
#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
@@ -1597,6 +1718,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_ERROR 0x00170000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
@@ -1628,16 +1750,37 @@ struct public_drv_mb {
#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000
#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000
+
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000
+
+#define FW_MSG_CODE_WOL_READ_WRITE_OK 0x00820000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_VAL 0x00830000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_ADDR 0x00840000
+#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000
+#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000
+
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY 0x00020000
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD 0x00030000
u32 fw_mb_param;
/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
+
+/* get MFW feature support response */
+/* MFW supports SmartLinQ */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
+/* MFW supports EEE */
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW supports virtual link */
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
@@ -1702,6 +1845,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 4e588350..c99e805d 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,20 +13,20 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 12/15/2016
+ * Created: 5/8/2017
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
-#define NVM_CFG_version 0x81805
+#define NVM_CFG_version 0x83000
-#define NVM_CFG_new_option_seq 15
+#define NVM_CFG_new_option_seq 23
-#define NVM_CFG_removed_option_seq 0
+#define NVM_CFG_removed_option_seq 1
-#define NVM_CFG_updated_value_seq 1
+#define NVM_CFG_updated_value_seq 4
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
@@ -342,9 +342,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
/* Set caution temperature */
- #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK \
- 0x00FF0000
- #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_OFFSET 16
/* Set external thermal sensor I2C address */
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
0xFF000000
@@ -509,6 +508,10 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
#define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
#define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_MASK 0x30000000
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -1036,7 +1039,13 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
- u32 reserved[58]; /* 0x140 */
+ u32 preboot_debug_mode_std; /* 0x140 */
+ u32 preboot_debug_mode_ext; /* 0x144 */
+ u32 ext_phy_cfg1; /* 0x148 */
+ /* Ext PHY MDI pair swap value */
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_OFFSET 0
+ u32 reserved[55]; /* 0x14C */
};
struct nvm_cfg1_path {
@@ -1134,6 +1143,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1142,6 +1152,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1152,11 +1163,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
@@ -1167,11 +1178,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
@@ -1203,6 +1214,14 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
#define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
#define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_MASK 0x00800000
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_OFFSET 23
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_MASK 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_DISABLED 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_ENABLED 0x1
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
@@ -1243,6 +1262,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM5422X 0x2
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
/* EEE power saving mode */
@@ -1276,19 +1296,27 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \
0x00E00000
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_MASK \
+ 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_ENABLED 0x1
u32 mba_cfg2; /* 0x2C */
#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK 0x01FE0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET 17
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
@@ -1304,9 +1332,12 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_25G 0x4
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_25G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_40G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_40G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_50G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_50G 0x20
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
u32 transceiver_00; /* 0x40 */
/* Define for mapping of transceiver signal module absent */
@@ -1412,6 +1443,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1423,6 +1455,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1434,21 +1467,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
@@ -1490,6 +1523,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1501,6 +1535,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1512,21 +1547,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
@@ -1568,6 +1603,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1579,6 +1615,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1590,21 +1627,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
@@ -1646,6 +1683,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1658,6 +1696,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1670,21 +1709,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
@@ -1726,6 +1765,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
@@ -1735,6 +1775,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
@@ -1745,21 +1786,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
@@ -1795,7 +1836,13 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
- u32 reserved[116]; /* 0x88 */
+ u32 temperature; /* 0x88 */
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET 0
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET 8
+ u32 reserved[115]; /* 0x8C */
};
struct nvm_cfg1_func {
@@ -1910,6 +1957,7 @@ struct nvm_cfg1_func {
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_RDMA 0x8
u32 reserved[8]; /* 0x30 */
};
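Every multi-bit field in these NVM config words follows the same MASK/OFFSET convention: mask the word, then shift the field down by its offset. A minimal, self-contained sketch of that pattern, reusing the MDI pair swap macros added to the glob section above (the helper name is illustrative, not part of the driver):

#include <stdint.h>

#define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_MASK	0x0000FFFF
#define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_OFFSET	0

/* Illustrative helper: extract a field from an NVM config word
 * using its MASK/OFFSET pair. */
static inline uint32_t nvm_cfg_field(uint32_t word, uint32_t mask,
				     uint32_t offset)
{
	return (word & mask) >> offset;
}

/* e.g.:
 * mdi_swap = nvm_cfg_field(glob->ext_phy_cfg1,
 *			    NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_MASK,
 *			    NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_OFFSET);
 */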
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 60286545..ad15d28a 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1205,3 +1205,20 @@
#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
+
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
+#define DORQ_REG_PF_USAGE_CNT 0x1009c0UL
+#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL
+#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL
+#define DORQ_REG_INT_STS 0x100180UL
+ #define DORQ_REG_INT_STS_DB_DROP (0x1 << 1)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1 << 2)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1 << 3)
+#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL
+#define DORQ_REG_INT_STS_WR 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
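The indented DORQ_REG_INT_STS_* and MCP_REG_CPU_STATE_* values are single-bit flags within the register defined just above them, so callers test them directly against the value read from that register. A small self-contained sketch of that use (the helper is hypothetical, not a driver function):

#include <stdint.h>

#define DORQ_REG_INT_STS_DB_DROP		(0x1 << 1)
#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR	(0x1 << 2)
#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL	(0x1 << 3)

/* Returns nonzero when any doorbell-queue error bit is raised in a
 * value read from DORQ_REG_INT_STS. */
static inline int dorq_has_error(uint32_t int_sts)
{
	return (int_sts & (DORQ_REG_INT_STS_DB_DROP |
			   DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
			   DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) != 0;
}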
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 0e059898..88321451 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -453,6 +453,12 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
+#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
+ if (IS_VF(edev)) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
+ }
+#endif
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -463,7 +469,8 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
break;
}
}
- DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ DP_INFO(edev, "vport %s VF tx-switch %s\n", flg ? "activated" : "deactivated",
+ params.tx_switching_flg ? "enabled" : "disabled");
return rc;
}
@@ -520,7 +527,7 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return -1;
}
}
-
+ qdev->enable_lro = flg;
DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
return 0;
@@ -602,15 +609,53 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
return ecore_filter_accept_cmd(edev, 0, flags, false, false,
ECORE_SPQ_MODE_CB, NULL);
}
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
- uint8_t clss, bool mode, bool mask)
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable, bool mask)
{
- memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
- p_tunn->vxlan.b_update_mode = mode;
- p_tunn->vxlan.b_mode_enabled = mask;
- p_tunn->b_update_rx_cls = true;
- p_tunn->b_update_tx_cls = true;
- p_tunn->vxlan.tun_cls = clss;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_ptt *p_ptt;
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = enable;
+ tunn.vxlan.b_mode_enabled = mask;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ break;
+ }
+ }
+
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+ }
+
+ return rc;
}
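qede_vxlan_enable() above also shows the per-hwfn pattern that recurs throughout this patch: a PF must acquire a PTT window before issuing the ramrod, while a VF passes NULL. A condensed, self-contained sketch of that control flow, with the ecore types and calls stubbed out for illustration (the sketch releases the PTT after every iteration for simplicity):

struct ptt { int bar; };	/* stand-in for struct ecore_ptt */
struct hwfn { int is_pf; };	/* stand-in for struct ecore_hwfn */

static struct ptt g_ptt;

static struct ptt *ptt_acquire(struct hwfn *h) { (void)h; return &g_ptt; }
static void ptt_release(struct hwfn *h, struct ptt *p) { (void)h; (void)p; }
static int update_tunn_cfg(struct hwfn *h, struct ptt *p)
{ (void)h; (void)p; return 0; }

static int for_each_hwfn_update(struct hwfn *hwfns, int num)
{
	int i, rc = 0;

	for (i = 0; i < num; i++) {
		struct hwfn *h = &hwfns[i];
		struct ptt *p = NULL;

		if (h->is_pf) {
			p = ptt_acquire(h);	/* PF needs a PTT window */
			if (p == NULL)
				return -1;	/* -EAGAIN in the driver */
		}
		rc = update_tunn_cfg(h, p);	/* VF passes p == NULL */
		if (h->is_pf)
			ptt_release(h, p);
		if (rc != 0)
			break;
	}
	return rc;
}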
static int
@@ -975,7 +1020,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return rc;
}
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -1013,6 +1058,8 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+
+ return 0;
}
static void qede_prandom_bytes(uint32_t *buff)
@@ -1078,6 +1125,7 @@ static void qede_fastpath_start(struct ecore_dev *edev)
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -1088,10 +1136,15 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
if (qede_update_mtu(eth_dev, qdev->new_mtu))
goto err;
qdev->mtu = qdev->new_mtu;
- /* If MTU has changed then update TPA too */
- if (qdev->enable_lro)
- if (qede_enable_tpa(eth_dev, true))
- goto err;
+ }
+
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
}
/* Start queues */
@@ -1103,7 +1156,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
@@ -1114,6 +1167,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
+ /* Update link status */
+ qede_link_update(eth_dev, 0);
+
/* Start/resume traffic */
qede_fastpath_start(edev);
@@ -1139,7 +1195,6 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
if (qdev->enable_lro)
qede_enable_tpa(eth_dev, false);
- /* TODO: Do we need disable LRO or RSS */
/* Stop queues */
qede_stop_queues(eth_dev);
@@ -1157,11 +1212,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ int ret;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
@@ -1226,20 +1282,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
qdev->mtu = rxmode->max_rx_pkt_len;
qdev->new_mtu = qdev->mtu;
- /* Configure TPA parameters */
- if (rxmode->enable_lro) {
- if (qede_enable_tpa(eth_dev, true))
- return -EINVAL;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
- }
- qdev->enable_lro = rxmode->enable_lro;
-
/* Enable VLAN offloads by default */
- qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+ ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK);
+ if (ret)
+ return ret;
DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
@@ -1330,7 +1378,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
}
/* return 0 means link status changed, -1 means not changed */
-static int
+int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1456,11 +1504,11 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
rte_intr_disable(&pci_dev->intr_handle);
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
-static void
+static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1544,6 +1592,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
if (j == txq_stat_cntrs)
break;
}
+
+ return 0;
}
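qede_get_stats() changes from void to int here to match the ethdev stats_get callback, which now reports a status that rte_eth_stats_get() propagates to the caller. A minimal caller-side sketch, assuming port 0 is a valid, started qede port:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void show_port0_stats(void)
{
	struct rte_eth_stats stats;

	/* rte_eth_stats_get() forwards to the PMD's stats_get op and
	 * returns its status. */
	if (rte_eth_stats_get(0, &stats) == 0)
		printf("rx %" PRIu64 " tx %" PRIu64 "\n",
		       stats.ipackets, stats.opackets);
}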
static unsigned
@@ -1806,8 +1856,22 @@ static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_L4_FRAG,
+ /* Inner */
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_FRAG,
RTE_PTYPE_UNKNOWN
};
@@ -2012,6 +2076,10 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
memset(&vport_update_params, 0, sizeof(vport_update_params));
params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
RTE_CACHE_LINE_SIZE);
+ if (params == NULL) {
+ DP_ERR(edev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -2031,7 +2099,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
params->update_rss_config = 1;
/* Fix up RETA for CMT mode device */
- if (edev->num_hwfns > 1)
+ if (ECORE_IS_CMT(edev))
qdev->rss_enable = qede_update_rss_parm_cmt(edev,
params);
vport_update_params.vport_id = 0;
@@ -2152,25 +2220,76 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_tunnel_info tunn; /* @DPDK */
struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ uint16_t udp_port;
int rc, i;
PMD_INIT_FUNC_TRACE(edev);
memset(&tunn, 0, sizeof(tunn));
if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ /* Enable the VXLAN tunnel, if needed, before updating the UDP
+ * port, using the default MAC/VLAN classification.
+ */
+ if (add) {
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+ /* Enable VXLAN if it was not enabled while adding
+ * VXLAN filter.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+ } else {
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+ }
+
tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
- QEDE_VXLAN_DEF_PORT;
+ tunn.vxlan_port.port = udp_port;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
}
+
+ qdev->vxlan.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * VXLAN filters has reached 0, then VXLAN offload can be
+ * disabled.
+ */
+ if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false, true);
}
return 0;
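The add/delete paths above keep qdev->vxlan.udp_port in sync with the hardware and tear the tunnel down once the last filter and port are gone. From the application side this is driven through the generic ethdev UDP tunnel API; a minimal sketch, assuming port 0 is a qede port:

#include <rte_ethdev.h>

static int set_vxlan_udp_port(uint16_t udp_port, int add)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* Both calls land in qede_conf_udp_dst_port(); the add path
	 * enables the VXLAN tunnel first if no filter did it yet. */
	return add ? rte_eth_dev_udp_tunnel_port_add(0, &tunnel) :
		     rte_eth_dev_udp_tunnel_port_delete(0, &tunnel);
}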
@@ -2260,35 +2379,38 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn;
- struct ecore_hwfn *p_hwfn;
enum ecore_filter_ucast_type type;
- enum ecore_tunn_clss clss;
- struct ecore_filter_ucast ucast;
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ struct ecore_filter_ucast ucast = {0};
char str[80];
- uint16_t filter_type;
- int rc, i;
+ uint16_t filter_type = 0;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
- filter_type = conf->filter_type | qdev->vxlan_filter_type;
- /* First determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
- if (clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Wrong filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
+ if (IS_VF(edev))
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
ucast.opcode = ECORE_FILTER_ADD;
/* Skip MAC/VLAN if filter is based on VNI */
@@ -2308,22 +2430,34 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
if (rc != ECORE_SUCCESS)
return rc;
- qdev->vxlan_filter_type = filter_type;
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = filter_type;
+ if (!qdev->vxlan.enable)
+ return qede_vxlan_enable(eth_dev, clss, true, true);
- DP_INFO(edev, "Enabling VXLAN tunneling\n");
- qede_set_cmn_tunn_param(&tunn, clss, true, true);
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
- &tunn, ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- }
- }
- qdev->num_tunn_filters++; /* Filter added successfully */
break;
case RTE_ETH_FILTER_DELETE:
+ if (IS_VF(edev))
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
ucast.opcode = ECORE_FILTER_REMOVE;
if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
@@ -2337,33 +2471,16 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
if (rc != ECORE_SUCCESS)
return rc;
- qdev->vxlan_filter_type = filter_type;
- qdev->num_tunn_filters--;
+ qdev->vxlan.num_filters--;
/* Disable VXLAN if VXLAN filters become 0 */
- if (qdev->num_tunn_filters == 0) {
- DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
- /* Use 0 as tunnel mode */
- qede_set_cmn_tunn_param(&tunn, clss, false, true);
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
- ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev,
- "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- break;
- }
- }
- }
+ if (qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev, clss, false, true);
break;
default:
DP_ERR(edev, "Unsupported operation %d\n", filter_op);
return -EINVAL;
}
- DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
return 0;
}
@@ -2491,6 +2608,8 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
};
static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2523,6 +2642,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
+ adapter->ethdev = eth_dev;
edev = &adapter->edev;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
@@ -2583,7 +2703,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
*/
- if (edev->num_hwfns > 1 && IS_PF(edev)) {
+ if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
rc = rte_eal_alarm_set(timer_period * US_PER_S,
qede_poll_sp_sb_cb,
(void *)eth_dev);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a3254b12..021de5c0 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -29,15 +29,11 @@
#include "base/ecore_chain.h"
#include "base/ecore_status.h"
#include "base/ecore_hsi_eth.h"
-#include "base/ecore_dev_api.h"
#include "base/ecore_iov_api.h"
#include "base/ecore_cxt.h"
#include "base/nvm_cfg.h"
-#include "base/ecore_iov_api.h"
#include "base/ecore_sp_commands.h"
#include "base/ecore_l2.h"
-#include "base/ecore_dev_api.h"
-#include "base/ecore_l2.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -49,8 +45,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 5
-#define QEDE_PMD_VERSION_REVISION 2
+#define QEDE_PMD_VERSION_MINOR 6
+#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
@@ -122,7 +118,6 @@
#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV
-#define QEDE_VXLAN_DEF_PORT 8472
extern char fw_file[];
@@ -171,6 +166,13 @@ struct qede_fdir_info {
SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
};
+struct qede_vxlan_tunn {
+ bool enable;
+ uint16_t num_filters;
+ uint16_t filter_type;
+#define QEDE_VXLAN_DEF_PORT (4789)
+ uint16_t udp_port;
+};
/*
* Structure to store private data for each port.
@@ -200,11 +202,11 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
- uint16_t num_tunn_filters;
- uint16_t vxlan_filter_type;
+ struct qede_vxlan_tunn vxlan;
struct qede_fdir_info fdir_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ void *ethdev;
};
/* Non-static functions */
@@ -221,6 +223,9 @@ int qed_fill_eth_dev_info(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+int qede_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete);
+
int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
enum rte_filter_op op, void *arg);
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 7bd5c5d6..da6364ee 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -53,7 +53,7 @@ int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "flowdir is disabled\n");
break;
case RTE_FDIR_MODE_PERFECT:
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
qdev->fdir_info.arfs.arfs_enable = false;
return -ENOTSUP;
@@ -171,8 +171,8 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
&qdev->fdir_info.arfs);
}
/* configure filter with ECORE_SPQ_MODE_EBLOCK */
- rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
- (dma_addr_t)mz->phys_addr,
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
+ (dma_addr_t)mz->iova,
pkt_len,
fdir_filter->action.rx_queue,
0, add);
@@ -386,7 +386,7 @@ qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
switch (filter_op) {
case RTE_ETH_FILTER_NOP:
/* Typically used to query flowdir support */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
@@ -425,7 +425,7 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
switch (filter_op) {
case RTE_ETH_FILTER_NOP:
/* Typically used to query fdir support */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
DP_ERR(edev, "flowdir is not supported in 100G mode\n");
return -ENOTSUP;
}
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 9864bb44..246f0fd3 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -40,14 +40,19 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24
uint32_t flash_size;
- uint8_t mf_mode;
+ bool b_arfs_capable;
+ bool b_inter_pf_switch;
bool tx_switching;
u16 mtu;
+ bool smart_an;
+
/* Out param for qede */
bool vxlan_enable;
bool gre_enable;
bool geneve_enable;
+
+ enum ecore_dev_type dev_type;
};
struct qed_dev_eth_info {
@@ -79,6 +84,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3)
+#define QED_LINK_OVERRIDE_EEE_CONFIG (1 << 5)
uint32_t override_flags;
bool autoneg;
uint32_t adv_speeds;
@@ -87,6 +93,7 @@ struct qed_link_params {
#define QED_LINK_PAUSE_RX_ENABLE (1 << 1)
#define QED_LINK_PAUSE_TX_ENABLE (1 << 2)
uint32_t pause_config;
+ struct ecore_link_eee_params eee;
};
struct qed_link_output {
@@ -97,9 +104,15 @@ struct qed_link_output {
uint32_t speed; /* In Mb/s */
uint32_t adv_speed; /* Speed mask */
uint8_t duplex; /* In DUPLEX defs */
- uint8_t port; /* In PORT defs */
+ uint16_t port; /* In PORT defs */
bool autoneg;
uint32_t pause_config;
+
+ /* EEE - capability & param */
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct ecore_link_eee_params eee;
};
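The new QED_LINK_OVERRIDE_EEE_CONFIG flag and eee member tell qed_set_link() to copy the EEE parameters into the MCP link config (see the memcpy added to qed_set_link() below). A short sketch of setting up such a request, assuming this patched qede_if.h; the timer value is illustrative only:

#include <stdbool.h>
#include "qede_if.h"	/* struct qed_link_params, as patched above */

/* Sketch: request an EEE config change through qed_set_link(). */
static void request_eee(struct qed_link_params *lp)
{
	lp->override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
	lp->eee.enable = true;
	lp->eee.tx_lpi_enable = true;
	lp->eee.tx_lpi_timer = 1000;	/* usec before LPI entry */
}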
struct qed_slowpath_params {
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index a6ff7af2..ae187321 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.30.12.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -36,6 +36,7 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
{
edev->regview = pci_dev->mem_resource[0].addr;
edev->doorbells = pci_dev->mem_resource[2].addr;
+ edev->db_size = pci_dev->mem_resource[2].len;
}
static int
@@ -221,10 +222,11 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
static int qed_slowpath_start(struct ecore_dev *edev,
struct qed_slowpath_params *params)
{
+ struct ecore_drv_load_params drv_load_params;
+ struct ecore_hw_init_params hw_init_params;
+ struct ecore_mcp_drv_version drv_version;
const uint8_t *data = NULL;
struct ecore_hwfn *hwfn;
- struct ecore_mcp_drv_version drv_version;
- struct ecore_hw_init_params hw_init_params;
struct ecore_ptt *p_ptt;
int rc;
@@ -280,8 +282,13 @@ static int qed_slowpath_start(struct ecore_dev *edev,
hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
- hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
- hw_init_params.avoid_eng_reset = false;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_ALWAYS;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
rc = ecore_hw_init(edev, &hw_init_params);
if (rc) {
DP_ERR(edev, "ecore_hw_init failed\n");
@@ -335,6 +342,7 @@ err:
static int
qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(edev);
struct ecore_ptt *ptt = NULL;
struct ecore_tunnel_info *tun = &edev->tunnel;
@@ -357,6 +365,7 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
dev_info->num_hwfns = edev->num_hwfns;
dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+ dev_info->dev_type = edev->type;
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
@@ -367,9 +376,14 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
dev_info->fw_eng = FW_ENGINEERING_VERSION;
if (IS_PF(edev)) {
- dev_info->mf_mode = edev->mf_mode;
+ dev_info->b_inter_pf_switch =
+ OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits);
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &edev->mf_bits))
+ dev_info->b_arfs_capable = true;
dev_info->tx_switching = false;
+ dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn);
+
ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
if (ptt) {
ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
@@ -412,7 +426,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
info->num_queues +=
FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
- if (edev->p_iov_info)
+ if (IS_ECORE_SRIOV(edev))
max_vf_vlan_filters = edev->p_iov_info->total_vfs *
ECORE_ETH_VF_NUM_VLAN_FILTERS;
info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
@@ -423,7 +437,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
} else {
ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
&info->num_queues);
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
info->num_queues += queues;
}
@@ -479,6 +493,7 @@ qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
}
static void qed_fill_link(struct ecore_hwfn *hwfn,
+ __rte_unused struct ecore_ptt *ptt,
struct qed_link_output *if_link)
{
struct ecore_mcp_link_params params;
@@ -529,17 +544,42 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
if (params.pause.forced_tx)
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ if (link_caps.default_eee == ECORE_MCP_EEE_UNSUPPORTED) {
+ if_link->eee_supported = false;
+ } else {
+ if_link->eee_supported = true;
+ if_link->eee_active = link.eee_active;
+ if_link->sup_caps = link_caps.eee_speed_caps;
+ /* MFW clears adv_caps on eee disable; use configured value */
+ if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
+ params.eee.adv_caps;
+ if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
+ if_link->eee.enable = params.eee.enable;
+ if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
+ if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
+ }
}
static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
- qed_fill_link(&edev->hwfns[0], if_link);
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
-#ifdef CONFIG_QED_SRIOV
- for_each_hwfn(cdev, i)
- qed_inform_vf_link_state(&cdev->hwfns[i]);
-#endif
+ hwfn = &edev->hwfns[0];
+ if (IS_PF(edev)) {
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
+
+ qed_fill_link(hwfn, ptt, if_link);
+
+ if (ptt)
+ ecore_ptt_release(hwfn, ptt);
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
}
static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
@@ -578,6 +618,10 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
link_params->pause.forced_tx = false;
}
+ if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
+ memcpy(&link_params->eee, &params->eee,
+ sizeof(link_params->eee));
+
rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
ecore_ptt_release(hwfn, ptt);
@@ -587,9 +631,10 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
void qed_link_update(struct ecore_hwfn *hwfn)
{
- struct qed_link_output if_link;
+ struct ecore_dev *edev = hwfn->p_dev;
+ struct qede_dev *qdev = (struct qede_dev *)edev;
- qed_fill_link(hwfn, &if_link);
+ qede_link_update((struct rte_eth_dev *)qdev->ethdev, 0);
}
static int qed_drain(struct ecore_dev *edev)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 5c3613c7..8e8536f8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -28,7 +28,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
}
rxq->sw_rx_ring[idx].mbuf = new_mb;
rxq->sw_rx_ring[idx].page_offset = 0;
- mapping = rte_mbuf_data_dma_addr_default(new_mb);
+ mapping = rte_mbuf_data_iova_default(new_mb);
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
@@ -364,12 +364,12 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
uint16_t sb_id)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
@@ -379,7 +379,7 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
if (rc) {
DP_ERR(edev, "Status block initialization failed\n");
OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
return rc;
}
@@ -453,7 +453,7 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
if (fp->sb_info) {
OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
fp->sb_info->sb_phys,
- sizeof(struct status_block));
+ sizeof(struct status_block_e4));
rte_free(fp->sb_info);
fp->sb_info = NULL;
}
@@ -555,7 +555,7 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
params.queue_id = rx_queue_id / edev->num_hwfns;
params.vport_id = 0;
params.stats_id = params.vport_id;
- params.sb = fp->sb_info->igu_sb_id;
+ params.p_sb = fp->sb_info;
DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
fp->rxq->queue_id, fp->sb_info->igu_sb_id);
params.sb_idx = RX_PI;
@@ -614,7 +614,7 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
params.queue_id = tx_queue_id / edev->num_hwfns;
params.vport_id = 0;
params.stats_id = params.vport_id;
- params.sb = fp->sb_info->igu_sb_id;
+ params.p_sb = fp->sb_info;
DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
fp->txq->queue_id, fp->sb_info->igu_sb_id);
params.sb_idx = TX_PI(0); /* tc = 0 */
@@ -780,7 +780,7 @@ int qede_start_queues(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
uint8_t id;
- int rc;
+ int rc = -1;
for_each_rss(id) {
rc = qede_rx_queue_start(eth_dev, id);
@@ -844,6 +844,109 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
return 0;
}
+/* Returns outer L3 and L4 packet_type for tunneled packets */
+static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ if (ipv6_hdr->proto == IPPROTO_TCP)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else if (ipv6_hdr->proto == IPPROTO_UDP)
+ packet_type |= RTE_PTYPE_L4_UDP;
+ }
+
+ return packet_type;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
+{
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER,
+ /* Frags with no VLAN */
+ [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER,
+ /* VLANs */
+ [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ /* Frags with VLAN */
+ [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ };
+
+ /* Bits (0..3) provide the L3/L4 protocol type */
+ /* Bits (4,5) provide frag and VLAN info */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val];
+
+ return RTE_PTYPE_UNKNOWN;
+}
+
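Both lookup functions build the same six-bit table index from the parse flags, matching the QEDE_PKT_TYPE_* values in qede_rxtx.h. A worked example of the layout:

/* Index layout shared by both lookup functions (see qede_rxtx.h):
 *   bits 0..3  L3/L4 type, e.g. IPv4+UDP = 0x9
 *   bit  4     IP fragment
 *   bit  5     VLAN tag present
 *
 * Worked example: a VLAN-tagged inner IPv4/UDP frame yields
 *   val = 0x20 | 0x09 = 0x29 == QEDE_PKT_TYPE_IPV4_UDP_VLAN
 * which the inner table above maps to RTE_PTYPE_INNER_L3_IPV4 |
 * RTE_PTYPE_INNER_L4_UDP | RTE_PTYPE_INNER_L2_ETHER_VLAN.
 */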
static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
uint16_t val;
@@ -851,24 +954,68 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
/* Lookup table */
static const uint32_t
ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
- [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
- [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
- [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
- [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER,
+ /* Frags with no VLAN */
+ [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER,
+ /* VLANs */
+ [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ /* Frags with VLAN */
+ [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER_VLAN,
+ [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_L2_ETHER_VLAN,
};
/* Bits (0..3) provide the L3/L4 protocol type */
+ /* Bits (4,5) provide frag and VLAN info */
val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
(PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
- PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
if (val < QEDE_PKT_TYPE_MAX)
- return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
- else
- return RTE_PTYPE_UNKNOWN;
+ return ptype_lkup_tbl[val];
+
+ return RTE_PTYPE_UNKNOWN;
}
static inline uint8_t
@@ -917,7 +1064,7 @@ qede_reuse_page(__rte_unused struct qede_dev *qdev,
curr_prod = &rxq->sw_rx_ring[idx];
*curr_prod = *curr_cons;
- new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+ new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
curr_prod->page_offset;
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
@@ -1100,6 +1247,27 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
return 0;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+static inline void
+print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
+ uint8_t bitfield)
+{
+ PMD_RX_LOG(INFO, rxq,
+ "len 0x%x bf 0x%x hash_val 0x%x"
+ " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
+ " inner_l2=%s inner_l3=%s inner_l4=%s\n",
+ m->data_len, bitfield, m->hash.rss,
+ (unsigned long)m->ol_flags,
+ rte_get_ptype_l2_name(m->packet_type),
+ rte_get_ptype_l3_name(m->packet_type),
+ rte_get_ptype_l4_name(m->packet_type),
+ rte_get_ptype_tunnel_name(m->packet_type),
+ rte_get_ptype_inner_l2_name(m->packet_type),
+ rte_get_ptype_inner_l3_name(m->packet_type),
+ rte_get_ptype_inner_l4_name(m->packet_type));
+}
+#endif
+
uint16_t
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -1120,7 +1288,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t parse_flag;
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
uint8_t bitfield_val;
- enum rss_hash_type htype;
#endif
uint8_t tunn_parse_flag;
uint8_t j;
@@ -1214,8 +1381,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
bitfield_val = fp_cqe->bitfields;
- htype = (uint8_t)GET_FIELD(bitfield_val,
- ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
#endif
} else {
parse_flag =
@@ -1226,8 +1391,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
bitfield_val = cqe_start_tpa->bitfields;
- htype = (uint8_t)GET_FIELD(bitfield_val,
- ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
#endif
rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
}
@@ -1247,8 +1410,17 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
else
flags = fp_cqe->tunnel_pars_flags.flags;
tunn_parse_flag = flags;
+ /* Tunnel_type */
packet_type =
qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+ /* Inner header */
+ packet_type |=
+ qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+ /* Outer L3/L4 types are not available in the CQE */
+ packet_type |=
+ qede_rx_cqe_to_pkt_type_outer(rx_mb);
}
} else {
PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
@@ -1275,21 +1447,16 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
}
- if (CQE_HAS_VLAN(parse_flag)) {
- ol_flags |= PKT_RX_VLAN_PKT;
+ if (CQE_HAS_VLAN(parse_flag) ||
+ CQE_HAS_OUTER_VLAN(parse_flag)) {
+ /* Note: FW doesn't indicate Q-in-Q packet */
+ ol_flags |= PKT_RX_VLAN;
if (qdev->vlan_strip_flg) {
ol_flags |= PKT_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
- if (CQE_HAS_OUTER_VLAN(parse_flag)) {
- ol_flags |= PKT_RX_QINQ_PKT;
- if (qdev->vlan_strip_flg) {
- rx_mb->vlan_tci = vlan_tci;
- ol_flags |= PKT_RX_QINQ_STRIPPED;
- }
- rx_mb->vlan_tci_outer = 0;
- }
+
/* RSS Hash */
if (qdev->rss_enable) {
ol_flags |= PKT_RX_RSS_HASH;
@@ -1341,11 +1508,9 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb->ol_flags = ol_flags;
rx_mb->data_len = len;
rx_mb->packet_type = packet_type;
- PMD_RX_LOG(INFO, rxq,
- "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
- " ol_flags 0x%04lx\n",
- packet_type, len, htype, rx_mb->hash.rss,
- (unsigned long)ol_flags);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
if (!tpa_start_flg) {
rx_mb->nb_segs = fp_cqe->bd_num;
rx_mb->pkt_len = pkt_len;
@@ -1400,7 +1565,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
} else if (nb_segs == 1) {
@@ -1410,7 +1575,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
} else {
@@ -1418,7 +1583,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
nb_segs++;
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
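The rte_mbuf_data_dma_addr*() calls replaced throughout this hunk reflect the 17.11 DMA-to-IOVA renaming in the mbuf API; the semantics are unchanged. A one-line sketch of the new accessor, assuming a valid mbuf:

#include <rte_mbuf.h>

/* Bus address of the mbuf's data; was rte_mbuf_data_dma_addr(m). */
static rte_iova_t mbuf_data_bus_addr(struct rte_mbuf *m)
{
	return rte_mbuf_data_iova(m);
}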
@@ -1801,7 +1966,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nbds++;
/* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
mbuf->data_len);
bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
@@ -1814,11 +1979,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nbds++;
/* BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
hdr_size);
/* BD2 */
QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
+ rte_mbuf_data_iova(mbuf)),
mbuf->data_len - hdr_size);
bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
if (mplsoudp_flg) {
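All the rte_mbuf_data_dma_addr() calls in this file become rte_mbuf_data_iova(): 17.11 renamed the mbuf physical-address helpers to IOVA terminology without changing their behaviour. A one-function sketch of the equivalence:

	#include <rte_mbuf.h>

	static inline rte_iova_t
	bd_data_addr(const struct rte_mbuf *m)
	{
		/* Same value the old helper returned: buf_iova + data_off */
		return rte_mbuf_data_iova(m);
	}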
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index b551fd6a..acf9e475 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -84,7 +84,8 @@
/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN 0x0
-#define QEDE_PKT_TYPE_MAX 0xf
+#define QEDE_PKT_TYPE_MAX 0x3f
+
#define QEDE_PKT_TYPE_IPV4 0x1
#define QEDE_PKT_TYPE_IPV6 0x2
#define QEDE_PKT_TYPE_IPV4_TCP 0x5
@@ -92,6 +93,20 @@
#define QEDE_PKT_TYPE_IPV4_UDP 0x9
#define QEDE_PKT_TYPE_IPV6_UDP 0xa
+/* For fragmented pkts, the corresponding IP bit is set as well */
+#define QEDE_PKT_TYPE_IPV4_FRAG 0x11
+#define QEDE_PKT_TYPE_IPV6_FRAG 0x12
+
+#define QEDE_PKT_TYPE_IPV4_VLAN 0x21
+#define QEDE_PKT_TYPE_IPV6_VLAN 0x22
+#define QEDE_PKT_TYPE_IPV4_TCP_VLAN 0x25
+#define QEDE_PKT_TYPE_IPV6_TCP_VLAN 0x26
+#define QEDE_PKT_TYPE_IPV4_UDP_VLAN 0x29
+#define QEDE_PKT_TYPE_IPV6_UDP_VLAN 0x2a
+
+#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG 0x31
+#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG 0x32
+
/* Macros for tunneled packets with next protocol lkup table */
#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
#define QEDE_PKT_TYPE_TUNN_GRE 0x2
@@ -99,12 +114,12 @@
/* Bit 2 is don't care bit */
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
-#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
-#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
@@ -112,7 +127,7 @@
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
-#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
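The non-tunnel values earlier in this header decompose into per-feature bits, which is why QEDE_PKT_TYPE_MAX grows from 0xf to 0x3f once fragment and VLAN bits are added. A sanity sketch of that reading (the bit names are hypothetical; the header only enumerates the combined values):

	#include <assert.h>

	#define QEDE_BIT_IPV4 0x01	/* hypothetical decomposition */
	#define QEDE_BIT_IPV6 0x02
	#define QEDE_BIT_TCP  0x04
	#define QEDE_BIT_UDP  0x08
	#define QEDE_BIT_FRAG 0x10	/* new */
	#define QEDE_BIT_VLAN 0x20	/* new */

	static_assert((QEDE_BIT_VLAN | QEDE_BIT_UDP | QEDE_BIT_IPV4) == 0x29,
		      "matches QEDE_PKT_TYPE_IPV4_UDP_VLAN");
	static_assert((QEDE_BIT_FRAG | QEDE_BIT_IPV6) == 0x12,
		      "matches QEDE_PKT_TYPE_IPV6_FRAG");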
diff --git a/drivers/net/ring/Makefile b/drivers/net/ring/Makefile
index b7e1a378..085ffa57 100644
--- a/drivers/net/ring/Makefile
+++ b/drivers/net/ring/Makefile
@@ -38,8 +38,11 @@ LIB = librte_pmd_ring.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vdev
-EXPORT_MAP := rte_eth_ring_version.map
+EXPORT_MAP := rte_pmd_ring_version.map
LIBABIVER := 2
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 464d3d38..a73c631f 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -36,9 +36,8 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_string_fns.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
@@ -190,7 +189,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
@@ -214,6 +213,8 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ipackets = rx_total;
stats->opackets = tx_total;
stats->oerrors = tx_err_total;
+
+ return 0;
}
static void
@@ -356,7 +357,6 @@ do_eth_dev_ring_create(const char *name,
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
data->kdrv = RTE_KDRV_NONE;
data->numa_node = numa_node;
@@ -394,7 +394,7 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
};
char args_str[32] = { 0 };
char ring_name[32] = { 0 };
- uint8_t port_id = RTE_MAX_ETHPORTS;
+ uint16_t port_id = RTE_MAX_ETHPORTS;
int ret;
/* do some parameter checking */
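eth_stats_get switching from void to int follows the 17.11 ethdev change that lets the stats callback report failure, and port_id widens to uint16_t with the 16-bit port id extension. A minimal sketch of the new callback contract, with a hypothetical private struct:

	#include <rte_ethdev.h>

	struct my_private { uint64_t rx_pkts, tx_pkts; };	/* hypothetical */

	static int
	my_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	{
		const struct my_private *p = dev->data->dev_private;

		stats->ipackets = p->rx_pkts;
		stats->opackets = p->tx_pkts;
		return 0;	/* since 17.11: 0 on success, negative on error */
	}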
diff --git a/drivers/net/ring/rte_eth_ring_version.map b/drivers/net/ring/rte_pmd_ring_version.map
index 1f785d94..1f785d94 100644
--- a/drivers/net/ring/rte_eth_ring_version.map
+++ b/drivers/net/ring/rte_pmd_ring_version.map
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 57aa963b..2cfd62a2 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -65,13 +65,19 @@ CFLAGS += -Wbad-function-cast
CFLAGS_BASE_DRIVER += -Wno-empty-body
else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+# Suppress ICC false positive warning that 'bulk' may be used before
+# its value is set
+CFLAGS_sfc_ef10_tx.o += -wd3656
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
#
# List of base driver object files for which
# special CFLAGS above should be applied
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), \
$(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index 695bb847..e1faf1dd 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -123,29 +123,33 @@ ef10_filter_init(
#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST));
EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
- MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST));
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST));
#undef MATCH_MASK
EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
@@ -186,27 +190,27 @@ efx_mcdi_filter_op_add(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
- MC_CMD_FILTER_OP_OUT_LEN)];
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN)];
efx_rc_t rc;
memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+ req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
switch (filter_op) {
case MC_CMD_FILTER_OP_IN_OP_REPLACE:
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
handle->efh_lo);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI,
handle->efh_hi);
/* Fall through */
case MC_CMD_FILTER_OP_IN_OP_INSERT:
case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op);
break;
default:
EFSYS_ASSERT(0);
@@ -214,82 +218,123 @@ efx_mcdi_filter_op_add(
goto fail1;
}
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
EVB_PORT_ID_ASSIGNED);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
spec->efs_match_flags);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
- MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
spec->efs_dmaq_id);
+
+#if EFSYS_OPT_RX_SCALE
if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
- spec->efs_rss_context);
+ uint32_t rss_context;
+
+ if (spec->efs_rss_context == EFX_RSS_CONTEXT_DEFAULT)
+ rss_context = enp->en_rss_context;
+ else
+ rss_context = spec->efs_rss_context;
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT,
+ rss_context);
}
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+#endif
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE,
spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
- MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
- MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
- MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT);
if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
/*
* NOTE: Unlike most MCDI requests, the filter fields
* are presented in network (big endian) byte order.
*/
- memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC),
spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
- memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC),
spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
- MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT,
__CPU_TO_BE_16(spec->efs_rem_port));
- MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT,
__CPU_TO_BE_16(spec->efs_loc_port));
- MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE,
__CPU_TO_BE_16(spec->efs_ether_type));
- MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN,
__CPU_TO_BE_16(spec->efs_inner_vid));
- MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+ MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN,
__CPU_TO_BE_16(spec->efs_outer_vid));
/* IP protocol (in low byte, high byte is zero) */
- MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+ MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO,
spec->efs_ip_proto);
EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
- MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
- MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);
- memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP),
&spec->efs_rem_host.eo_byte[0],
- MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
- memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+ MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP),
&spec->efs_loc_host.eo_byte[0],
- MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN);
+
+ /*
+ * On Medford, filters for encapsulated packets match based on
+ * the ether type and IP protocol in the outer frame. In
+ * addition we need to fill in the VNI or VSID type field.
+ */
+ switch (spec->efs_encap_type) {
+ case EFX_TUNNEL_PROTOCOL_NONE:
+ break;
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ MCDI_IN_POPULATE_DWORD_1(req,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ?
+ MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN :
+ MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE);
+ break;
+ case EFX_TUNNEL_PROTOCOL_NVGRE:
+ MCDI_IN_POPULATE_DWORD_1(req,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VSID_TYPE,
+ MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail2;
+ }
}
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail2;
+ goto fail3;
}
- if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
rc = EMSGSIZE;
- goto fail3;
+ goto fail4;
}
- handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
- handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI);
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
@@ -308,24 +353,24 @@ efx_mcdi_filter_op_delete(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
- MC_CMD_FILTER_OP_OUT_LEN)];
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN)];
efx_rc_t rc;
memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+ req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
switch (filter_op) {
case MC_CMD_FILTER_OP_IN_OP_REMOVE:
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP,
MC_CMD_FILTER_OP_IN_OP_REMOVE);
break;
case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP,
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
break;
default:
@@ -334,8 +379,8 @@ efx_mcdi_filter_op_delete(
goto fail1;
}
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
- MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi);
efx_mcdi_execute_quiet(enp, &req);
@@ -344,7 +389,7 @@ efx_mcdi_filter_op_delete(
goto fail2;
}
- if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
rc = EMSGSIZE;
goto fail3;
}
@@ -390,6 +435,8 @@ ef10_filter_equal(
return (B_FALSE);
if (left->efs_ip_proto != right->efs_ip_proto)
return (B_FALSE);
+ if (left->efs_encap_type != right->efs_encap_type)
+ return (B_FALSE);
return (B_TRUE);
@@ -549,10 +596,6 @@ ef10_filter_add_internal(
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
-#if EFSYS_OPT_RX_SCALE
- spec->efs_rss_context = enp->en_rss_context;
-#endif
-
hash = ef10_filter_hash(spec);
/*
@@ -1194,6 +1237,108 @@ fail1:
return (rc);
}
+typedef struct ef10_filter_encap_entry_s {
+ uint16_t ether_type;
+ efx_tunnel_protocol_t encap_type;
+ uint32_t inner_frame_match;
+} ef10_filter_encap_entry_t;
+
+#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \
+ { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match }
+
+static ef10_filter_encap_entry_t ef10_filter_encap_list[] = {
+ EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST),
+
+ EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST),
+
+ EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST),
+ EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST),
+};
+
+#undef EF10_ENCAP_FILTER_ENTRY
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_encap_filters(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <=
+ EFX_ARRAY_SIZE(table->eft_encap_filter_indexes));
+
+ /*
+ * On Medford, full-featured firmware can identify packets as being
+ * tunnel encapsulated, even if no encapsulated packet offloads are in
+ * use. When packets are identified as such, ordinary filters are not
+ * applied, only ones specific to encapsulated packets. Hence we need to
+ * insert filters for encapsulated packets in order to receive them.
+ *
+ * Separate filters need to be inserted for each ether type,
+ * encapsulation type, and inner frame type (unicast or multicast). To
+ * keep things simple and reduce the number of filters needed, catch-all
+ * filters for all combinations of types are inserted, even if
+ * all_unicst or all_mulcst have not been set. (These catch-all filters
+ * may well, however, fail to insert on unprivileged functions.)
+ */
+ table->eft_encap_filter_count = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) {
+ efx_filter_spec_t spec;
+ ef10_filter_encap_entry_t *encap_filter =
+ &ef10_filter_encap_list[i];
+
+ /*
+ * Skip multicast filters if we've not been asked for
+ * any multicast traffic.
+ */
+ if ((mulcst == B_FALSE) &&
+ (encap_filter->inner_frame_match ==
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST))
+ continue;
+
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ table->eft_default_rxq);
+ efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type);
+ rc = efx_filter_spec_set_encap_type(&spec,
+ encap_filter->encap_type,
+ encap_filter->inner_frame_match);
+ if (rc != 0)
+ goto fail1;
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &table->eft_encap_filter_indexes[
+ table->eft_encap_filter_count]);
+ if (rc != 0) {
+ if (rc != EACCES)
+ goto fail2;
+ } else {
+ table->eft_encap_filter_count++;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
static void
ef10_filter_remove_old(
__in efx_nic_t *enp)
@@ -1289,6 +1434,12 @@ ef10_filter_reconfigure(
}
table->eft_mulcst_filter_count = 0;
+ for (i = 0; i < table->eft_encap_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_encap_filter_indexes[i]);
+ }
+ table->eft_encap_filter_count = 0;
+
return (0);
}
@@ -1306,6 +1457,10 @@ ef10_filter_reconfigure(
ef10_filter_set_entry_auto_old(table,
table->eft_mulcst_filter_indexes[i]);
}
+ for (i = 0; i < table->eft_encap_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_encap_filter_indexes[i]);
+ }
/*
* Insert or renew unicast filters.
@@ -1423,6 +1578,13 @@ ef10_filter_reconfigure(
}
}
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ /* Try to insert filters for encapsulated packets. */
+ (void) ef10_filter_insert_encap_filters(enp,
+ mulcst || all_mulcst || brdcst,
+ filter_flags);
+ }
+
/* Remove old filters which were not renewed */
ef10_filter_remove_old(enp);
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index 8c3dffee..8f9eb7a3 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -898,8 +898,21 @@ ef10_rx_scatter_enable(
#if EFSYS_OPT_RX_SCALE
extern __checkReturn efx_rc_t
+ef10_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context);
+
+extern __checkReturn efx_rc_t
ef10_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
@@ -907,12 +920,14 @@ ef10_rx_scale_mode_set(
extern __checkReturn efx_rc_t
ef10_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n);
extern __checkReturn efx_rc_t
ef10_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n);
@@ -1005,6 +1020,13 @@ typedef struct ef10_filter_entry_s {
/* Allow for the broadcast address to be added to the multicast list */
#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+/*
+ * For encapsulated packets, there is one filter for each combination of
+ * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or
+ * multicast inner frames.
+ */
+#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12
+
typedef struct ef10_filter_table_s {
ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
efx_rxq_t *eft_default_rxq;
@@ -1016,6 +1038,9 @@ typedef struct ef10_filter_table_s {
EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
uint32_t eft_mulcst_filter_count;
boolean_t eft_using_all_mulcst;
+ uint32_t eft_encap_filter_indexes[
+ EFX_EF10_FILTER_ENCAP_FILTERS_MAX];
+ uint32_t eft_encap_filter_count;
} ef10_filter_table_t;
__checkReturn efx_rc_t
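The value 12 above is exactly the size of the catch-all list built in ef10_filter.c. A trivial check of that arithmetic:

	#include <assert.h>

	/* 2 outer ether types (IPv4/IPv6) x 3 tunnel protocols
	 * (VXLAN/GENEVE/NVGRE) x 2 inner-frame matches (ucast/mcast) */
	static_assert(2 * 3 * 2 == 12,
		      "matches EFX_EF10_FILTER_ENCAP_FILTERS_MAX");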
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index aac2679c..58d1b0af 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -1072,6 +1072,16 @@ ef10_get_datapath_caps(
encp->enc_mac_stats_40g_tx_size_bins =
CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+ /*
+ * Check if firmware supports VXLAN and NVGRE tunnels.
+ * The capability indicates Geneve protocol support as well.
+ */
+ if (CAP_FLAG(flags, VXLAN_NVGRE))
+ encp->enc_tunnel_encapsulations_supported =
+ (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
+ (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
+ (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
+
#undef CAP_FLAG
#undef CAP_FLAG2
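With enc_tunnel_encapsulations_supported populated as a protocol bitmask, callers such as ef10_filter_reconfigure() can test individual protocols. A sketch in the base-driver idiom:

	static boolean_t
	nic_supports_vxlan(const efx_nic_cfg_t *encp)
	{
		/* One bit per efx_tunnel_protocol_t value */
		return ((encp->enc_tunnel_encapsulations_supported &
		    (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) != 0 ? B_TRUE : B_FALSE);
	}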
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 661caa88..849f674c 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -159,7 +159,7 @@ fail1:
static __checkReturn efx_rc_t
efx_mcdi_rss_context_alloc(
__in efx_nic_t *enp,
- __in efx_rx_scale_support_t scale_support,
+ __in efx_rx_scale_context_type_t type,
__in uint32_t num_queues,
__out uint32_t *rss_contextp)
{
@@ -175,7 +175,7 @@ efx_mcdi_rss_context_alloc(
goto fail1;
}
- switch (scale_support) {
+ switch (type) {
case EFX_RX_SCALE_EXCLUSIVE:
context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
break;
@@ -461,7 +461,7 @@ ef10_rx_init(
* Allocate an exclusive RSS context, which allows both the
* indirection table and key to be modified.
*/
- enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
} else {
/*
@@ -469,7 +469,7 @@ ef10_rx_init(
* operation without support for RSS. The pseudo-header in
* received packets will not contain a Toeplitz hash value.
*/
- enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
}
@@ -491,8 +491,51 @@ ef10_rx_scatter_enable(
#if EFSYS_OPT_RX_SCALE
__checkReturn efx_rc_t
+ef10_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_rc_t rc;
+
+ rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_rc_t rc;
+
+ rc = efx_mcdi_rss_context_free(enp, rss_context);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
ef10_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert)
@@ -507,13 +550,16 @@ ef10_rx_scale_mode_set(
goto fail1;
}
- if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
- rc = ENOTSUP;
- goto fail2;
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ rss_context = enp->en_rss_context;
}
if ((rc = efx_mcdi_rss_context_set_flags(enp,
- enp->en_rss_context, type)) != 0)
+ rss_context, type)) != 0)
goto fail3;
return (0);
@@ -533,18 +579,24 @@ fail1:
__checkReturn efx_rc_t
ef10_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n)
{
efx_rc_t rc;
- if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
- rc = ENOTSUP;
- goto fail1;
+ EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE ==
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ rss_context = enp->en_rss_context;
}
- if ((rc = efx_mcdi_rss_context_set_key(enp,
- enp->en_rss_context, key, n)) != 0)
+ if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0)
goto fail2;
return (0);
@@ -562,18 +614,23 @@ fail1:
__checkReturn efx_rc_t
ef10_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n)
{
efx_rc_t rc;
- if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
- rc = ENOTSUP;
- goto fail1;
+
+ if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
+ if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ rss_context = enp->en_rss_context;
}
if ((rc = efx_mcdi_rss_context_set_table(enp,
- enp->en_rss_context, table, n)) != 0)
+ rss_context, table, n)) != 0)
goto fail2;
return (0);
@@ -964,11 +1021,10 @@ ef10_rx_fini(
__in efx_nic_t *enp)
{
#if EFSYS_OPT_RX_SCALE
- if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE)
(void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
- }
enp->en_rss_context = 0;
- enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
#else
_NOTE(ARGUNUSED(enp))
#endif /* EFSYS_OPT_RX_SCALE */
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index 7eabc370..57fba052 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -1088,6 +1088,14 @@ efx_bist_stop(
#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
#define EFX_FEATURE_PACKED_STREAM 0x00004000
+typedef enum efx_tunnel_protocol_e {
+ EFX_TUNNEL_PROTOCOL_NONE = 0,
+ EFX_TUNNEL_PROTOCOL_VXLAN,
+ EFX_TUNNEL_PROTOCOL_GENEVE,
+ EFX_TUNNEL_PROTOCOL_NVGRE,
+ EFX_TUNNEL_NPROTOS
+} efx_tunnel_protocol_t;
+
typedef struct efx_nic_cfg_s {
uint32_t enc_board_type;
uint32_t enc_phy_type;
@@ -1119,6 +1127,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_rx_prefix_size;
uint32_t enc_rx_buf_align_start;
uint32_t enc_rx_buf_align_end;
+ uint32_t enc_rx_scale_max_exclusive_contexts;
#if EFSYS_OPT_LOOPBACK
efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
#endif /* EFSYS_OPT_LOOPBACK */
@@ -1187,6 +1196,7 @@ typedef struct efx_nic_cfg_s {
boolean_t enc_rx_var_packed_stream_supported;
boolean_t enc_pm_and_rxdp_counters;
boolean_t enc_mac_stats_40g_tx_size_bins;
+ uint32_t enc_tunnel_encapsulations_supported;
/* External port identifier */
uint8_t enc_external_port;
uint32_t enc_mcdi_max_payload_length;
@@ -1873,6 +1883,9 @@ efx_rx_scatter_enable(
__in unsigned int buf_size);
#endif /* EFSYS_OPT_RX_SCATTER */
+/* Handle to represent use of the default RSS context. */
+#define EFX_RSS_CONTEXT_DEFAULT 0xffffffff
+
#if EFSYS_OPT_RX_SCALE
typedef enum efx_rx_hash_alg_e {
@@ -1892,30 +1905,44 @@ typedef enum efx_rx_hash_support_e {
EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
} efx_rx_hash_support_t;
+#define EFX_RSS_KEY_SIZE 40 /* RSS key size (bytes) */
#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
#define EFX_MAXRSS 64 /* RX indirection entry range */
#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
-typedef enum efx_rx_scale_support_e {
- EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */
+typedef enum efx_rx_scale_context_type_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* No RX scale context */
EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
-} efx_rx_scale_support_t;
+} efx_rx_scale_context_type_t;
extern __checkReturn efx_rc_t
-efx_rx_hash_support_get(
+efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
__out efx_rx_hash_support_t *supportp);
extern __checkReturn efx_rc_t
-efx_rx_scale_support_get(
+efx_rx_scale_default_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_context_type_t *typep);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_context_alloc(
__in efx_nic_t *enp,
- __out efx_rx_scale_support_t *supportp);
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context);
extern __checkReturn efx_rc_t
efx_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
@@ -1923,12 +1950,14 @@ efx_rx_scale_mode_set(
extern __checkReturn efx_rc_t
efx_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n);
extern __checkReturn efx_rc_t
efx_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n);
@@ -2214,6 +2243,7 @@ efx_tx_qdestroy(
#define EFX_IPPROTO_TCP 6
#define EFX_IPPROTO_UDP 17
+#define EFX_IPPROTO_GRE 47
/* Use RSS to spread across multiple queues */
#define EFX_FILTER_FLAG_RX_RSS 0x01
@@ -2232,6 +2262,10 @@ efx_tx_qdestroy(
typedef unsigned int efx_filter_flags_t;
+/*
+ * Flags which specify the fields to match on. The values are the same as in the
+ * MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT commands.
+ */
typedef enum efx_filter_match_flags_e {
EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
* address */
@@ -2246,6 +2280,10 @@ typedef enum efx_filter_match_flags_e {
EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
* protocol */
+ /* For encapsulated packets, match all multicast inner frames */
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST = 0x01000000,
+ /* For encapsulated packets, match all unicast inner frames */
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST = 0x02000000,
/* Match otherwise-unmatched multicast and broadcast packets */
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000,
/* Match otherwise-unmatched unicast packets */
@@ -2271,26 +2309,26 @@ typedef enum efx_filter_priority_s {
*/
typedef struct efx_filter_spec_s {
- uint32_t efs_match_flags;
- uint32_t efs_priority:2;
- uint32_t efs_flags:6;
- uint32_t efs_dmaq_id:12;
- uint32_t efs_rss_context;
- uint16_t efs_outer_vid;
- uint16_t efs_inner_vid;
- uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
- uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
- uint16_t efs_ether_type;
- uint8_t efs_ip_proto;
- uint16_t efs_loc_port;
- uint16_t efs_rem_port;
- efx_oword_t efs_rem_host;
- efx_oword_t efs_loc_host;
+ uint32_t efs_match_flags;
+ uint32_t efs_priority:2;
+ uint32_t efs_flags:6;
+ uint32_t efs_dmaq_id:12;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ efx_tunnel_protocol_t efs_encap_type;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
} efx_filter_spec_t;
/* Default values for use in filter specifications */
-#define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff
#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
@@ -2357,6 +2395,11 @@ efx_filter_spec_set_eth_local(
__in uint16_t vid,
__in const uint8_t *addr);
+extern void
+efx_filter_spec_set_ether_type(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t ether_type);
+
extern __checkReturn efx_rc_t
efx_filter_spec_set_uc_def(
__inout efx_filter_spec_t *spec);
@@ -2365,6 +2408,24 @@ extern __checkReturn efx_rc_t
efx_filter_spec_set_mc_def(
__inout efx_filter_spec_t *spec);
+typedef enum efx_filter_inner_frame_match_e {
+ EFX_FILTER_INNER_FRAME_MATCH_OTHER = 0,
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST,
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST
+} efx_filter_inner_frame_match_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_encap_type(
+ __inout efx_filter_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __in efx_filter_inner_frame_match_t inner_frame_match);
+
+#if EFSYS_OPT_RX_SCALE
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_rss_context(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t rss_context);
+#endif
#endif /* EFSYS_OPT_FILTER */
/* HASH */
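Taken together, the new prototypes let a client allocate a private RSS context instead of implicitly using the NIC default. A usage sketch (assumes an initialized efx_nic_t with the RX module started, EFSYS_OPT_RX_SCALE enabled, and the Toeplitz/hash-type constants from this header):

	static efx_rc_t
	setup_private_rss(efx_nic_t *enp, uint32_t num_queues,
	    uint8_t key[EFX_RSS_KEY_SIZE], unsigned int tbl[EFX_RSS_TBL_SIZE],
	    uint32_t *ctxp)
	{
		efx_rc_t rc;

		rc = efx_rx_scale_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE,
		    num_queues, ctxp);
		if (rc != 0)
			return (rc);
		rc = efx_rx_scale_mode_set(enp, *ctxp, EFX_RX_HASHALG_TOEPLITZ,
		    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4, B_TRUE);
		if (rc == 0)
			rc = efx_rx_scale_key_set(enp, *ctxp, key,
			    EFX_RSS_KEY_SIZE);
		if (rc == 0)
			rc = efx_rx_scale_tbl_set(enp, *ctxp, tbl,
			    EFX_RSS_TBL_SIZE);
		if (rc != 0)
			(void) efx_rx_scale_context_free(enp, *ctxp);
		return (rc);
	}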
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
index ba310260..5cab7d87 100644
--- a/drivers/net/sfc/base/efx_filter.c
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -117,10 +117,6 @@ efx_filter_remove(
EFSYS_ASSERT3P(spec, !=, NULL);
EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
-#if EFSYS_OPT_RX_SCALE
- spec->efs_rss_context = enp->en_rss_context;
-#endif
-
return (efop->efo_delete(enp, spec));
}
@@ -302,7 +298,7 @@ efx_filter_spec_init_rx(
memset(spec, 0, sizeof (*spec));
spec->efs_priority = priority;
spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
- spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ spec->efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
spec->efs_dmaq_id = (uint16_t)erp->er_index;
}
@@ -396,6 +392,17 @@ efx_filter_spec_set_eth_local(
return (0);
}
+ void
+efx_filter_spec_set_ether_type(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t ether_type)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_ether_type = ether_type;
+ spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+}
+
/*
* Specify matching otherwise-unmatched unicast in a filter specification
*/
@@ -423,6 +430,88 @@ efx_filter_spec_set_mc_def(
}
+__checkReturn efx_rc_t
+efx_filter_spec_set_encap_type(
+ __inout efx_filter_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __in efx_filter_inner_frame_match_t inner_frame_match)
+{
+ uint32_t match_flags = 0;
+ uint8_t ip_proto;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ switch (encap_type) {
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ ip_proto = EFX_IPPROTO_UDP;
+ break;
+ case EFX_TUNNEL_PROTOCOL_NVGRE:
+ ip_proto = EFX_IPPROTO_GRE;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (inner_frame_match) {
+ case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST:
+ match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
+ break;
+ case EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST:
+ match_flags |= EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST;
+ break;
+ case EFX_FILTER_INNER_FRAME_MATCH_OTHER:
+ /* This is for when specific inner frames are to be matched. */
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ spec->efs_encap_type = encap_type;
+ spec->efs_ip_proto = ip_proto;
+ spec->efs_match_flags |= (match_flags | EFX_FILTER_MATCH_IP_PROTO);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_filter_spec_set_rss_context(
+ __inout efx_filter_spec_t *spec,
+ __in uint32_t rss_context)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ /* The filter must have been created with EFX_FILTER_FLAG_RX_RSS. */
+ if ((spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ spec->efs_rss_context = rss_context;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
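A sketch tying the two new spec helpers together: steer unknown-unicast VXLAN traffic into a private RSS context (erp and rss_context are assumed to come from the caller, e.g. from the allocation sketch earlier; EFX_FILTER_PRI_MANUAL is the usual manual-filter priority):

	static efx_rc_t
	steer_vxlan_to_ctx(efx_nic_t *enp, efx_rxq_t *erp, uint32_t rss_context)
	{
		efx_filter_spec_t spec;
		efx_rc_t rc;

		efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
		    EFX_FILTER_FLAG_RX_RSS, erp);
		efx_filter_spec_set_ether_type(&spec, EFX_ETHER_TYPE_IPV4);
		rc = efx_filter_spec_set_encap_type(&spec,
		    EFX_TUNNEL_PROTOCOL_VXLAN,
		    EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST);
		if (rc == 0)
			rc = efx_filter_spec_set_rss_context(&spec, rss_context);
		if (rc == 0)
			rc = efx_filter_insert(enp, &spec);
		return (rc);
	}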
#if EFSYS_OPT_SIENA
@@ -454,9 +543,9 @@ siena_filter_spec_from_gen_spec(
else
EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
- /* Falconsiena only has one RSS context */
+ /* Siena only has one RSS context */
if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
- gen_spec->efs_rss_context != 0) {
+ gen_spec->efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
rc = EINVAL;
goto fail1;
}
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index 43add6d9..53fa37ac 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -152,11 +152,17 @@ typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
#endif
#if EFSYS_OPT_RX_SCALE
- efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t,
+ efx_rc_t (*erxo_scale_context_alloc)(efx_nic_t *,
+ efx_rx_scale_context_type_t,
+ uint32_t, uint32_t *);
+ efx_rc_t (*erxo_scale_context_free)(efx_nic_t *, uint32_t);
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, uint32_t,
+ efx_rx_hash_alg_t,
efx_rx_hash_type_t, boolean_t);
- efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t);
- efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *,
- size_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint32_t,
+ uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, uint32_t,
+ unsigned int *, size_t);
uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
uint8_t *);
#endif /* EFSYS_OPT_RX_SCALE */
@@ -648,9 +654,9 @@ struct efx_nic_s {
const efx_vpd_ops_t *en_evpdop;
#endif /* EFSYS_OPT_VPD */
#if EFSYS_OPT_RX_SCALE
- efx_rx_hash_support_t en_hash_support;
- efx_rx_scale_support_t en_rss_support;
- uint32_t en_rss_context;
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_context_type_t en_rss_context_type;
+ uint32_t en_rss_context;
#endif /* EFSYS_OPT_RX_SCALE */
uint32_t en_vport_id;
#if EFSYS_OPT_LICENSING
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
index c8156341..785365d3 100644
--- a/drivers/net/sfc/base/efx_rx.c
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -53,6 +53,7 @@ siena_rx_scatter_enable(
static __checkReturn efx_rc_t
siena_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
@@ -60,12 +61,14 @@ siena_rx_scale_mode_set(
static __checkReturn efx_rc_t
siena_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n);
static __checkReturn efx_rc_t
siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n);
@@ -149,6 +152,8 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {
siena_rx_scatter_enable, /* erxo_scatter_enable */
#endif
#if EFSYS_OPT_RX_SCALE
+ NULL, /* erxo_scale_context_alloc */
+ NULL, /* erxo_scale_context_free */
siena_rx_scale_mode_set, /* erxo_scale_mode_set */
siena_rx_scale_key_set, /* erxo_scale_key_set */
siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
@@ -176,6 +181,8 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_scatter_enable, /* erxo_scatter_enable */
#endif
#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */
+ ef10_rx_scale_context_free, /* erxo_scale_context_free */
ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
ef10_rx_scale_key_set, /* erxo_scale_key_set */
ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
@@ -304,7 +311,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
__checkReturn efx_rc_t
-efx_rx_hash_support_get(
+efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
__out efx_rx_hash_support_t *supportp)
{
@@ -318,7 +325,10 @@ efx_rx_hash_support_get(
goto fail1;
}
- /* Report if resources are available to insert RX hash value */
+ /*
+ * Report the hashing support the client gets by default if it
+ * does not allocate an RSS context itself.
+ */
*supportp = enp->en_hash_support;
return (0);
@@ -330,22 +340,25 @@ fail1:
}
__checkReturn efx_rc_t
-efx_rx_scale_support_get(
+efx_rx_scale_default_support_get(
__in efx_nic_t *enp,
- __out efx_rx_scale_support_t *supportp)
+ __out efx_rx_scale_context_type_t *typep)
{
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
- if (supportp == NULL) {
+ if (typep == NULL) {
rc = EINVAL;
goto fail1;
}
- /* Report if resources are available to support RSS */
- *supportp = enp->en_rss_support;
+ /*
+ * Report the RSS support the client gets by default if it
+ * does not allocate an RSS context itself.
+ */
+ *typep = enp->en_rss_context_type;
return (0);
@@ -354,10 +367,75 @@ fail1:
return (rc);
}
+#endif /* EFSYS_OPT_RX_SCALE */
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_context_type_t type,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_context_alloc == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = erxop->erxo_scale_context_alloc(enp, type,
+ num_queues, rss_contextp)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_context_free == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
__checkReturn efx_rc_t
efx_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert)
@@ -369,7 +447,7 @@ efx_rx_scale_mode_set(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
if (erxop->erxo_scale_mode_set != NULL) {
- if ((rc = erxop->erxo_scale_mode_set(enp, alg,
+ if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
type, insert)) != 0)
goto fail1;
}
@@ -386,6 +464,7 @@ fail1:
__checkReturn efx_rc_t
efx_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n)
{
@@ -395,7 +474,7 @@ efx_rx_scale_key_set(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
- if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0)
+ if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0)
goto fail1;
return (0);
@@ -411,6 +490,7 @@ fail1:
__checkReturn efx_rc_t
efx_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n)
{
@@ -420,7 +500,7 @@ efx_rx_scale_tbl_set(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
- if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0)
+ if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0)
goto fail1;
return (0);
@@ -654,7 +734,7 @@ siena_rx_init(
#if EFSYS_OPT_RX_SCALE
/* The RSS key and indirection table are writable. */
- enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
/* Hardware can insert RX hash with/without RSS */
enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
@@ -773,12 +853,18 @@ fail1:
static __checkReturn efx_rc_t
siena_rx_scale_mode_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
efx_rc_t rc;
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
switch (alg) {
case EFX_RX_HASHALG_LFSR:
EFX_RX_LFSR_HASH(enp, insert);
@@ -794,17 +880,19 @@ siena_rx_scale_mode_set(
type & EFX_RX_HASH_TCPIPV6,
rc);
if (rc != 0)
- goto fail1;
+ goto fail2;
break;
default:
rc = EINVAL;
- goto fail2;
+ goto fail3;
}
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -820,6 +908,7 @@ fail1:
static __checkReturn efx_rc_t
siena_rx_scale_key_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) uint8_t *key,
__in size_t n)
{
@@ -828,6 +917,11 @@ siena_rx_scale_key_set(
unsigned int offset;
efx_rc_t rc;
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
byte = 0;
/* Write Toeplitz IPv4 hash key */
@@ -848,7 +942,7 @@ siena_rx_scale_key_set(
--offset) {
if (oword.eo_u8[offset - 1] != key[byte++]) {
rc = EFAULT;
- goto fail1;
+ goto fail2;
}
}
@@ -897,7 +991,7 @@ siena_rx_scale_key_set(
--offset) {
if (oword.eo_u8[offset - 1] != key[byte++]) {
rc = EFAULT;
- goto fail2;
+ goto fail3;
}
}
@@ -909,7 +1003,7 @@ siena_rx_scale_key_set(
--offset) {
if (oword.eo_u8[offset - 1] != key[byte++]) {
rc = EFAULT;
- goto fail3;
+ goto fail4;
}
}
@@ -921,13 +1015,15 @@ siena_rx_scale_key_set(
--offset) {
if (oword.eo_u8[offset - 1] != key[byte++]) {
rc = EFAULT;
- goto fail4;
+ goto fail5;
}
}
done:
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -945,6 +1041,7 @@ fail1:
static __checkReturn efx_rc_t
siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
+ __in uint32_t rss_context,
__in_ecount(n) unsigned int *table,
__in size_t n)
{
@@ -955,11 +1052,16 @@ siena_rx_scale_tbl_set(
EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
- if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
rc = EINVAL;
goto fail1;
}
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
uint32_t byte;
@@ -988,12 +1090,14 @@ siena_rx_scale_tbl_set(
/* Verify the entry */
if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
rc = EFAULT;
- goto fail2;
+ goto fail3;
}
}
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
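The EF10 setters resolve the default handle to the NIC context, while the Siena setters above accept only the default. The EF10 resolution idiom in isolation, as a sketch:

	static efx_rc_t
	resolve_rss_context(efx_nic_t *enp, uint32_t *rss_contextp)
	{
		/* EFX_RSS_CONTEXT_DEFAULT means "use the NIC default";
		 * an explicit context is passed through unchanged. */
		if (*rss_contextp == EFX_RSS_CONTEXT_DEFAULT) {
			if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE)
				return (ENOTSUP);
			*rss_contextp = enp->en_rss_context;
		}
		return (0);
	}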
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
index addbf1c5..19fb7cfb 100644
--- a/drivers/net/sfc/base/hunt_nic.c
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -301,6 +301,13 @@ hunt_board_cfg(
/* Alignment for WPTR updates */
encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+ /*
+ * Maximum number of exclusive RSS contexts which can be allocated. The
+ * hardware supports 64, but 6 are reserved for shared contexts. They
+ * are a global resource so not all may be available.
+ */
+ encp->enc_rx_scale_max_exclusive_contexts = 58;
+
encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
/* No boundary crossing limits */
encp->enc_tx_dma_desc_boundary = 0;
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index 07afac1e..d361d654 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -298,6 +298,13 @@ medford_board_cfg(
/* Alignment for WPTR updates */
encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+ /*
+ * Maximum number of exclusive RSS contexts which can be allocated. The
+ * hardware supports 64, but 6 are reserved for shared contexts. They
+ * are a global resource so not all may be available.
+ */
+ encp->enc_rx_scale_max_exclusive_contexts = 58;
+
encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
/* No boundary crossing limits */
encp->enc_tx_dma_desc_boundary = 0;
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index 129b854b..fcc8f151 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -135,6 +135,9 @@ siena_board_cfg(
/* Alignment for WPTR updates */
encp->enc_rx_push_align = 1;
+ /* There is one RSS context per function */
+ encp->enc_rx_scale_max_exclusive_contexts = 1;
+
encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
/* Fragments must not span 4k boundaries. */
encp->enc_tx_dma_desc_boundary = 4096;
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
index 0405d02b..f428b624 100644
--- a/drivers/net/sfc/efsys.h
+++ b/drivers/net/sfc/efsys.h
@@ -253,7 +253,7 @@ typedef struct __efsys_identifier_s efsys_identifier_t;
/* DMA */
-typedef phys_addr_t efsys_dma_addr_t;
+typedef rte_iova_t efsys_dma_addr_t;
typedef struct efsys_mem_s {
const struct rte_memzone *esm_mz;
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 6cecfc00..49d7e937 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -61,8 +61,8 @@ sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
return ENOMEM;
}
- esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
- if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
+ esmp->esm_addr = mz->iova;
+ if (esmp->esm_addr == RTE_BAD_IOVA) {
(void)rte_memzone_free(mz);
return EFAULT;
}
@@ -501,7 +501,7 @@ sfc_mem_bar_fini(struct sfc_adapter *sa)
* and also known to give a uniform distribution
* (a good distribution of traffic between different CPUs)
*/
-static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
+static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
@@ -510,10 +510,10 @@ static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
};
#endif
+#if EFSYS_OPT_RX_SCALE
static int
sfc_set_rss_defaults(struct sfc_adapter *sa)
{
-#if EFSYS_OPT_RX_SCALE
int rc;
rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
@@ -528,11 +528,11 @@ sfc_set_rss_defaults(struct sfc_adapter *sa)
if (rc != 0)
goto fail_rx_init;
- rc = efx_rx_scale_support_get(sa->nic, &sa->rss_support);
+ rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
if (rc != 0)
goto fail_scale_support_get;
- rc = efx_rx_hash_support_get(sa->nic, &sa->hash_support);
+ rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
if (rc != 0)
goto fail_hash_support_get;
@@ -556,10 +556,14 @@ fail_ev_init:
fail_intr_init:
return rc;
+}
#else
+static int
+sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
+{
return 0;
-#endif
}
+#endif
int
sfc_attach(struct sfc_adapter *sa)
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 286d1ac1..7f11bf22 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -35,6 +35,7 @@
#include <stdbool.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
@@ -48,8 +49,6 @@ extern "C" {
#endif
#if EFSYS_OPT_RX_SCALE
-/** RSS key length (bytes) */
-#define SFC_RSS_KEY_SIZE 40
/** RSS hash offloads mask */
#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
#endif
@@ -225,11 +224,11 @@ struct sfc_adapter {
unsigned int rss_channels;
#if EFSYS_OPT_RX_SCALE
- efx_rx_scale_support_t rss_support;
+ efx_rx_scale_context_type_t rss_support;
efx_rx_hash_support_t hash_support;
efx_rx_hash_type_t rss_hash_types;
unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
- uint8_t rss_key[SFC_RSS_KEY_SIZE];
+ uint8_t rss_key[EFX_RSS_KEY_SIZE];
#endif
/*
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index a7b82784..3f6a604b 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -155,6 +155,10 @@ typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+/** Check Rx descriptor status */
+typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
+ uint16_t offset);
+
/** Receive datapath definition */
struct sfc_dp_rx {
struct sfc_dp dp;
@@ -170,6 +174,7 @@ struct sfc_dp_rx {
sfc_dp_rx_qpurge_t *qpurge;
sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ sfc_dp_rx_qdesc_status_t *qdesc_status;
eth_rx_burst_t pkt_burst;
};
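The new qdesc_status hook gives each Rx datapath a way to report per-descriptor state; it presumably backs the generic rte_eth_rx_descriptor_status() ethdev call (added in 17.05). An application-side sketch:

	#include <rte_ethdev.h>

	/* Returns nonzero if the descriptor 'offset' entries ahead of the
	 * next one to be read has already been filled by hardware. */
	static int
	rx_desc_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
	{
		return rte_eth_rx_descriptor_status(port_id, queue_id,
				offset) == RTE_ETH_RX_DESC_DONE;
	}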
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index c1c34191..94d1b108 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -127,6 +127,12 @@ typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
*/
typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
+/**
+ * Check Tx descriptor status
+ */
+typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq,
+ uint16_t offset);
+
/** Transmit datapath definition */
struct sfc_dp_tx {
struct sfc_dp dp;
@@ -136,12 +142,15 @@ struct sfc_dp_tx {
#define SFC_DP_TX_FEAT_TSO 0x2
#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
+#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
+#define SFC_DP_TX_FEAT_REFCNT 0x20
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;
sfc_dp_tx_qstart_t *qstart;
sfc_dp_tx_qstop_t *qstop;
sfc_dp_tx_qtx_ev_t *qtx_ev;
sfc_dp_tx_qreap_t *qreap;
+ sfc_dp_tx_qdesc_status_t *qdesc_status;
eth_tx_burst_t pkt_burst;
};
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 60812cbe..18d60c69 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -177,7 +177,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
++i, ++id) {
struct rte_mbuf *m = objs[i];
struct sfc_ef10_rx_sw_desc *rxd;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
SFC_ASSERT((id & ~ptr_mask) == 0);
rxd = &rxq->sw_ring[id];
@@ -189,7 +189,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
* structure members.
*/
- phys_addr = rte_mbuf_data_dma_addr_default(m);
+ phys_addr = rte_mbuf_data_iova_default(m);
EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
ESF_DZ_RX_KER_BYTE_CNT, buf_size,
ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
@@ -544,6 +544,14 @@ sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
return -ENOTSUP;
}
+static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
+static int
+sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
@@ -708,5 +716,6 @@ struct sfc_dp_rx sfc_ef10_rx = {
.qpurge = sfc_ef10_rx_qpurge,
.supported_ptypes_get = sfc_ef10_supported_ptypes_get,
.qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_rx_qdesc_status,
.pkt_burst = sfc_ef10_recv_pkts,
};
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index da2b5edb..0454e79a 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -158,17 +158,35 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
do {
struct sfc_ef10_tx_sw_desc *txd;
+ struct rte_mbuf *m;
txd = &txq->sw_ring[completed & ptr_mask];
+ if (txd->mbuf == NULL)
+ continue;
- if (txd->mbuf != NULL) {
- rte_pktmbuf_free(txd->mbuf);
- txd->mbuf = NULL;
+ m = rte_pktmbuf_prefree_seg(txd->mbuf);
+ txd->mbuf = NULL;
+ if (m == NULL)
+ continue;
+
+ if ((nb == RTE_DIM(bulk)) ||
+ ((nb != 0) && (m->pool != bulk[0]->pool))) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
}
+
+ bulk[nb++] = m;
} while (++completed != pending);
+ if (nb != 0)
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
txq->completed = completed;
}
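The reap path above trades per-segment rte_pktmbuf_free() calls for batched
mempool puts: segments are pre-freed one by one but returned to their pool in
bulks, flushing early whenever the pool changes. A minimal standalone sketch
of the same pattern, not part of the patch (the helper name, the
REAP_BULK_SIZE constant and the mbufs/count walk are illustrative
assumptions):

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define REAP_BULK_SIZE 32 /* mirrors SFC_TX_REAP_BULK_SIZE in this patch */

static void
bulk_free_mbufs(struct rte_mbuf **mbufs, unsigned int count)
{
	struct rte_mbuf *bulk[REAP_BULK_SIZE];
	unsigned int nb = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* Drop our reference; NULL means the segment is still used */
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(mbufs[i]);

		if (m == NULL)
			continue;

		/* Flush when the batch is full or the pool changes */
		if (nb == RTE_DIM(bulk) ||
		    (nb != 0 && m->pool != bulk[0]->pool)) {
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
			nb = 0;
		}

		bulk[nb++] = m;
	}

	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
}

Grouping by pool lets rte_mempool_put_bulk() amortize the per-object cost,
which is why all mbufs within a bulk must belong to the same pool.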
@@ -177,7 +195,7 @@ sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
}
static void
-sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
efx_qword_t *edp)
{
EFX_POPULATE_QWORD_4(*edp,
@@ -323,8 +341,9 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_len = m_seg->pkt_len;
do {
- phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+ unsigned int id = added & ptr_mask;
SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
@@ -332,15 +351,30 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sfc_ef10_tx_qdesc_dma_create(seg_addr,
seg_len, (pkt_len == 0),
- &txq->txq_hw_ring[added & ptr_mask]);
+ &txq->txq_hw_ring[id]);
+
+			/*
+			 * rte_pktmbuf_free() is commonly used in DPDK to
+			 * recycle packets: it checks every segment's
+			 * reference counter and returns the buffer to its
+			 * pool whenever possible. Nevertheless, freeing mbuf
+			 * segments one by one may entail a performance
+			 * penalty. Instead, sfc_ef10_tx_reap() does the
+			 * equivalent job on its own and frees the buffers in
+			 * bulk (all mbufs within a bulk belong to the same
+			 * pool). To make a single reap loop sufficient to
+			 * inspect all the buffers, each segment pointer must
+			 * therefore be stored in its own SW descriptor
+			 * rather than only in the last one
+			 */
+ txq->sw_ring[id].mbuf = m_seg;
+
++added;
} while ((m_seg = m_seg->next) != 0);
dma_desc_space -= (added - pkt_start);
-
- /* Assign mbuf to the last used desc */
- txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
}
if (likely(added != txq->added)) {
@@ -367,14 +401,25 @@ sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
pending += sfc_ef10_tx_process_events(txq);
if (pending != completed) {
+ struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
+ unsigned int nb = 0;
+
do {
struct sfc_ef10_tx_sw_desc *txd;
txd = &txq->sw_ring[completed & ptr_mask];
- rte_pktmbuf_free_seg(txd->mbuf);
+ if (nb == RTE_DIM(bulk)) {
+ rte_mempool_put_bulk(bulk[0]->pool,
+ (void *)bulk, nb);
+ nb = 0;
+ }
+
+ bulk[nb++] = txd->mbuf;
} while (++completed != pending);
+ rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
+
txq->completed = completed;
}
@@ -419,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
SFC_EF10_TX_DMA_DESC_LEN_MAX);
- sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
rte_pktmbuf_data_len(pkt),
true, &txq->txq_hw_ring[id]);
@@ -557,7 +602,7 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
txd = &txq->sw_ring[completed & txq->ptr_mask];
if (txd->mbuf != NULL) {
- rte_pktmbuf_free(txd->mbuf);
+ rte_pktmbuf_free_seg(txd->mbuf);
txd->mbuf = NULL;
}
}
@@ -565,6 +610,14 @@ sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
txq->flags &= ~SFC_EF10_TXQ_STARTED;
}
+static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
+static int
+sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
+ __rte_unused uint16_t offset)
+{
+ return -ENOTSUP;
+}
+
struct sfc_dp_tx sfc_ef10_tx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EF10,
@@ -572,6 +625,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_TX_FEAT_MULTI_SEG |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
@@ -579,6 +634,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
.qtx_ev = sfc_ef10_tx_qtx_ev,
.qstop = sfc_ef10_tx_qstop,
.qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
.pkt_burst = sfc_ef10_xmit_pkts,
};
@@ -594,5 +650,6 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.qtx_ev = sfc_ef10_tx_qtx_ev,
.qstop = sfc_ef10_tx_qstop,
.qreap = sfc_ef10_tx_qreap,
+ .qdesc_status = sfc_ef10_tx_qdesc_status,
.pkt_burst = sfc_ef10_simple_xmit_pkts,
};
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 12bcd6fa..2f5f86f8 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -33,6 +33,7 @@
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_errno.h>
#include "efx.h"
@@ -145,10 +146,16 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
+
+ if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+
#if EFSYS_OPT_RX_SCALE
if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
dev_info->reta_size = EFX_RSS_TBL_SIZE;
- dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
+ dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
}
#endif
@@ -515,16 +522,18 @@ sfc_tx_queue_release(void *queue)
sfc_adapter_unlock(sa);
}
-static void
+static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct sfc_adapter *sa = dev->data->dev_private;
struct sfc_port *port = &sa->port;
uint64_t *mac_stats;
+ int ret;
rte_spinlock_lock(&port->mac_stats_lock);
- if (sfc_port_update_mac_stats(sa) != 0)
+ ret = sfc_port_update_mac_stats(sa);
+ if (ret != 0)
goto unlock;
mac_stats = port->mac_stats_buf;
@@ -581,6 +590,8 @@ sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unlock:
rte_spinlock_unlock(&port->mac_stats_lock);
+ SFC_ASSERT(ret >= 0);
+ return -ret;
}
static void
@@ -991,7 +1002,7 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
}
for (i = 0; i < nb_mc_addr; ++i) {
- (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+ rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
EFX_MAC_ADDR_LEN);
mc_addrs += EFX_MAC_ADDR_LEN;
}
@@ -1087,6 +1098,24 @@ sfc_rx_descriptor_done(void *queue, uint16_t offset)
}
static int
+sfc_rx_descriptor_status(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
+}
+
+static int
+sfc_tx_descriptor_status(void *queue, uint16_t offset)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);
+
+ return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
+}
+
+static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct sfc_adapter *sa = dev->data->dev_private;
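With both callbacks wired up, an application can probe ring occupancy at an
arbitrary offset through the generic ethdev API. A usage sketch (the helper
and its port_id/queue_id parameters are assumptions, not part of the patch):

#include <rte_ethdev.h>

static void
probe_descriptors(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int rx = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
	int tx = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	if (rx == RTE_ETH_RX_DESC_DONE) {
		/* A received packet sits 'offset' descriptors ahead */
	}
	if (tx == RTE_ETH_TX_DESC_FULL) {
		/* Descriptor still held by HW: the Tx ring is congested */
	}
	/*
	 * Negative values are -errno; e.g. the EF10 datapath in this
	 * patch still returns -ENOTSUP from its qdesc_status stubs.
	 */
}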
@@ -1205,9 +1234,9 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
* locally in 'sfc_adapter' and kept up-to-date
*/
rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
- rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
+ rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
if (rss_conf->rss_key != NULL)
- rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);
+ rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE);
sfc_adapter_unlock(sa);
@@ -1252,14 +1281,17 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
- rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
efx_hash_types, B_TRUE);
if (rc != 0)
goto fail_scale_mode_set;
if (rss_conf->rss_key != NULL) {
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
+ rc = efx_rx_scale_key_set(sa->nic,
+ EFX_RSS_CONTEXT_DEFAULT,
+ rss_conf->rss_key,
sizeof(sa->rss_key));
if (rc != 0)
goto fail_scale_key_set;
@@ -1275,7 +1307,8 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
return 0;
fail_scale_key_set:
- if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
sa->rss_hash_types, B_TRUE) != 0)
sfc_err(sa, "failed to restore RSS mode");
@@ -1326,7 +1359,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
struct sfc_port *port = &sa->port;
unsigned int *rss_tbl_new;
uint16_t entry;
- int rc;
+ int rc = 0;
if (port->isolated)
@@ -1371,10 +1404,16 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
}
}
- rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
- if (rc == 0)
- rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ rss_tbl_new, EFX_RSS_TBL_SIZE);
+ if (rc != 0)
+ goto fail_scale_tbl_set;
+ }
+
+ rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+fail_scale_tbl_set:
bad_reta_entry:
sfc_adapter_unlock(sa);
@@ -1469,6 +1508,8 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.rx_queue_release = sfc_rx_queue_release,
.rx_queue_count = sfc_rx_queue_count,
.rx_descriptor_done = sfc_rx_descriptor_done,
+ .rx_descriptor_status = sfc_rx_descriptor_status,
+ .tx_descriptor_status = sfc_tx_descriptor_status,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
@@ -1751,8 +1792,6 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
- dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
-
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 110dfb89..f2050f65 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -803,7 +803,7 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
}
flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
- flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
return 0;
}
@@ -886,6 +886,170 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
return 0;
}
+#if EFSYS_OPT_RX_SCALE
+static int
+sfc_flow_parse_rss(struct sfc_adapter *sa,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow *flow)
+{
+ unsigned int rxq_sw_index;
+ struct sfc_rxq *rxq;
+ unsigned int rxq_hw_index_min;
+ unsigned int rxq_hw_index_max;
+ const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
+ uint64_t rss_hf;
+ uint8_t *rss_key = NULL;
+ struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
+ unsigned int i;
+
+ if (rss->num == 0)
+ return -EINVAL;
+
+ rxq_sw_index = sa->rxq_count - 1;
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+ rxq_hw_index_min = rxq->hw_index;
+ rxq_hw_index_max = 0;
+
+ for (i = 0; i < rss->num; ++i) {
+ rxq_sw_index = rss->queue[i];
+
+ if (rxq_sw_index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ if (rxq->hw_index < rxq_hw_index_min)
+ rxq_hw_index_min = rxq->hw_index;
+
+ if (rxq->hw_index > rxq_hw_index_max)
+ rxq_hw_index_max = rxq->hw_index;
+ }
+
+ rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
+ if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+ return -EINVAL;
+
+ if (rss_conf != NULL) {
+ if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+ return -EINVAL;
+
+ rss_key = rss_conf->rss_key;
+ } else {
+ rss_key = sa->rss_key;
+ }
+
+ flow->rss = B_TRUE;
+
+ sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
+ sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
+ sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
+ rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
+
+ for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
+ unsigned int rxq_sw_index = rss->queue[i % rss->num];
+ struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
+ }
+
+ return 0;
+}
+#endif /* EFSYS_OPT_RX_SCALE */
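For reference, the action parsed above is the 17.11-era rte_flow_action_rss,
whose queue list is a flexible array. A hypothetical helper an application
might use to build one (make_rss_action() and its parameters are assumptions
for illustration):

#include <rte_flow.h>
#include <rte_malloc.h>

static struct rte_flow_action_rss *
make_rss_action(const uint16_t *queues, uint16_t nb_queues,
		const struct rte_eth_rss_conf *rss_conf)
{
	struct rte_flow_action_rss *rss;
	uint16_t i;

	rss = rte_zmalloc(NULL,
			  sizeof(*rss) + nb_queues * sizeof(rss->queue[0]),
			  0);
	if (rss == NULL)
		return NULL;

	/* NULL rss_conf selects the PMD defaults (sa->rss_key above) */
	rss->rss_conf = rss_conf;
	rss->num = nb_queues;
	for (i = 0; i < nb_queues; i++)
		rss->queue[i] = queues[i];

	return rss;
}

The result is then attached to an RTE_FLOW_ACTION_TYPE_RSS entry in the
actions array passed to rte_flow_create().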
+
+static int
+sfc_flow_filter_insert(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ efx_filter_spec_t *spec = &flow->spec;
+
+#if EFSYS_OPT_RX_SCALE
+ struct sfc_flow_rss *rss = &flow->rss_conf;
+ int rc = 0;
+
+ if (flow->rss) {
+ unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
+ rss->rxq_hw_index_min + 1,
+ EFX_MAXRSS);
+
+ rc = efx_rx_scale_context_alloc(sa->nic,
+ EFX_RX_SCALE_EXCLUSIVE,
+ rss_spread,
+ &spec->efs_rss_context);
+ if (rc != 0)
+ goto fail_scale_context_alloc;
+
+ rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
+ EFX_RX_HASHALG_TOEPLITZ,
+ rss->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
+ rss->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+
+ spec->efs_dmaq_id = rss->rxq_hw_index_min;
+ spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ }
+
+ rc = efx_filter_insert(sa->nic, spec);
+ if (rc != 0)
+ goto fail_filter_insert;
+
+ if (flow->rss) {
+		/*
+		 * The scale table is set after filter insertion because
+		 * its entries are relative to the base RxQ ID, and that
+		 * ID only reaches the HW when the filter is inserted.
+		 * By the time of this request the HW therefore has all
+		 * the information needed to verify the table entries,
+		 * and the operation will succeed
+		 */
+ rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
+ rss->rss_tbl, RTE_DIM(rss->rss_tbl));
+ if (rc != 0)
+ goto fail_scale_tbl_set;
+ }
+
+ return 0;
+
+fail_scale_tbl_set:
+ efx_filter_remove(sa->nic, spec);
+
+fail_filter_insert:
+fail_scale_key_set:
+fail_scale_mode_set:
+ if (rss != NULL)
+ efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+
+fail_scale_context_alloc:
+ return rc;
+#else /* !EFSYS_OPT_RX_SCALE */
+ return efx_filter_insert(sa->nic, spec);
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+static int
+sfc_flow_filter_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ efx_filter_spec_t *spec = &flow->spec;
+ int rc = 0;
+
+ rc = efx_filter_remove(sa->nic, spec);
+ if (rc != 0)
+ return rc;
+
+#if EFSYS_OPT_RX_SCALE
+ if (flow->rss)
+ rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return rc;
+}
+
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
const struct rte_flow_action actions[],
@@ -919,6 +1083,20 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
is_specified = B_TRUE;
break;
+#if EFSYS_OPT_RX_SCALE
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rc = sfc_flow_parse_rss(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad RSS action");
+ return -rte_errno;
+ }
+
+ is_specified = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCALE */
+
default:
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -1013,7 +1191,7 @@ sfc_flow_create(struct rte_eth_dev *dev,
sfc_adapter_lock(sa);
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_filter_insert(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_insert(sa, flow);
if (rc != 0) {
rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1047,7 +1225,7 @@ sfc_flow_remove(struct sfc_adapter *sa,
SFC_ASSERT(sfc_adapter_is_locked(sa));
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_filter_remove(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_remove(sa, flow);
if (rc != 0)
rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1172,7 +1350,7 @@ sfc_flow_stop(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
- efx_filter_remove(sa->nic, &flow->spec);
+ sfc_flow_filter_remove(sa, flow);
}
int
@@ -1186,7 +1364,7 @@ sfc_flow_start(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
- rc = efx_filter_insert(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_insert(sa, flow);
if (rc != 0)
goto fail_bad_flow;
}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index bfc34364..aa740d7d 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -41,9 +41,24 @@
extern "C" {
#endif
+#if EFSYS_OPT_RX_SCALE
+/* RSS configuration storage */
+struct sfc_flow_rss {
+ unsigned int rxq_hw_index_min;
+ unsigned int rxq_hw_index_max;
+ unsigned int rss_hash_types;
+ uint8_t rss_key[EFX_RSS_KEY_SIZE];
+ unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
+};
+#endif /* EFSYS_OPT_RX_SCALE */
+
/* PMD-specific definition of the opaque type from rte_flow.h */
struct rte_flow {
efx_filter_spec_t spec; /* filter specification */
+#if EFSYS_OPT_RX_SCALE
+ boolean_t rss; /* RSS toggle */
+ struct sfc_flow_rss rss_conf; /* RSS configuration */
+#endif /* EFSYS_OPT_RX_SCALE */
TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
};
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 1bf86445..2ae095b2 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -128,7 +128,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
- addr[i] = rte_pktmbuf_mtophys(m);
+ addr[i] = rte_pktmbuf_iova(m);
}
efx_rx_qpost(rxq->common, addr, rxq->buf_size,
@@ -207,11 +207,11 @@ sfc_efx_supported_ptypes_get(void)
return ptypes;
}
+#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
struct rte_mbuf *m)
{
-#if EFSYS_OPT_RX_SCALE
uint8_t *mbuf_data;
@@ -227,8 +227,15 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
m->ol_flags |= PKT_RX_RSS_HASH;
}
-#endif
}
+#else
+static void
+sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
+ __rte_unused unsigned int flags,
+ __rte_unused struct rte_mbuf *m)
+{
+}
+#endif
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
@@ -349,6 +356,43 @@ sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
return rxq->pending - rxq->completed;
}
+static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
+static int
+sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if (unlikely(offset > rxq->ptr_mask))
+ return -EINVAL;
+
+	/*
+	 * Poll the EvQ to bring the 'rxq->pending' figure up to date.
+	 * The queue is required to be running, but the check is
+	 * omitted: the API design leaves it to the caller to satisfy
+	 * all such preconditions
+	 */
+ SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
+ SFC_EFX_RXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(rxq->evq);
+
+	/*
+	 * The ring contains a handful of reserved entries, but no
+	 * explicit check for an offset pointing at one is made: the
+	 * two checks below rely on counters which already take the
+	 * HW limits into account, so a reserved entry fails both
+	 * checks and the UNAVAIL code is returned
+	 */
+
+ if (offset < (rxq->pending - rxq->completed))
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed))
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
@@ -498,6 +542,7 @@ struct sfc_dp_rx sfc_efx_rx = {
.qpurge = sfc_efx_rx_qpurge,
.supported_ptypes_get = sfc_efx_supported_ptypes_get,
.qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .qdesc_status = sfc_efx_rx_qdesc_status,
.pkt_burst = sfc_efx_recv_pkts,
};
@@ -1050,31 +1095,39 @@ sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
}
#endif
+#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
int rc = 0;
-#if EFSYS_OPT_RX_SCALE
if (sa->rss_channels > 0) {
- rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ EFX_RX_HASHALG_TOEPLITZ,
sa->rss_hash_types, B_TRUE);
if (rc != 0)
goto finish;
- rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_key,
sizeof(sa->rss_key));
if (rc != 0)
goto finish;
- rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
- sizeof(sa->rss_tbl));
+ rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
+ sa->rss_tbl, RTE_DIM(sa->rss_tbl));
}
finish:
-#endif
return rc;
}
+#else
+static int
+sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
+{
+ return 0;
+}
+#endif
int
sfc_rx_start(struct sfc_adapter *sa)
@@ -1243,7 +1296,6 @@ sfc_rx_configure(struct sfc_adapter *sa)
{
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
- unsigned int sw_index;
int rc;
sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
@@ -1296,6 +1348,8 @@ sfc_rx_configure(struct sfc_adapter *sa)
MIN(sa->rxq_count, EFX_MAXRSS) : 0;
if (sa->rss_channels > 0) {
+ unsigned int sw_index;
+
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
}
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index fb79d749..2e7b595b 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -141,7 +141,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
return EMSGSIZE;
- header_paddr = rte_pktmbuf_mtophys(m);
+ header_paddr = rte_pktmbuf_iova(m);
/*
* Sometimes headers may be split across multiple mbufs. In such cases
@@ -155,7 +155,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
header_len);
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
- header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ header_paddr = rte_malloc_virt2iova((void *)tsoh);
} else {
if (m->data_len == header_len) {
*in_off = 0;
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
index 4ef7fc8b..fd2f75c3 100644
--- a/drivers/net/sfc/sfc_tweak.h
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -53,4 +53,7 @@
/** Default free threshold follows recommendations from DPDK documentation */
#define SFC_TX_DEFAULT_FREE_THRESH 32
+/** Number of mbufs to be freed in bulk in a single call */
+#define SFC_TX_REAP_BULK_SIZE 32
+
#endif /* _SFC_TWEAK_H_ */
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index fc439cb6..127d59e6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -91,6 +91,21 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
rc = EINVAL;
}
+ if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
+ sfc_err(sa, "multi-mempool is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
+ sfc_err(sa,
+ "mbuf reference counters are neglected by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
if (!encp->enc_hw_tx_insert_vlan_enabled) {
sfc_err(sa, "VLAN offload is not supported");
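Conversely, an application that guarantees single-mempool, reference-free
mbufs can keep a datapath without these features usable by setting the
corresponding txq_flags. A sketch under that assumption (the helper and its
parameters are illustrative):

#include <rte_ethdev.h>

static int
setup_fast_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
	       unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		/* Promise: one mempool, no mbuf reference counting */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTMEMP |
			     ETH_TXQ_FLAGS_NOREFCOUNT,
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, &txconf);
}

In practice the flags would be OR-ed into dev_info.default_txconf.txq_flags
rather than written from scratch, which is what the sfc_dev_infos_get() hunk
earlier in this patch arranges for datapaths lacking the features.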
@@ -750,7 +765,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
size_t seg_len;
seg_len = m_seg->data_len;
- next_frag = rte_mbuf_data_dma_addr(m_seg);
+ next_frag = rte_mbuf_data_iova(m_seg);
/*
* If we've started TSO transaction few steps earlier,
@@ -977,6 +992,44 @@ sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
}
+static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
+static int
+sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ if (unlikely(offset > txq->ptr_mask))
+ return -EINVAL;
+
+ if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1)))
+ return RTE_ETH_TX_DESC_UNAVAIL;
+
+	/*
+	 * Poll the EvQ to bring the 'txq->pending' figure up to date.
+	 * The queue is required to be running, but the check is
+	 * omitted: the API design leaves it to the caller to satisfy
+	 * all such preconditions
+	 */
+ SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
+ SFC_EFX_TXQ_FLAG_RUNNING);
+ sfc_ev_qpoll(txq->evq);
+
+ /*
+ * Ring tail is 'txq->pending', and although descriptors
+ * between 'txq->completed' and 'txq->pending' are still
+ * in use by the driver, they should be reported as DONE
+ */
+ if (unlikely(offset < (txq->added - txq->pending)))
+ return RTE_ETH_TX_DESC_FULL;
+
+	/*
+	 * There is no separate return value for unused descriptors;
+	 * they are reported as DONE because genuine DONE descriptors
+	 * will be freed in SW on the next burst anyway
+	 */
+ return RTE_ETH_TX_DESC_DONE;
+}
+
struct sfc_dp_tx sfc_efx_tx = {
.dp = {
.name = SFC_KVARG_DATAPATH_EFX,
@@ -985,11 +1038,14 @@ struct sfc_dp_tx sfc_efx_tx = {
},
.features = SFC_DP_TX_FEAT_VLAN_INSERT |
SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_POOL |
+ SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_SEG,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,
.qstart = sfc_efx_tx_qstart,
.qstop = sfc_efx_tx_qstop,
.qreap = sfc_efx_tx_qreap,
+ .qdesc_status = sfc_efx_tx_qdesc_status,
.pkt_burst = sfc_efx_xmit_pkts,
};
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
new file mode 100644
index 00000000..09ed62ea
--- /dev/null
+++ b/drivers/net/softnic/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_softnic.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
+LDLIBS += -lrte_bus_vdev
+
+EXPORT_MAP := rte_pmd_eth_softnic_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_eth_softnic.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
new file mode 100644
index 00000000..3e47c2f9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -0,0 +1,851 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_bus_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_sched.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+#include "rte_eth_softnic_internals.h"
+
+#define DEV_HARD(p) \
+ (&rte_eth_devices[p->hard.port_id])
+
+#define PMD_PARAM_SOFT_TM "soft_tm"
+#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
+#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
+#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
+#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
+#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
+#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
+#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
+#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
+
+#define PMD_PARAM_HARD_NAME "hard_name"
+#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
+
+static const char *pmd_valid_args[] = {
+ PMD_PARAM_SOFT_TM,
+ PMD_PARAM_SOFT_TM_RATE,
+ PMD_PARAM_SOFT_TM_NB_QUEUES,
+ PMD_PARAM_SOFT_TM_QSIZE0,
+ PMD_PARAM_SOFT_TM_QSIZE1,
+ PMD_PARAM_SOFT_TM_QSIZE2,
+ PMD_PARAM_SOFT_TM_QSIZE3,
+ PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ PMD_PARAM_HARD_NAME,
+ PMD_PARAM_HARD_TX_QUEUE_ID,
+ NULL
+};
+
+static const struct rte_eth_dev_info pmd_dev_info = {
+ .min_rx_bufsize = 0,
+ .max_rx_pktlen = UINT32_MAX,
+ .max_rx_queues = UINT16_MAX,
+ .max_tx_queues = UINT16_MAX,
+ .rx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+ .tx_desc_lim = {
+ .nb_max = UINT16_MAX,
+ .nb_min = 0,
+ .nb_align = 1,
+ },
+};
+
+static void
+pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
+}
+
+static int
+pmd_dev_configure(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+
+ if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
+ return -1;
+
+ if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
+ return -1;
+
+ return 0;
+}
+
+static int
+pmd_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool __rte_unused)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (p->params.soft.intrusive == 0) {
+ struct pmd_rx_queue *rxq;
+
+ rxq = rte_zmalloc_socket(p->params.soft.name,
+ sizeof(struct pmd_rx_queue), 0, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+
+ rxq->hard.port_id = p->hard.port_id;
+ rxq->hard.rx_queue_id = rx_queue_id;
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ } else {
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+ void *rxq = hard_dev->data->rx_queues[rx_queue_id];
+
+ if (rxq == NULL)
+ return -1;
+
+ dev->data->rx_queues[rx_queue_id] = rxq;
+ }
+ return 0;
+}
+
+static int
+pmd_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
+ char name[size];
+ struct rte_ring *r;
+
+ snprintf(name, sizeof(name), "%s_txq%04x",
+ dev->data->name, tx_queue_id);
+ r = rte_ring_create(name, nb_tx_desc, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (r == NULL)
+ return -1;
+
+ dev->data->tx_queues[tx_queue_id] = r;
+ return 0;
+}
+
+static int
+pmd_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (tm_used(dev)) {
+ int status = tm_start(p);
+
+ if (status)
+ return status;
+ }
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ if (p->params.soft.intrusive) {
+ struct rte_eth_dev *hard_dev = DEV_HARD(p);
+
+ /* The hard_dev->rx_pkt_burst should be stable by now */
+ dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
+ }
+
+ return 0;
+}
+
+static void
+pmd_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (tm_used(dev))
+ tm_stop(p);
+}
+
+static void
+pmd_dev_close(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+
+ /* TX queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
+}
+
+static int
+pmd_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static int
+pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+{
+ *(const struct rte_tm_ops **)arg =
+ (tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+
+ return 0;
+}
+
+static const struct eth_dev_ops pmd_ops = {
+ .dev_configure = pmd_dev_configure,
+ .dev_start = pmd_dev_start,
+ .dev_stop = pmd_dev_stop,
+ .dev_close = pmd_dev_close,
+ .link_update = pmd_link_update,
+ .dev_infos_get = pmd_dev_infos_get,
+ .rx_queue_setup = pmd_rx_queue_setup,
+ .tx_queue_setup = pmd_tx_queue_setup,
+ .tm_ops_get = pmd_tm_ops_get,
+};
+
+static uint16_t
+pmd_rx_pkt_burst(void *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct pmd_rx_queue *rx_queue = rxq;
+
+ return rte_eth_rx_burst(rx_queue->hard.port_id,
+ rx_queue->hard.rx_queue_id,
+ rx_pkts,
+ nb_pkts);
+}
+
+static uint16_t
+pmd_tx_pkt_burst(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ return (uint16_t)rte_ring_enqueue_burst(txq,
+ (void **)tx_pkts,
+ nb_pkts,
+ NULL);
+}
+
+static __rte_always_inline int
+run_default(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent context: Read Only (update not required) */
+ struct rte_mbuf **pkts = p->soft.def.pkts;
+ uint16_t nb_tx_queues = dev->data->nb_tx_queues;
+
+ /* Persistent context: Read - Write (update required) */
+ uint32_t txq_pos = p->soft.def.txq_pos;
+ uint32_t pkts_len = p->soft.def.pkts_len;
+ uint32_t flush_count = p->soft.def.flush_count;
+
+ /* Not part of the persistent context */
+ uint32_t pos;
+ uint16_t i;
+
+ /* Soft device TXQ read, Hard device TXQ write */
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct rte_ring *txq = dev->data->tx_queues[txq_pos];
+
+ /* Read soft device TXQ burst to packet enqueue buffer */
+ pkts_len += rte_ring_sc_dequeue_burst(txq,
+ (void **)&pkts[pkts_len],
+ DEFAULT_BURST_SIZE,
+ NULL);
+
+ /* Increment soft device TXQ */
+ txq_pos++;
+ if (txq_pos >= nb_tx_queues)
+ txq_pos = 0;
+
+ /* Hard device TXQ write when complete burst is available */
+ if (pkts_len >= DEFAULT_BURST_SIZE) {
+ for (pos = 0; pos < pkts_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts[pos],
+ (uint16_t)(pkts_len - pos));
+
+ pkts_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+
+ if (flush_count >= FLUSH_COUNT_THRESHOLD) {
+ for (pos = 0; pos < pkts_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts[pos],
+ (uint16_t)(pkts_len - pos));
+
+ pkts_len = 0;
+ flush_count = 0;
+ }
+
+ p->soft.def.txq_pos = txq_pos;
+ p->soft.def.pkts_len = pkts_len;
+ p->soft.def.flush_count = flush_count + 1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+run_tm(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* Persistent context: Read Only (update not required) */
+ struct rte_sched_port *sched = p->soft.tm.sched;
+ struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
+ struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
+ uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
+ uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
+ uint16_t nb_tx_queues = dev->data->nb_tx_queues;
+
+ /* Persistent context: Read - Write (update required) */
+ uint32_t txq_pos = p->soft.tm.txq_pos;
+ uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
+ uint32_t flush_count = p->soft.tm.flush_count;
+
+ /* Not part of the persistent context */
+ uint32_t pkts_deq_len, pos;
+ uint16_t i;
+
+ /* Soft device TXQ read, TM enqueue */
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct rte_ring *txq = dev->data->tx_queues[txq_pos];
+
+ /* Read TXQ burst to packet enqueue buffer */
+ pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
+ (void **)&pkts_enq[pkts_enq_len],
+ enq_bsz,
+ NULL);
+
+ /* Increment TXQ */
+ txq_pos++;
+ if (txq_pos >= nb_tx_queues)
+ txq_pos = 0;
+
+ /* TM enqueue when complete burst is available */
+ if (pkts_enq_len >= enq_bsz) {
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ break;
+ }
+ }
+
+ if (flush_count >= FLUSH_COUNT_THRESHOLD) {
+ if (pkts_enq_len)
+ rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
+
+ pkts_enq_len = 0;
+ flush_count = 0;
+ }
+
+ p->soft.tm.txq_pos = txq_pos;
+ p->soft.tm.pkts_enq_len = pkts_enq_len;
+ p->soft.tm.flush_count = flush_count + 1;
+
+ /* TM dequeue, Hard device TXQ write */
+ pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
+
+ for (pos = 0; pos < pkts_deq_len; )
+ pos += rte_eth_tx_burst(p->hard.port_id,
+ p->params.hard.tx_queue_id,
+ &pkts_deq[pos],
+ (uint16_t)(pkts_deq_len - pos));
+
+ return 0;
+}
+
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
+}
+
+static struct ether_addr eth_addr = { .addr_bytes = {0} };
+
+static uint32_t
+eth_dev_speed_max_mbps(uint32_t speed_capa)
+{
+ uint32_t rate_mbps[32] = {
+ ETH_SPEED_NUM_NONE,
+ ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_10M,
+ ETH_SPEED_NUM_100M,
+ ETH_SPEED_NUM_100M,
+ ETH_SPEED_NUM_1G,
+ ETH_SPEED_NUM_2_5G,
+ ETH_SPEED_NUM_5G,
+ ETH_SPEED_NUM_10G,
+ ETH_SPEED_NUM_20G,
+ ETH_SPEED_NUM_25G,
+ ETH_SPEED_NUM_40G,
+ ETH_SPEED_NUM_50G,
+ ETH_SPEED_NUM_56G,
+ ETH_SPEED_NUM_100G,
+ };
+
+ uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
+ return rate_mbps[pos];
+}
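The table lookup above keys off the most significant capability bit:
31 - __builtin_clz(speed_capa) is the index of the highest bit set. A worked
example, assuming the 17.11 rte_ethdev.h bit layout (ETH_LINK_SPEED_1G is
bit 5, ETH_LINK_SPEED_10G is bit 8):

#include <rte_ethdev.h>

static uint32_t
speed_example(void)
{
	uint32_t speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	uint32_t pos = 31 - __builtin_clz(speed_capa); /* == 8 */

	/* rate_mbps[8] == ETH_SPEED_NUM_10G, i.e. 10000 Mbps */
	return pos;
}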
+
+static int
+default_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
+ 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.def.pkts == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+default_free(struct pmd_internals *p)
+{
+ rte_free(p->soft.def.pkts);
+}
+
+static void *
+pmd_init(struct pmd_params *params, int numa_node)
+{
+ struct pmd_internals *p;
+ int status;
+
+ p = rte_zmalloc_socket(params->soft.name,
+ sizeof(struct pmd_internals),
+ 0,
+ numa_node);
+ if (p == NULL)
+ return NULL;
+
+ memcpy(&p->params, params, sizeof(p->params));
+ rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);
+
+ /* Default */
+ status = default_init(p, params, numa_node);
+ if (status) {
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+
+	/* Traffic Management (TM) */
+ if (params->soft.flags & PMD_FEATURE_TM) {
+ status = tm_init(p, params, numa_node);
+ if (status) {
+ default_free(p);
+ free(p->params.hard.name);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ return p;
+}
+
+static void
+pmd_free(struct pmd_internals *p)
+{
+ if (p->params.soft.flags & PMD_FEATURE_TM)
+ tm_free(p);
+
+ default_free(p);
+
+ free(p->params.hard.name);
+ rte_free(p);
+}
+
+static int
+pmd_ethdev_register(struct rte_vdev_device *vdev,
+ struct pmd_params *params,
+ void *dev_private)
+{
+ struct rte_eth_dev_info hard_info;
+ struct rte_eth_dev *soft_dev;
+ uint32_t hard_speed;
+ int numa_node;
+ uint16_t hard_port_id;
+
+ rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
+ rte_eth_dev_info_get(hard_port_id, &hard_info);
+ hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
+ numa_node = rte_eth_dev_socket_id(hard_port_id);
+
+ /* Ethdev entry allocation */
+ soft_dev = rte_eth_dev_allocate(params->soft.name);
+ if (!soft_dev)
+ return -ENOMEM;
+
+ /* dev */
+ soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
+ NULL : /* set up later */
+ pmd_rx_pkt_burst;
+ soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
+ soft_dev->tx_pkt_prepare = NULL;
+ soft_dev->dev_ops = &pmd_ops;
+ soft_dev->device = &vdev->device;
+
+ /* dev->data */
+ soft_dev->data->dev_private = dev_private;
+ soft_dev->data->dev_link.link_speed = hard_speed;
+ soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ soft_dev->data->dev_link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ soft_dev->data->mac_addrs = &eth_addr;
+ soft_dev->data->promiscuous = 1;
+ soft_dev->data->kdrv = RTE_KDRV_NONE;
+ soft_dev->data->numa_node = numa_node;
+
+ return 0;
+}
+
+static int
+get_string(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint32_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ int i, ret;
+
+ kvlist = rte_kvargs_parse(params, pmd_valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Set default values */
+ memset(p, 0, sizeof(*p));
+ p->soft.name = name;
+ p->soft.intrusive = INTRUSIVE;
+ p->soft.tm.rate = 0;
+ p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
+ p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
+ p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
+ p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
+
+ /* SOFT: TM (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
+ char *s;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
+ &get_string, &s);
+ if (ret < 0)
+ goto out_free;
+
+ if (strcmp(s, "on") == 0)
+ p->soft.flags |= PMD_FEATURE_TM;
+ else if (strcmp(s, "off") == 0)
+ p->soft.flags &= ~PMD_FEATURE_TM;
+ else
+ ret = -EINVAL;
+
+ free(s);
+ if (ret)
+ goto out_free;
+ }
+
+ /* SOFT: TM rate (measured in bytes/second) (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
+ &get_uint32, &p->soft.tm.rate);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
+ &get_uint32, &p->soft.tm.nb_queues);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[0] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[1] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[2] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
+ uint32_t qsize;
+
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
+ &get_uint32, &qsize);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.tm.qsize[3] = (uint16_t)qsize;
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM enqueue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
+ &get_uint32, &p->soft.tm.enq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* SOFT: TM dequeue burst size (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
+ &get_uint32, &p->soft.tm.deq_bsz);
+ if (ret < 0)
+ goto out_free;
+
+ p->soft.flags |= PMD_FEATURE_TM;
+ }
+
+ /* HARD: name (mandatory) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
+ &get_string, &p->hard.name);
+ if (ret < 0)
+ goto out_free;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* HARD: tx_queue_id (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
+ &get_uint32, &p->hard.tx_queue_id);
+ if (ret < 0)
+ goto out_free;
+ }
+
+out_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+pmd_probe(struct rte_vdev_device *vdev)
+{
+ struct pmd_params p;
+ const char *params;
+ int status;
+
+ struct rte_eth_dev_info hard_info;
+ uint32_t hard_speed;
+ uint16_t hard_port_id;
+ int numa_node;
+ void *dev_private;
+
+ RTE_LOG(INFO, PMD,
+ "Probing device \"%s\"\n",
+ rte_vdev_device_name(vdev));
+
+ /* Parse input arguments */
+ params = rte_vdev_device_args(vdev);
+ if (!params)
+ return -EINVAL;
+
+ status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
+ if (status)
+ return status;
+
+ /* Check input arguments */
+ if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
+ return -EINVAL;
+
+ rte_eth_dev_info_get(hard_port_id, &hard_info);
+ hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
+ numa_node = rte_eth_dev_socket_id(hard_port_id);
+
+ if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
+ return -EINVAL;
+
+ if (p.soft.flags & PMD_FEATURE_TM) {
+ status = tm_params_check(&p, hard_speed);
+
+ if (status)
+ return status;
+ }
+
+ /* Allocate and initialize soft ethdev private data */
+ dev_private = pmd_init(&p, numa_node);
+ if (dev_private == NULL)
+ return -ENOMEM;
+
+ /* Register soft ethdev */
+ RTE_LOG(INFO, PMD,
+ "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
+ p.soft.name, p.hard.name);
+
+ status = pmd_ethdev_register(vdev, &p, dev_private);
+ if (status) {
+ pmd_free(dev_private);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+pmd_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *dev = NULL;
+ struct pmd_internals *p;
+
+ if (!vdev)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
+ rte_vdev_device_name(vdev));
+
+ /* Find the ethdev entry */
+ dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
+ if (dev == NULL)
+ return -ENODEV;
+ p = dev->data->dev_private;
+
+	/* Free device data structures */
+ pmd_free(p);
+ rte_free(dev->data);
+ rte_eth_dev_release_port(dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_softnic_drv = {
+ .probe = pmd_probe,
+ .remove = pmd_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
+ PMD_PARAM_SOFT_TM "=on|off "
+ PMD_PARAM_SOFT_TM_RATE "=<int> "
+ PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
+ PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
+ PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
+ PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
+ PMD_PARAM_HARD_NAME "=<string> "
+ PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
new file mode 100644
index 00000000..b49e5829
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_NB_QUEUES
+#define SOFTNIC_SOFT_TM_NB_QUEUES 65536
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE
+#define SOFTNIC_SOFT_TM_QUEUE_SIZE 64
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ
+#define SOFTNIC_SOFT_TM_ENQ_BSZ 32
+#endif
+
+#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ
+#define SOFTNIC_SOFT_TM_DEQ_BSZ 24
+#endif
+
+#ifndef SOFTNIC_HARD_TX_QUEUE_ID
+#define SOFTNIC_HARD_TX_QUEUE_ID 0
+#endif
+
+/**
+ * Run the traffic management function on the softnic device
+ *
+ * This function reads packets from the softnic input queues, inserts them
+ * into the QoS scheduler queues based on the mbuf sched field value and
+ * transmits the scheduled packets out through the hard device interface.
+ *
+ * @param port_id
+ *   Port ID of the soft device.
+ * @return
+ *   Zero.
+ */
+
+int
+rte_pmd_softnic_run(uint16_t port_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_H__ */
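Since the soft device only moves packets when rte_pmd_softnic_run() is
invoked, the forwarding core must call it on every loop iteration. A hedged
sketch of such a loop (soft_port, quit and BURST are assumptions, not part
of the API):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "rte_eth_softnic.h"

#define BURST 32

static void
softnic_fwd_loop(uint16_t soft_port, volatile int *quit)
{
	struct rte_mbuf *pkts[BURST];

	while (!*quit) {
		/* RX from the soft port and TX back through it */
		uint16_t nb = rte_eth_rx_burst(soft_port, 0, pkts, BURST);
		uint16_t sent = rte_eth_tx_burst(soft_port, 0, pkts, nb);

		while (sent < nb)
			rte_pktmbuf_free(pkts[sent++]);

		/* Drain the soft TX queues through TM to the hard port */
		rte_pmd_softnic_run(soft_port);
	}
}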
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
new file mode 100644
index 00000000..1f758069
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -0,0 +1,291 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_sched.h>
+#include <rte_ethdev.h>
+#include <rte_tm_driver.h>
+
+#include "rte_eth_softnic.h"
+
+/**
+ * PMD Parameters
+ */
+
+enum pmd_feature {
+ PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
+};
+
+#ifndef INTRUSIVE
+#define INTRUSIVE 0
+#endif
+
+struct pmd_params {
+ /** Parameters for the soft device (to be created) */
+ struct {
+ const char *name; /**< Name */
+ uint32_t flags; /**< Flags */
+
+		/** 0 = Access the hard device through the API only
+		 *  (potentially slower, but safer);
+		 *  1 = Access to the hard device private data structures is
+		 *  allowed (potentially faster).
+ */
+ int intrusive;
+
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t rate; /**< Rate (bytes/second) */
+ uint32_t nb_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ /**< Queue size per traffic class */
+ uint32_t enq_bsz; /**< Enqueue burst size */
+ uint32_t deq_bsz; /**< Dequeue burst size */
+ } tm;
+ } soft;
+
+ /** Parameters for the hard device (existing) */
+ struct {
+ char *name; /**< Name */
+ uint16_t tx_queue_id; /**< TX queue ID */
+ } hard;
+};
+
+/**
+ * Default Internals
+ */
+
+#ifndef DEFAULT_BURST_SIZE
+#define DEFAULT_BURST_SIZE 32
+#endif
+
+#ifndef FLUSH_COUNT_THRESHOLD
+#define FLUSH_COUNT_THRESHOLD (1 << 17)
+#endif
+
+struct default_internals {
+ struct rte_mbuf **pkts;
+ uint32_t pkts_len;
+ uint32_t txq_pos;
+ uint32_t flush_count;
+};
+
+/**
+ * Traffic Management (TM) Internals
+ */
+
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS 8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT 4096
+#endif
+
+struct tm_params {
+ struct rte_sched_port_params port_params;
+
+ struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+
+ struct rte_sched_pipe_params
+ pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ uint32_t n_pipe_profiles;
+ uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
+/* TM Levels */
+enum tm_node_level {
+ TM_NODE_LEVEL_PORT = 0,
+ TM_NODE_LEVEL_SUBPORT,
+ TM_NODE_LEVEL_PIPE,
+ TM_NODE_LEVEL_TC,
+ TM_NODE_LEVEL_QUEUE,
+ TM_NODE_LEVEL_MAX,
+};
+
+/* TM Shaper Profile */
+struct tm_shaper_profile {
+ TAILQ_ENTRY(tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t n_users;
+ struct rte_tm_shaper_params params;
+};
+
+TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);
+
+/* TM Shared Shaper */
+struct tm_shared_shaper {
+ TAILQ_ENTRY(tm_shared_shaper) node;
+ uint32_t shared_shaper_id;
+ uint32_t n_users;
+ uint32_t shaper_profile_id;
+};
+
+TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);
+
+/* TM WRED Profile */
+struct tm_wred_profile {
+ TAILQ_ENTRY(tm_wred_profile) node;
+ uint32_t wred_profile_id;
+ uint32_t n_users;
+ struct rte_tm_wred_params params;
+};
+
+TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);
+
+/* TM Node */
+struct tm_node {
+ TAILQ_ENTRY(tm_node) node;
+ uint32_t node_id;
+ uint32_t parent_node_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t level;
+ struct tm_node *parent_node;
+ struct tm_shaper_profile *shaper_profile;
+ struct tm_wred_profile *wred_profile;
+ struct rte_tm_node_params params;
+ struct rte_tm_node_stats stats;
+ uint32_t n_children;
+};
+
+TAILQ_HEAD(tm_node_list, tm_node);
+
+/* TM Hierarchy Specification */
+struct tm_hierarchy {
+ struct tm_shaper_profile_list shaper_profiles;
+ struct tm_shared_shaper_list shared_shapers;
+ struct tm_wred_profile_list wred_profiles;
+ struct tm_node_list nodes;
+
+ uint32_t n_shaper_profiles;
+ uint32_t n_shared_shapers;
+ uint32_t n_wred_profiles;
+ uint32_t n_nodes;
+
+ uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
+};
+
+struct tm_internals {
+	/** Hierarchy specification
+	 *
+	 *  - Hierarchy is unfrozen at init and when the port is stopped.
+	 *  - Hierarchy is frozen on successful hierarchy commit.
+	 *  - Run-time hierarchy changes are not allowed, so the hierarchy
+	 *    stays frozen while the port is running.
+	 */
+ struct tm_hierarchy h;
+ int hierarchy_frozen;
+
+ /** Blueprints */
+ struct tm_params params;
+
+ /** Run-time */
+ struct rte_sched_port *sched;
+ struct rte_mbuf **pkts_enq;
+ struct rte_mbuf **pkts_deq;
+ uint32_t pkts_enq_len;
+ uint32_t txq_pos;
+ uint32_t flush_count;
+};
+
+/**
+ * PMD Internals
+ */
+struct pmd_internals {
+ /** Params */
+ struct pmd_params params;
+
+ /** Soft device */
+ struct {
+ struct default_internals def; /**< Default */
+ struct tm_internals tm; /**< Traffic Management */
+ } soft;
+
+ /** Hard device */
+ struct {
+ uint16_t port_id;
+ } hard;
+};
+
+struct pmd_rx_queue {
+ /** Hard device */
+ struct {
+ uint16_t port_id;
+ uint16_t rx_queue_id;
+ } hard;
+};
+
+/**
+ * Traffic Management (TM) Operation
+ */
+extern const struct rte_tm_ops pmd_tm_ops;
+
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate);
+
+int
+tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+
+void
+tm_free(struct pmd_internals *p);
+
+int
+tm_start(struct pmd_internals *p);
+
+void
+tm_stop(struct pmd_internals *p);
+
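+/* True when the TM feature flag was selected at device creation time. */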
+static inline int
+tm_enabled(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_TM);
+}
+
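+/* True when TM is enabled and the hierarchy has a root (port-level) node. */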
+static inline int
+tm_used(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ return (p->params.soft.flags & PMD_FEATURE_TM) &&
+ p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+}
+
+#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
new file mode 100644
index 00000000..dbb25143
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -0,0 +1,3452 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define BYTES_IN_MBPS (1000 * 1000 / 8)
+#define SUBPORT_TC_PERIOD 10
+#define PIPE_TC_PERIOD 40
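+
+/* The hard device rate is given in Mbps and converted to bytes/second.
+ * SUBPORT_TC_PERIOD and PIPE_TC_PERIOD are the traffic class rate
+ * enforcement periods, in milliseconds, passed to librte_sched.
+ */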
+
+int
+tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+{
+ uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
+ uint32_t i;
+
+ /* rate */
+ if (params->soft.tm.rate) {
+ if (params->soft.tm.rate > hard_rate_bytes_per_sec)
+ return -EINVAL;
+ } else {
+ params->soft.tm.rate =
+ (hard_rate_bytes_per_sec > UINT32_MAX) ?
+ UINT32_MAX : hard_rate_bytes_per_sec;
+ }
+
+ /* nb_queues */
+ if (params->soft.tm.nb_queues == 0)
+ return -EINVAL;
+
+ if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
+ params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+
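+	/* Round up to a power of 2 so the queue count maps cleanly onto
+	 * the pipe/TC/queue structure expected by librte_sched.
+	 */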
+ params->soft.tm.nb_queues =
+ rte_align32pow2(params->soft.tm.nb_queues);
+
+ /* qsize */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (params->soft.tm.qsize[i] == 0)
+ return -EINVAL;
+
+ params->soft.tm.qsize[i] =
+ rte_align32pow2(params->soft.tm.qsize[i]);
+ }
+
+ /* enq_bsz, deq_bsz */
+ if (params->soft.tm.enq_bsz == 0 ||
+ params->soft.tm.deq_bsz == 0 ||
+ params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+tm_hierarchy_init(struct pmd_internals *p)
+{
+ memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+
+ /* Initialize shaper profile list */
+ TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
+
+ /* Initialize shared shaper list */
+ TAILQ_INIT(&p->soft.tm.h.shared_shapers);
+
+ /* Initialize wred profile list */
+ TAILQ_INIT(&p->soft.tm.h.wred_profiles);
+
+ /* Initialize TM node list */
+ TAILQ_INIT(&p->soft.tm.h.nodes);
+}
+
+static void
+tm_hierarchy_uninit(struct pmd_internals *p)
+{
+	/* Remove all nodes */
+ for ( ; ; ) {
+ struct tm_node *tm_node;
+
+ tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
+ if (tm_node == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
+ free(tm_node);
+ }
+
+ /* Remove all WRED profiles */
+ for ( ; ; ) {
+ struct tm_wred_profile *wred_profile;
+
+ wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
+ if (wred_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
+ free(wred_profile);
+ }
+
+ /* Remove all shared shapers */
+ for ( ; ; ) {
+ struct tm_shared_shaper *shared_shaper;
+
+ shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
+ if (shared_shaper == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
+ free(shared_shaper);
+ }
+
+ /* Remove all shaper profiles */
+ for ( ; ; ) {
+ struct tm_shaper_profile *shaper_profile;
+
+ shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
+ if (shaper_profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
+ shaper_profile, node);
+ free(shaper_profile);
+ }
+
+ memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+}
+
+int
+tm_init(struct pmd_internals *p,
+ struct pmd_params *params,
+ int numa_node)
+{
+ uint32_t enq_bsz = params->soft.tm.enq_bsz;
+ uint32_t deq_bsz = params->soft.tm.deq_bsz;
+
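+	/* The enqueue buffer is sized at twice the enqueue burst size,
+	 * leaving room for packets left over from a previous iteration
+	 * while a new burst is read in before the flush to the scheduler.
+	 */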
+ p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
+ 2 * enq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_enq == NULL)
+ return -ENOMEM;
+
+ p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
+ deq_bsz * sizeof(struct rte_mbuf *),
+ 0,
+ numa_node);
+
+ if (p->soft.tm.pkts_deq == NULL) {
+ rte_free(p->soft.tm.pkts_enq);
+ return -ENOMEM;
+ }
+
+ tm_hierarchy_init(p);
+
+ return 0;
+}
+
+void
+tm_free(struct pmd_internals *p)
+{
+ tm_hierarchy_uninit(p);
+ rte_free(p->soft.tm.pkts_enq);
+ rte_free(p->soft.tm.pkts_deq);
+}
+
+int
+tm_start(struct pmd_internals *p)
+{
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return -1;
+
+ /* Port */
+ p->soft.tm.sched = rte_sched_port_config(&t->port_params);
+ if (p->soft.tm.sched == NULL)
+ return -1;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport =
+ t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+
+ status = rte_sched_subport_config(p->soft.tm.sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+
+		/* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
+ pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(p->soft.tm.sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(p->soft.tm.sched);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+tm_stop(struct pmd_internals *p)
+{
+ if (p->soft.tm.sched)
+ rte_sched_port_free(p->soft.tm.sched);
+
+ /* Unfreeze hierarchy */
+ p->soft.tm.hierarchy_frozen = 0;
+}
+
+static struct tm_shaper_profile *
+tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+
+ TAILQ_FOREACH(sp, spl, node)
+ if (shaper_profile_id == sp->shaper_profile_id)
+ return sp;
+
+ return NULL;
+}
+
+static struct tm_shared_shaper *
+tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
+ struct tm_shared_shaper *ss;
+
+ TAILQ_FOREACH(ss, ssl, node)
+ if (shared_shaper_id == ss->shared_shaper_id)
+ return ss;
+
+ return NULL;
+}
+
+static struct tm_wred_profile *
+tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wred_profile_id == wp->wred_profile_id)
+ return wp;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->node_id == node_id)
+ return n;
+
+ return NULL;
+}
+
+static struct tm_node *
+tm_root_node_present(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node)
+ if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
+ return n;
+
+ return NULL;
+}
+
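+/* librte_sched identifies subports, pipes and queues by their rank within
+ * the level (list insertion order), not by the rte_tm node ID; the helpers
+ * below compute that rank.
+ */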
+static uint32_t
+tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *ns;
+ uint32_t subport_id;
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->node_id == subport_node->node_id)
+ return subport_id;
+
+ subport_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *np;
+ uint32_t pipe_id;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != pipe_node->parent_node_id)
+ continue;
+
+ if (np->node_id == pipe_node->node_id)
+ return pipe_id;
+
+ pipe_id++;
+ }
+
+ return UINT32_MAX;
+}
+
+static uint32_t
+tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
+{
+ return tc_node->priority;
+}
+
+static uint32_t
+tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *nq;
+ uint32_t queue_id;
+
+ queue_id = 0;
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != queue_node->parent_node_id)
+ continue;
+
+ if (nq->node_id == queue_node->node_id)
+ return queue_id;
+
+ queue_id++;
+ }
+
+ return UINT32_MAX;
+}
+
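+/* Derive per-level node limits from the configured queue count: each TC
+ * owns RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS queues, each pipe owns
+ * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE TCs, and the subport limit is set
+ * equal to the pipe limit.
+ */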
+static uint32_t
+tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+ uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+ uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_subports_max = n_pipes_max;
+ uint32_t n_root_max = 1;
+
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ return n_root_max;
+ case TM_NODE_LEVEL_SUBPORT:
+ return n_subports_max;
+ case TM_NODE_LEVEL_PIPE:
+ return n_pipes_max;
+ case TM_NODE_LEVEL_TC:
+ return n_tc_max;
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ return n_queues_max;
+ }
+}
+
+/* Traffic manager node type get */
+static int
+pmd_tm_node_type_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ if (is_leaf == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (node_id == RTE_TM_NODE_ID_NULL ||
+ (tm_node_search(dev, node_id) == NULL))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
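+	/* By convention, node IDs below the queue count denote leaf (queue)
+	 * nodes; all other IDs denote non-leaf nodes.
+	 */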
+ *is_leaf = node_id < p->params.soft.tm.nb_queues;
+
+ return 0;
+}
+
+#ifdef RTE_SCHED_RED
+#define WRED_SUPPORTED 1
+#else
+#define WRED_SUPPORTED 0
+#endif
+
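+/* Stats reported for every node; queue (leaf) nodes additionally report
+ * the number of packets currently queued.
+ */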
+#define STATS_MASK_DEFAULT \
+ (RTE_TM_STATS_N_PKTS | \
+ RTE_TM_STATS_N_BYTES | \
+ RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
+ RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE \
+ (STATS_MASK_DEFAULT | \
+ RTE_TM_STATS_N_PKTS_QUEUED)
+
+static const struct rte_tm_capabilities tm_cap = {
+ .n_nodes_max = UINT32_MAX,
+ .n_levels_max = TM_NODE_LEVEL_MAX,
+
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ .shaper_n_max = UINT32_MAX,
+ .shaper_private_n_max = UINT32_MAX,
+ .shaper_private_dual_rate_n_max = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+
+ .shaper_shared_n_max = UINT32_MAX,
+ .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
+ .shaper_shared_n_shapers_per_node_max = 1,
+ .shaper_shared_dual_rate_n_max = 0,
+ .shaper_shared_rate_min = 1,
+ .shaper_shared_rate_max = UINT32_MAX,
+
+ .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+ .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_n_max = 0,
+ .cman_wred_context_private_n_max = 0,
+ .cman_wred_context_shared_n_max = 0,
+ .cman_wred_context_shared_n_nodes_per_context_max = 0,
+ .cman_wred_context_shared_n_contexts_per_node_max = 0,
+
+ .mark_vlan_dei_supported = {0, 0, 0},
+ .mark_ip_ecn_tcp_supported = {0, 0, 0},
+ .mark_ip_ecn_sctp_supported = {0, 0, 0},
+ .mark_ip_dscp_supported = {0, 0, 0},
+
+ .dynamic_update_mask = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+};
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_cap, sizeof(*cap));
+
+ cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->shaper_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
+
+ cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
+
+ cap->shaper_n_max = cap->shaper_private_n_max +
+ cap->shaper_shared_n_max;
+
+ cap->shaper_shared_n_nodes_per_shaper_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
+
+ cap->sched_n_children_max = RTE_MAX(
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
+ (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
+
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+
+ if (WRED_SUPPORTED)
+ cap->cman_wred_context_private_n_max =
+ tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+ cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
+ cap->cman_wred_context_shared_n_max;
+
+ return 0;
+}
+
+static const struct rte_tm_level_capabilities tm_level_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .n_nodes_max = 1,
+ .n_nodes_nonleaf_max = 1,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ .sched_wfq_weight_max = UINT32_MAX,
+#else
+ .sched_wfq_weight_max = 1,
+#endif
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = UINT32_MAX,
+ .n_nodes_leaf_max = 0,
+ .non_leaf_nodes_identical = 1,
+ .leaf_nodes_identical = 0,
+
+ .nonleaf = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .n_nodes_max = UINT32_MAX,
+ .n_nodes_nonleaf_max = 0,
+ .n_nodes_leaf_max = UINT32_MAX,
+ .non_leaf_nodes_identical = 0,
+ .leaf_nodes_identical = 1,
+
+ .leaf = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+
+ .stats_mask = STATS_MASK_QUEUE,
+ },
+ },
+};
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (level_id >= TM_NODE_LEVEL_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
+
+ switch (level_id) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_TC);
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ cap->n_nodes_max = tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_QUEUE);
+ cap->n_nodes_leaf_max = cap->n_nodes_max;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct rte_tm_node_capabilities tm_node_cap[] = {
+ [TM_NODE_LEVEL_PORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = 1,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_SUBPORT] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max = UINT32_MAX,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max = UINT32_MAX,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_PIPE] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 0,
+
+ .nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_sp_n_priorities_max =
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ .sched_wfq_n_children_per_group_max = 1,
+ .sched_wfq_n_groups_max = 0,
+ .sched_wfq_weight_max = 1,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_TC] = {
+ .shaper_private_supported = 1,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 1,
+ .shaper_private_rate_max = UINT32_MAX,
+ .shaper_shared_n_max = 1,
+
+ .nonleaf = {
+ .sched_n_children_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_sp_n_priorities_max = 1,
+ .sched_wfq_n_children_per_group_max =
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ .sched_wfq_n_groups_max = 1,
+ .sched_wfq_weight_max = UINT32_MAX,
+ },
+
+ .stats_mask = STATS_MASK_DEFAULT,
+ },
+
+ [TM_NODE_LEVEL_QUEUE] = {
+ .shaper_private_supported = 0,
+ .shaper_private_dual_rate_supported = 0,
+ .shaper_private_rate_min = 0,
+ .shaper_private_rate_max = 0,
+ .shaper_shared_n_max = 0,
+
+ .leaf = {
+ .cman_head_drop_supported = 0,
+ .cman_wred_context_private_supported = WRED_SUPPORTED,
+ .cman_wred_context_shared_n_max = 0,
+ },
+
+ .stats_mask = STATS_MASK_QUEUE,
+ },
+};
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct tm_node *tm_node;
+
+ if (cap == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ tm_node = tm_node_search(dev, node_id);
+ if (tm_node == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
+
+ switch (tm_node->level) {
+ case TM_NODE_LEVEL_PORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_SUBPORT);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ cap->nonleaf.sched_n_children_max =
+ tm_level_get_max_nodes(dev,
+ TM_NODE_LEVEL_PIPE);
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ case TM_NODE_LEVEL_TC:
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+shaper_profile_check(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_shaper_profile *sp;
+
+ /* Shaper profile ID must not be NONE. */
+ if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must not exist. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak rate: non-zero, 32-bit */
+ if (profile->peak.rate == 0 ||
+ profile->peak.rate >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Peak size: non-zero, 32-bit */
+ if (profile->peak.size == 0 ||
+ profile->peak.size >= UINT32_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Dual-rate profiles are not supported. */
+ if (profile->committed.rate != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Packet length adjust: 24 bytes */
+ if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+/* Traffic manager shaper profile add */
+static int
+pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
+ struct tm_shaper_profile *sp;
+ int status;
+
+ /* Check input params */
+ status = shaper_profile_check(dev, shaper_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ sp = calloc(1, sizeof(struct tm_shaper_profile));
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ sp->shaper_profile_id = shaper_profile_id;
+ memcpy(&sp->params, profile, sizeof(sp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(spl, sp, node);
+ p->soft.tm.h.n_shaper_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp;
+
+ /* Check existing */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (sp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
+ p->soft.tm.h.n_shaper_profiles--;
+ free(sp);
+
+ return 0;
+}
+
+static struct tm_node *
+tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
+ struct tm_shared_shaper *ss)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+	/* Find the (first) TC node that uses this shared shaper. */
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->params.n_shared_shapers == 0 ||
+ n->params.shared_shaper_id[0] != ss->shared_shaper_id)
+ continue;
+
+ return n;
+ }
+
+ return NULL;
+}
+
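+/* A shared shaper on a TC node maps onto the subport-level TC rate in
+ * librte_sched, so a profile update requires reconfiguring the subport
+ * that the TC belongs to.
+ */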
+static int
+update_subport_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shared_shaper *ss,
+ struct tm_shaper_profile *sp_new)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
+ ss->shaper_profile_id);
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(p->soft.tm.sched,
+ subport_id, &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ sp_old->n_users--;
+
+ ss->shaper_profile_id = sp_new->shaper_profile_id;
+ sp_new->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+/* Traffic manager shared shaper add/update */
+static int
+pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+ struct tm_node *nt;
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Add new shared shaper */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL) {
+ struct tm_shared_shaper_list *ssl =
+ &p->soft.tm.h.shared_shapers;
+
+ /* Hierarchy must not be frozen */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Memory allocation */
+ ss = calloc(1, sizeof(struct tm_shared_shaper));
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ ss->shared_shaper_id = shared_shaper_id;
+ ss->shaper_profile_id = shaper_profile_id;
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(ssl, ss, node);
+ p->soft.tm.h.n_shared_shapers++;
+
+ return 0;
+ }
+
+	/* Update existing shared shaper */
+ /* Hierarchy must be frozen (run-time update) */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Propagate change. */
+ nt = tm_shared_shaper_get_tc(dev, ss);
+ if (update_subport_tc_rate(dev, nt, ss, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+/* Traffic manager shared shaper delete */
+static int
+pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
+ uint32_t shared_shaper_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shared_shaper *ss;
+
+ /* Check existing */
+ ss = tm_shared_shaper_search(dev, shared_shaper_id);
+ if (ss == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (ss->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
+ p->soft.tm.h.n_shared_shapers--;
+ free(ss);
+
+ return 0;
+}
+
+static int
+wred_profile_check(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct tm_wred_profile *wp;
+ enum rte_tm_color color;
+
+ /* WRED profile ID must not be NONE. */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WRED profile must not exist. */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp)
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ /* Profile must not be NULL. */
+ if (profile == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* min_th <= max_th, max_th > 0 */
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ uint16_t min_th = profile->red_params[color].min_th;
+ uint16_t max_th = profile->red_params[color].max_th;
+
+ if (min_th > max_th || max_th == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager WRED profile add */
+static int
+pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_wred_params *profile,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
+ struct tm_wred_profile *wp;
+ int status;
+
+ /* Check input params */
+ status = wred_profile_check(dev, wred_profile_id, profile, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ wp = calloc(1, sizeof(struct tm_wred_profile));
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ wp->wred_profile_id = wred_profile_id;
+ memcpy(&wp->params, profile, sizeof(wp->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(wpl, wp, node);
+ p->soft.tm.h.n_wred_profiles++;
+
+ return 0;
+}
+
+/* Traffic manager WRED profile delete */
+static int
+pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
+ uint32_t wred_profile_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_wred_profile *wp;
+
+ /* Check existing */
+ wp = tm_wred_profile_search(dev, wred_profile_id);
+ if (wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (wp->n_users)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
+ p->soft.tm.h.n_wred_profiles--;
+ free(wp);
+
+ return 0;
+}
+
+static int
+node_add_check_port(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid.
+ * Shaper profile peak rate must fit the configured port rate.
+ */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ sp == NULL ||
+ sp->params.peak.rate > p->params.soft.tm.rate)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_subport(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_pipe(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Number of SP priorities must equal the TC count per pipe */
+ if (params->nonleaf.n_sp_priorities !=
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WFQ mode must be byte mode */
+ if (params->nonleaf.wfq_weight_mode != NULL &&
+ params->nonleaf.wfq_weight_mode[0] != 0 &&
+ params->nonleaf.wfq_weight_mode[1] != 0 &&
+ params->nonleaf.wfq_weight_mode[2] != 0 &&
+ params->nonleaf.wfq_weight_mode[3] != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_tc(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority __rte_unused,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Single valid shared shaper */
+ if (params->n_shared_shapers > 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (params->n_shared_shapers == 1 &&
+ (params->shared_shaper_id == NULL ||
+ (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_queue(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: leaf */
+ if (node_id >= p->params.soft.tm.nb_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shaper */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management must not be head drop */
+ if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Congestion management set to WRED */
+ if (params->leaf.cman == RTE_TM_CMAN_WRED) {
+ uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
+ struct tm_wred_profile *wp = tm_wred_profile_search(dev,
+ wred_profile_id);
+
+ /* WRED profile (for private WRED context) must be valid */
+ if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
+ wp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared WRED contexts */
+ if (params->leaf.wred.n_shared_wred_contexts != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_QUEUE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct tm_node *pn;
+ uint32_t level;
+ int status;
+
+ /* node_id, parent_node_id:
+ * -node_id must not be RTE_TM_NODE_ID_NULL
+ * -node_id must not be in use
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -root node must not exist
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -parent_node_id must be valid
+ */
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node_search(dev, node_id))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ pn = NULL;
+ if (tm_root_node_present(dev))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ } else {
+ pn = tm_node_search(dev, parent_node_id);
+ if (pn == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* priority: must be 0 .. 3 */
+ if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+	/* weight: must be non-zero and strictly below UINT8_MAX (1 .. 254) */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* level_id: if valid, then
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -level_id must be zero
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -level_id must be parent level ID plus one
+ */
+ level = (pn == NULL) ? 0 : pn->level + 1;
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: per level checks */
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ status = node_add_check_port(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ status = node_add_check_subport(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ status = node_add_check_pipe(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ status = node_add_check_tc(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ status = node_add_check_queue(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+ uint32_t i;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = node_add_check(dev, node_id, parent_node_id, priority, weight,
+ level_id, params, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ n = calloc(1, sizeof(struct tm_node));
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ ENOMEM,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOMEM));
+
+ /* Fill in */
+ n->node_id = node_id;
+ n->parent_node_id = parent_node_id;
+ n->priority = priority;
+ n->weight = weight;
+
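+	/* Non-root nodes get level = parent level + 1; the root node keeps
+	 * level 0 (TM_NODE_LEVEL_PORT) from the zeroed allocation.
+	 */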
+ if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+ n->parent_node = tm_node_search(dev, parent_node_id);
+ n->level = n->parent_node->level + 1;
+ }
+
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+ n->shaper_profile = tm_shaper_profile_search(dev,
+ params->shaper_profile_id);
+
+ if (n->level == TM_NODE_LEVEL_QUEUE &&
+ params->leaf.cman == RTE_TM_CMAN_WRED)
+ n->wred_profile = tm_wred_profile_search(dev,
+ params->leaf.wred.wred_profile_id);
+
+ memcpy(&n->params, params, sizeof(n->params));
+
+ /* Add to list */
+ TAILQ_INSERT_TAIL(nl, n, node);
+ p->soft.tm.h.n_nodes++;
+
+ /* Update dependencies */
+ if (n->parent_node)
+ n->parent_node->n_children++;
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users++;
+
+ for (i = 0; i < params->n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
+ ss->n_users++;
+ }
+
+ if (n->wred_profile)
+ n->wred_profile->n_users++;
+
+ p->soft.tm.h.n_tm_nodes[n->level]++;
+
+ return 0;
+}
+
+/* Traffic manager node delete */
+static int
+pmd_tm_node_delete(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node *n;
+ uint32_t i;
+
+ /* Check hierarchy changes are currently allowed */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Check existing */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Check unused */
+ if (n->n_children)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Update dependencies */
+ p->soft.tm.h.n_tm_nodes[n->level]--;
+
+ if (n->wred_profile)
+ n->wred_profile->n_users--;
+
+ for (i = 0; i < n->params.n_shared_shapers; i++) {
+ struct tm_shared_shaper *ss;
+
+ ss = tm_shared_shaper_search(dev,
+ n->params.shared_shaper_id[i]);
+ ss->n_users--;
+ }
+
+ if (n->shaper_profile)
+ n->shaper_profile->n_users--;
+
+ if (n->parent_node)
+ n->parent_node->n_children--;
+
+ /* Remove from list */
+ TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
+ p->soft.tm.h.n_nodes--;
+ free(n);
+
+ return 0;
+}
+
+static void
+pipe_profile_build(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_sched_pipe_params *pp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nt, *nq;
+
+ memset(pp, 0, sizeof(*pp));
+
+ /* Pipe */
+ pp->tb_rate = np->shaper_profile->params.peak.rate;
+ pp->tb_size = np->shaper_profile->params.peak.size;
+
+ /* Traffic Class (TC) */
+ pp->tc_period = PIPE_TC_PERIOD;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ pp->tc_ov_weight = np->weight;
+#endif
+
+ TAILQ_FOREACH(nt, nl, node) {
+ uint32_t queue_id = 0;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ pp->tc_rate[nt->priority] =
+ nt->shaper_profile->params.peak.rate;
+
+ /* Queue */
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t pipe_queue_id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node_id != nt->node_id)
+ continue;
+
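+			/* Queues within a pipe are numbered TC-major:
+			 * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS consecutive
+			 * slots per traffic class.
+			 */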
+ pipe_queue_id = nt->priority *
+ RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+ pp->wrr_weights[pipe_queue_id] = nq->weight;
+
+ queue_id++;
+ }
+ }
+}
+
+static int
+pipe_profile_free_exists(struct rte_eth_dev *dev,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+ *pipe_profile_id = t->n_pipe_profiles;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+pipe_profile_exists(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t *pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t i;
+
+ for (i = 0; i < t->n_pipe_profiles; i++)
+ if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
+ if (pipe_profile_id)
+ *pipe_profile_id = i;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+pipe_profile_install(struct rte_eth_dev *dev,
+ struct rte_sched_pipe_params *pp,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+
+ memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
+ t->n_pipe_profiles++;
+}
+
+static void
+pipe_profile_mark(struct rte_eth_dev *dev,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t pipe_profile_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport, pos;
+
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ pos = subport_id * n_pipes_per_subport + pipe_id;
+
+ t->pipe_to_profile[pos] = pipe_profile_id;
+}
+
+static struct rte_sched_pipe_params *
+pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_params *t = &p->soft.tm.params;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
+ uint32_t pipe_profile_id = t->pipe_to_profile[pos];
+
+ return &t->pipe_profiles[pipe_profile_id];
+}
+
+static int
+pipe_profiles_generate(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *ns, *np;
+ uint32_t subport_id;
+
+ /* Objective: Fill in the following fields in struct tm_params:
+ * - pipe_profiles
+ * - n_pipe_profiles
+ * - pipe_to_profile
+ */
+
+ subport_id = 0;
+ TAILQ_FOREACH(ns, nl, node) {
+ uint32_t pipe_id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ pipe_id = 0;
+ TAILQ_FOREACH(np, nl, node) {
+ struct rte_sched_pipe_params pp;
+ uint32_t pos;
+
+ if (np->level != TM_NODE_LEVEL_PIPE ||
+ np->parent_node_id != ns->node_id)
+ continue;
+
+ pipe_profile_build(dev, np, &pp);
+
+ if (!pipe_profile_exists(dev, &pp, &pos)) {
+ if (!pipe_profile_free_exists(dev, &pos))
+ return -1;
+
+ pipe_profile_install(dev, &pp, pos);
+ }
+
+ pipe_profile_mark(dev, subport_id, pipe_id, pos);
+
+ pipe_id++;
+ }
+
+ subport_id++;
+ }
+
+ return 0;
+}
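+
+/* Sketch of the flow above, assuming every pipe is configured identically:
+ * the first pipe fails pipe_profile_exists(), takes a free slot through
+ * pipe_profile_free_exists() and is installed as profile 0; every later
+ * pipe matches profile 0 and is only marked, so n_pipe_profiles stays 1
+ * while pipe_to_profile[] still gets one entry per (subport, pipe) position.
+ */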
+
+static struct tm_wred_profile *
+tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *nq;
+
+ TAILQ_FOREACH(nq, nl, node) {
+ if (nq->level != TM_NODE_LEVEL_QUEUE ||
+ nq->parent_node->priority != tc_id)
+ continue;
+
+ return nq->wred_profile;
+ }
+
+ return NULL;
+}
+
+#ifdef RTE_SCHED_RED
+
+static void
+wred_profiles_set(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
+ uint32_t tc_id;
+ enum rte_tm_color color;
+
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
+ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
+ struct rte_red_params *dst =
+ &pp->red_params[tc_id][color];
+ struct tm_wred_profile *src_wp =
+ tm_tc_wred_profile_get(dev, tc_id);
+ struct rte_tm_red_params *src =
+ &src_wp->params.red_params[color];
+
+ memcpy(dst, src, sizeof(*dst));
+ }
+}
+
+#else
+
+#define wred_profiles_set(dev)
+
+#endif
+
+static struct tm_shared_shaper *
+tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
+{
+ return (tc_node->params.n_shared_shapers) ?
+ tm_shared_shaper_search(dev,
+ tc_node->params.shared_shaper_id[0]) :
+ NULL;
+}
+
+static struct tm_shared_shaper *
+tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
+ struct tm_node *subport_node,
+ uint32_t tc_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+
+ TAILQ_FOREACH(n, nl, node) {
+ if (n->level != TM_NODE_LEVEL_TC ||
+ n->parent_node->parent_node_id !=
+ subport_node->node_id ||
+ n->priority != tc_id)
+ continue;
+
+ return tm_tc_shared_shaper_get(dev, n);
+ }
+
+ return NULL;
+}
+
+static int
+hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_shared_shaper_list *ssl = &h->shared_shapers;
+ struct tm_wred_profile_list *wpl = &h->wred_profiles;
+ struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
+ struct tm_shared_shaper *ss;
+
+ uint32_t n_pipes_per_subport;
+
+ /* Root node exists. */
+ if (nr == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one subport and the max is not exceeded. */
+ if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* There is at least one pipe. */
+ if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of pipes is the same for all subports. Maximum number of pipes
+ * per subport is not exceeded.
+ */
+ n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(ns, nl, node) {
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ if (ns->n_children != n_pipes_per_subport)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
+ TAILQ_FOREACH(np, nl, node) {
+ uint32_t mask = 0, mask_expected =
+ RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ uint32_t);
+
+ if (np->level != TM_NODE_LEVEL_PIPE)
+ continue;
+
+ if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node_id != np->node_id)
+ continue;
+
+ mask |= 1 << nt->priority;
+ }
+
+ if (mask != mask_expected)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Each TC has exactly 4 packet queues. */
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC)
+ continue;
+
+ if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Shared shapers:
+ * - For each TC #i, all pipes in the same subport use the same
+ * shared shaper (or no shared shaper) for their TC #i.
+ * - Each shared shaper needs at least one user, and all its users
+ * must be TC nodes with the same priority and the same subport.
+ */
+ TAILQ_FOREACH(ns, nl, node) {
+ struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ if (ns->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
+ s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
+
+ TAILQ_FOREACH(nt, nl, node) {
+ struct tm_shared_shaper *subport_ss, *tc_ss;
+
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->parent_node->parent_node_id !=
+ ns->node_id)
+ continue;
+
+ subport_ss = s[nt->priority];
+ tc_ss = tm_tc_shared_shaper_get(dev, nt);
+
+ if (subport_ss == NULL && tc_ss == NULL)
+ continue;
+
+ if ((subport_ss == NULL && tc_ss != NULL) ||
+ (subport_ss != NULL && tc_ss == NULL) ||
+ subport_ss->shared_shaper_id !=
+ tc_ss->shared_shaper_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ TAILQ_FOREACH(ss, ssl, node) {
+ struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
+ uint32_t n_users = 0;
+
+ if (nt_any != NULL)
+ TAILQ_FOREACH(nt, nl, node) {
+ if (nt->level != TM_NODE_LEVEL_TC ||
+ nt->priority != nt_any->priority ||
+ nt->parent_node->parent_node_id !=
+ nt_any->parent_node->parent_node_id)
+ continue;
+
+ n_users++;
+ }
+
+ if (ss->n_users == 0 || ss->n_users != n_users)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* Not too many pipe profiles. */
+ if (pipe_profiles_generate(dev))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WRED (when used, i.e. at least one WRED profile defined):
+ * - Each WRED profile must have at least one user.
+ * - All leaf nodes must have their private WRED context enabled.
+ * - For each TC #i, all leaf nodes must use the same WRED profile
+ * for their private WRED context.
+ */
+ if (h->n_wred_profiles) {
+ struct tm_wred_profile *wp;
+ struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t id;
+
+ TAILQ_FOREACH(wp, wpl, node)
+ if (wp->n_users == 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ w[id] = tm_tc_wred_profile_get(dev, id);
+
+ if (w[id] == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ TAILQ_FOREACH(nq, nl, node) {
+ uint32_t id;
+
+ if (nq->level != TM_NODE_LEVEL_QUEUE)
+ continue;
+
+ id = nq->parent_node->priority;
+
+ if (nq->wred_profile == NULL ||
+ nq->wred_profile->wred_profile_id !=
+ w[id]->wred_profile_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+ }
+
+ return 0;
+}
+
+static void
+hierarchy_blueprints_create(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_params *t = &p->soft.tm.params;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+
+ struct tm_node_list *nl = &h->nodes;
+ struct tm_node *root = tm_root_node_present(dev), *n;
+
+ uint32_t subport_id;
+
+ t->port_params = (struct rte_sched_port_params) {
+ .name = dev->data->name,
+ .socket = dev->data->numa_node,
+ .rate = root->shaper_profile->params.peak.rate,
+ .mtu = dev->data->mtu,
+ .frame_overhead =
+ root->shaper_profile->params.pkt_length_adjust,
+ .n_subports_per_port = root->n_children,
+ .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
+ .qsize = {p->params.soft.tm.qsize[0],
+ p->params.soft.tm.qsize[1],
+ p->params.soft.tm.qsize[2],
+ p->params.soft.tm.qsize[3],
+ },
+ .pipe_profiles = t->pipe_profiles,
+ .n_pipe_profiles = t->n_pipe_profiles,
+ };
+
+ wred_profiles_set(dev);
+
+ subport_id = 0;
+ TAILQ_FOREACH(n, nl, node) {
+ uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t i;
+
+ if (n->level != TM_NODE_LEVEL_SUBPORT)
+ continue;
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ struct tm_shared_shaper *ss;
+ struct tm_shaper_profile *sp;
+
+ ss = tm_subport_tc_shared_shaper_get(dev, n, i);
+ sp = (ss) ? tm_shaper_profile_search(dev,
+ ss->shaper_profile_id) :
+ n->shaper_profile;
+ tc_rate[i] = sp->params.peak.rate;
+ }
+
+ t->subport_params[subport_id] =
+ (struct rte_sched_subport_params) {
+ .tb_rate = n->shaper_profile->params.peak.rate,
+ .tb_size = n->shaper_profile->params.peak.size,
+
+ .tc_rate = {tc_rate[0],
+ tc_rate[1],
+ tc_rate[2],
+ tc_rate[3],
+ },
+ .tc_period = SUBPORT_TC_PERIOD,
+ };
+
+ subport_id++;
+ }
+}
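+
+/* Worked example with hypothetical numbers: a root shaper rate of
+ * 1250000000 bytes/s with 2 subport nodes and 8 pipe nodes produces
+ * port_params with n_subports_per_port = 2 and n_pipes_per_subport = 4,
+ * and each subport_params[i].tc_rate[j] comes from the shared shaper for
+ * TC j when one exists, otherwise from the subport's own shaper profile.
+ */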
+
+/* Traffic manager hierarchy commit */
+static int
+pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = hierarchy_commit_check(dev, error);
+ if (status) {
+ if (clear_on_fail) {
+ tm_hierarchy_uninit(p);
+ tm_hierarchy_init(p);
+ }
+
+ return status;
+ }
+
+ /* Create blueprints */
+ hierarchy_blueprints_create(dev);
+
+ /* Freeze hierarchy */
+ p->soft.tm.hierarchy_frozen = 1;
+
+ return 0;
+}
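+
+/* Minimal application-side sketch (assuming the hierarchy for port_id has
+ * already been populated through the rte_tm node/profile calls):
+ *
+ *	struct rte_tm_error error;
+ *	int ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+ *	if (ret)
+ *		printf("commit failed: %s\n", error.message);
+ *
+ * The generic rte_tm_hierarchy_commit() dispatches to the handler above;
+ * clear_on_fail = 1 asks for the hierarchy to be reset on a failed check.
+ */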
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+
+static int
+update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_ov_weight = (uint8_t)weight;
+
+ /* Since the implementation does not allow adding pipe profiles after
+ * port configuration, the pipe configuration can only be updated if
+ * the new profile is already part of the existing set of pipe
+ * profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->weight = weight;
+
+ return 0;
+}
+
+#endif
+
+static int
+update_queue_weight(struct rte_eth_dev *dev,
+ struct tm_node *nq, uint32_t weight)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t pipe_queue_id =
+ tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
+
+ /* Since the implementation does not allow adding pipe profiles after
+ * port configuration, the pipe configuration can only be updated if
+ * the new profile is already part of the existing set of pipe
+ * profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nq->weight = weight;
+
+ return 0;
+}
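+
+/* The pipe-profile update helpers here share one pattern: copy the pipe's
+ * current profile, change a single field, then require the result to match
+ * an already-installed profile. A hypothetical call such as
+ * update_queue_weight(dev, nq, 2) therefore fails with -1 unless some
+ * installed pipe profile already carries that exact WRR weight.
+ */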
+
+/* Traffic manager node parent update */
+static int
+pmd_tm_node_parent_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Parent node must be the same */
+ if (n->parent_node_id != parent_node_id)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be the same */
+ if (n->priority != priority)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be non-zero and strictly below UINT8_MAX (1 .. 254). */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ case TM_NODE_LEVEL_SUBPORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ case TM_NODE_LEVEL_PIPE:
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ if (update_pipe_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+#else
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+#endif
+ case TM_NODE_LEVEL_TC:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ if (update_queue_weight(dev, n, weight))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
+
+static int
+update_subport_rate(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_subport_params subport_params;
+
+ /* Derive new subport configuration. */
+ memcpy(&subport_params,
+ &p->soft.tm.params.subport_params[subport_id],
+ sizeof(subport_params));
+ subport_params.tb_rate = sp->params.peak.rate;
+ subport_params.tb_size = sp->params.peak.size;
+
+ /* Update the subport configuration. */
+ if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+ &subport_params))
+ return -1;
+
+ /* Commit changes. */
+ ns->shaper_profile->n_users--;
+
+ ns->shaper_profile = sp;
+ ns->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ memcpy(&p->soft.tm.params.subport_params[subport_id],
+ &subport_params,
+ sizeof(subport_params));
+
+ return 0;
+}
+
+static int
+update_pipe_rate(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tb_rate = sp->params.peak.rate;
+ profile1.tb_size = sp->params.peak.size;
+
+ /* Since the implementation does not allow adding pipe profiles after
+ * port configuration, the pipe configuration can only be updated if
+ * the new profile is already part of the existing set of pipe
+ * profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ np->shaper_profile->n_users--;
+ np->shaper_profile = sp;
+ np->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+static int
+update_tc_rate(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct tm_shaper_profile *sp)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
+ struct rte_sched_pipe_params profile1;
+ uint32_t pipe_profile_id;
+
+ /* Derive new pipe profile. */
+ memcpy(&profile1, profile0, sizeof(profile1));
+ profile1.tc_rate[tc_id] = sp->params.peak.rate;
+
+ /* Since the implementation does not allow adding pipe profiles after
+ * port configuration, the pipe configuration can only be updated if
+ * the new profile is already part of the existing set of pipe
+ * profiles.
+ */
+ if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
+ return -1;
+
+ /* Update the pipe profile used by the current pipe. */
+ if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ (int32_t)pipe_profile_id))
+ return -1;
+
+ /* Commit changes. */
+ pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
+ nt->shaper_profile->n_users--;
+ nt->shaper_profile = sp;
+ nt->params.shaper_profile_id = sp->shaper_profile_id;
+ sp->n_users++;
+
+ return 0;
+}
+
+/* Traffic manager node shaper update */
+static int
+pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+ struct tm_shaper_profile *sp;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper profile must be valid. */
+ sp = tm_shaper_profile_search(dev, shaper_profile_id);
+ if (sp == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ case TM_NODE_LEVEL_SUBPORT:
+ if (update_subport_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ case TM_NODE_LEVEL_PIPE:
+ if (update_pipe_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ case TM_NODE_LEVEL_TC:
+ if (update_tc_rate(dev, n, sp))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ case TM_NODE_LEVEL_QUEUE:
+ /* fall-through */
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+}
+
+static inline uint32_t
+tm_port_queue_id(struct rte_eth_dev *dev,
+ uint32_t port_subport_id,
+ uint32_t subport_pipe_id,
+ uint32_t pipe_tc_id,
+ uint32_t tc_queue_id)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
+ h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+
+ uint32_t port_pipe_id =
+ port_subport_id * n_pipes_per_subport + subport_pipe_id;
+ uint32_t port_tc_id =
+ port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
+ uint32_t port_queue_id =
+ port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
+
+ return port_queue_id;
+}
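+
+/* Worked example (default config, 4 TCs per pipe and 4 queues per TC):
+ * with n_pipes_per_subport = 16, subport 1 / pipe 2 / TC 3 / queue 1 maps
+ * to ((1 * 16 + 2) * 4 + 3) * 4 + 1 = 301.
+ */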
+
+static int
+read_port_stats(struct rte_eth_dev *dev,
+ struct tm_node *nr,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_hierarchy *h = &p->soft.tm.h;
+ uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+ uint32_t subport_id;
+
+ for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(
+ p->soft.tm.sched,
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
+ nr->stats.n_pkts +=
+ s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
+ nr->stats.n_bytes +=
+ s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
+ nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[id];
+ nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[id];
+ }
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nr->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nr->stats, 0, sizeof(nr->stats));
+
+ return 0;
+}
+
+static int
+read_subport_stats(struct rte_eth_dev *dev,
+ struct tm_node *ns,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+ struct rte_sched_subport_stats s;
+ uint32_t tc_ov, tc_id;
+
+ /* Stats read */
+ int status = rte_sched_subport_read_stats(
+ p->soft.tm.sched,
+ subport_id,
+ &s,
+ &tc_ov);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
+ ns->stats.n_pkts +=
+ s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
+ ns->stats.n_bytes +=
+ s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
+ ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
+ s.n_pkts_tc_dropped[tc_id];
+ ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_tc_dropped[tc_id];
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &ns->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&ns->stats, 0, sizeof(ns->stats));
+
+ return 0;
+}
+
+static int
+read_pipe_stats(struct rte_eth_dev *dev,
+ struct tm_node *np,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+ i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ np->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &np->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ return 0;
+}
+
+static int
+read_tc_stats(struct rte_eth_dev *dev,
+ struct tm_node *nt,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ uint32_t i;
+
+ /* Stats read */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ i);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nt->stats.leaf.n_pkts_queued = qlen;
+ }
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nt->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_DEFAULT;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nt->stats, 0, sizeof(nt->stats));
+
+ return 0;
+}
+
+static int
+read_queue_stats(struct rte_eth_dev *dev,
+ struct tm_node *nq,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct rte_sched_queue_stats s;
+ uint16_t qlen;
+
+ uint32_t queue_id = tm_node_queue_id(dev, nq);
+
+ struct tm_node *nt = nq->parent_node;
+ uint32_t tc_id = tm_node_tc_id(dev, nt);
+
+ struct tm_node *np = nt->parent_node;
+ uint32_t pipe_id = tm_node_pipe_id(dev, np);
+
+ struct tm_node *ns = np->parent_node;
+ uint32_t subport_id = tm_node_subport_id(dev, ns);
+
+ /* Stats read */
+ uint32_t qid = tm_port_queue_id(dev,
+ subport_id,
+ pipe_id,
+ tc_id,
+ queue_id);
+
+ int status = rte_sched_queue_read_stats(
+ p->soft.tm.sched,
+ qid,
+ &s,
+ &qlen);
+ if (status)
+ return status;
+
+ /* Stats accumulate */
+ nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
+ nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
+ nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
+ s.n_bytes_dropped;
+ nq->stats.leaf.n_pkts_queued = qlen;
+
+ /* Stats copy */
+ if (stats)
+ memcpy(stats, &nq->stats, sizeof(*stats));
+
+ if (stats_mask)
+ *stats_mask = STATS_MASK_QUEUE;
+
+ /* Stats clear */
+ if (clear)
+ memset(&nq->stats, 0, sizeof(nq->stats));
+
+ return 0;
+}
+
+/* Traffic manager read stats counters for specific node */
+static int
+pmd_tm_node_stats_read(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_tm_error *error)
+{
+ struct tm_node *n;
+
+ /* Port must be started and TM used. */
+ if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ /* Node must be valid */
+ n = tm_node_search(dev, node_id);
+ if (n == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ switch (n->level) {
+ case TM_NODE_LEVEL_PORT:
+ if (read_port_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ if (read_subport_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_PIPE:
+ if (read_pipe_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_TC:
+ if (read_tc_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+
+ case TM_NODE_LEVEL_QUEUE:
+ default:
+ if (read_queue_stats(dev, n, stats, stats_mask, clear))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EINVAL));
+ return 0;
+ }
+}
+
+const struct rte_tm_ops pmd_tm_ops = {
+ .node_type_get = pmd_tm_node_type_get,
+ .capabilities_get = pmd_tm_capabilities_get,
+ .level_capabilities_get = pmd_tm_level_capabilities_get,
+ .node_capabilities_get = pmd_tm_node_capabilities_get,
+
+ .wred_profile_add = pmd_tm_wred_profile_add,
+ .wred_profile_delete = pmd_tm_wred_profile_delete,
+ .shared_wred_context_add_update = NULL,
+ .shared_wred_context_delete = NULL,
+
+ .shaper_profile_add = pmd_tm_shaper_profile_add,
+ .shaper_profile_delete = pmd_tm_shaper_profile_delete,
+ .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
+ .shared_shaper_delete = pmd_tm_shared_shaper_delete,
+
+ .node_add = pmd_tm_node_add,
+ .node_delete = pmd_tm_node_delete,
+ .node_suspend = NULL,
+ .node_resume = NULL,
+ .hierarchy_commit = pmd_tm_hierarchy_commit,
+
+ .node_parent_update = pmd_tm_node_parent_update,
+ .node_shaper_update = pmd_tm_node_shaper_update,
+ .node_shared_shaper_update = NULL,
+ .node_stats_update = NULL,
+ .node_wfq_weight_mode_update = NULL,
+ .node_cman_update = NULL,
+ .node_wred_context_update = NULL,
+ .node_shared_wred_context_update = NULL,
+
+ .node_stats_read = pmd_tm_node_stats_read,
+};
diff --git a/drivers/net/softnic/rte_pmd_eth_softnic_version.map b/drivers/net/softnic/rte_pmd_eth_softnic_version.map
new file mode 100644
index 00000000..fb2cb68c
--- /dev/null
+++ b/drivers/net/softnic/rte_pmd_eth_softnic_version.map
@@ -0,0 +1,7 @@
+DPDK_17.11 {
+ global:
+
+ rte_pmd_softnic_run;
+
+ local: *;
+};
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 0e96b922..0ebd3ec5 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -39,6 +39,9 @@ LIB = librte_pmd_szedata2.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lsze2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_szedata2_version.map
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 9c0d57cc..403cfdbb 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -71,7 +71,7 @@
struct szedata2_rx_queue {
struct szedata *sze;
uint8_t rx_channel;
- uint8_t in_port;
+ uint16_t in_port;
struct rte_mempool *mb_pool;
volatile uint64_t rx_pkts;
volatile uint64_t rx_bytes;
@@ -1042,7 +1042,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->speed_capa = ETH_LINK_SPEED_100G;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
@@ -1077,6 +1077,8 @@ eth_stats_get(struct rte_eth_dev *dev,
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
stats->oerrors = tx_err_total;
+
+ return 0;
}
static void
@@ -1538,7 +1540,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
return -EINVAL;
}
snprintf(rsc_filename, PATH_MAX,
- "%s/" PCI_PRI_FMT "/resource%u", pci_get_sysfs_path(),
+ "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
pci_addr->domain, pci_addr->bus,
pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
fd = open(rsc_filename, O_RDWR);
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
index b0de0284..405b49e4 100644
--- a/drivers/net/tap/Makefile
+++ b/drivers/net/tap/Makefile
@@ -43,6 +43,9 @@ CFLAGS += -O3
CFLAGS += -I$(SRCDIR)
CFLAGS += -I.
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_vdev
#
# all source are stored in SRCS-y
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 9acea839..6b27679a 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -39,7 +39,7 @@
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>
@@ -603,8 +603,31 @@ tap_dev_stop(struct rte_eth_dev *dev)
}
static int
-tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
+tap_dev_configure(struct rte_eth_dev *dev)
{
+ if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
+ RTE_LOG(ERR, PMD,
+ "%s: number of rx queues %d exceeds max num of queues %d\n",
+ dev->device->name,
+ dev->data->nb_rx_queues,
+ RTE_PMD_TAP_MAX_QUEUES);
+ return -1;
+ }
+ if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
+ RTE_LOG(ERR, PMD,
+ "%s: number of tx queues %d exceeds max num of queues %d\n",
+ dev->device->name,
+ dev->data->nb_tx_queues,
+ RTE_PMD_TAP_MAX_QUEUES);
+ return -1;
+ }
+
+ RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
+ dev->device->name, (void *)dev, dev->data->nb_tx_queues);
+
+ RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
+ dev->device->name, (void *)dev, dev->data->nb_rx_queues);
+
return 0;
}
@@ -650,8 +673,8 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
- dev_info->max_rx_queues = internals->nb_queues;
- dev_info->max_tx_queues = internals->nb_queues;
+ dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
+ dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
@@ -664,7 +687,7 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_TCP_CKSUM);
}
-static void
+static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
unsigned int i, imax;
@@ -673,9 +696,9 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
unsigned long rx_nombuf = 0, ierrors = 0;
const struct pmd_internals *pmd = dev->data->dev_private;
- imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
- pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
-
+ /* rx queue statistics */
+ imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
for (i = 0; i < imax; i++) {
tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
@@ -683,7 +706,13 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
rx_bytes_total += tap_stats->q_ibytes[i];
rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
ierrors += pmd->rxq[i].stats.ierrors;
+ }
+ /* tx queue statistics */
+ imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+
+ for (i = 0; i < imax; i++) {
tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
@@ -699,6 +728,7 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
tap_stats->opackets = tx_total;
tap_stats->oerrors = tx_err_total;
tap_stats->obytes = tx_bytes_total;
+ return 0;
}
static void
@@ -707,7 +737,7 @@ tap_stats_reset(struct rte_eth_dev *dev)
int i;
struct pmd_internals *pmd = dev->data->dev_private;
- for (i = 0; i < pmd->nb_queues; i++) {
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
pmd->rxq[i].stats.ipackets = 0;
pmd->rxq[i].stats.ibytes = 0;
pmd->rxq[i].stats.ierrors = 0;
@@ -729,11 +759,15 @@ tap_dev_close(struct rte_eth_dev *dev)
tap_flow_flush(dev, NULL);
tap_flow_implicit_flush(internals, NULL);
- for (i = 0; i < internals->nb_queues; i++) {
- if (internals->rxq[i].fd != -1)
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ if (internals->rxq[i].fd != -1) {
close(internals->rxq[i].fd);
- internals->rxq[i].fd = -1;
- internals->txq[i].fd = -1;
+ internals->rxq[i].fd = -1;
+ }
+ if (internals->txq[i].fd != -1) {
+ close(internals->txq[i].fd);
+ internals->txq[i].fd = -1;
+ }
}
if (internals->remote_if_index) {
@@ -887,30 +921,57 @@ tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
static int
tap_setup_queue(struct rte_eth_dev *dev,
struct pmd_internals *internals,
- uint16_t qid)
+ uint16_t qid,
+ int is_rx)
{
+ int *fd;
+ int *other_fd;
+ const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
- int fd = rx->fd == -1 ? tx->fd : rx->fd;
- if (fd == -1) {
- RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
- pmd->name, qid);
- fd = tun_alloc(pmd);
- if (fd < 0) {
+ if (is_rx) {
+ fd = &rx->fd;
+ other_fd = &tx->fd;
+ dir = "rx";
+ } else {
+ fd = &tx->fd;
+ other_fd = &rx->fd;
+ dir = "tx";
+ }
+ if (*fd != -1) {
+ /* fd for this queue already exists */
+ RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
+ pmd->name, *fd, dir, qid);
+ } else if (*other_fd != -1) {
+ /* Only other_fd exists: duplicate it */
+ *fd = dup(*other_fd);
+ if (*fd < 0) {
+ *fd = -1;
+ RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
+ pmd->name);
+ return -1;
+ }
+ RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
+ pmd->name, *other_fd, dir, qid, *fd);
+ } else {
+ /* Neither the RX nor the TX fd exists (both equal -1): create one */
+ *fd = tun_alloc(pmd);
+ if (*fd < 0) {
+ *fd = -1; /* restore original value */
RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
pmd->name);
return -1;
}
+ RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
+ pmd->name, dir, qid, *fd);
}
- rx->fd = fd;
- tx->fd = fd;
tx->mtu = &dev->data->mtu;
rx->rxmode = &dev->data->dev_conf.rxmode;
- return fd;
+ return *fd;
}
static int
@@ -932,10 +993,10 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
int fd;
int i;
- if ((rx_queue_id >= internals->nb_queues) || !mp) {
+ if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
RTE_LOG(WARNING, PMD,
- "nb_queues %d too small or mempool NULL\n",
- internals->nb_queues);
+ "nb_rx_queues %d too small or mempool NULL\n",
+ dev->data->nb_rx_queues);
return -1;
}
@@ -954,7 +1015,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
rxq->iovecs = iovecs;
dev->data->rx_queues[rx_queue_id] = rxq;
- fd = tap_setup_queue(dev, internals, rx_queue_id);
+ fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
if (fd == -1) {
ret = fd;
goto error;
@@ -1002,11 +1063,11 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
int ret;
- if (tx_queue_id >= internals->nb_queues)
+ if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
- ret = tap_setup_queue(dev, internals, tx_queue_id);
+ ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
return -1;
@@ -1072,10 +1133,11 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set)
/* In any case, disable interrupt if the conf is no longer there. */
if (!dev->data->dev_conf.intr_conf.lsc) {
- if (pmd->intr_handle.fd != -1)
+ if (pmd->intr_handle.fd != -1) {
nl_final(pmd->intr_handle.fd);
- rte_intr_callback_unregister(
- &pmd->intr_handle, tap_dev_intr_handler, dev);
+ rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+ }
return 0;
}
if (set) {
@@ -1166,7 +1228,6 @@ static const struct eth_dev_ops ops = {
.filter_ctrl = tap_dev_filter_ctrl,
};
-
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
char *remote_iface, int fixed_mac_type)
@@ -1193,8 +1254,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
}
pmd = dev->data->dev_private;
+ pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
- pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;
pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (pmd->ioctl_sock == -1) {
@@ -1207,13 +1268,14 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
/* Setup some default values */
rte_memcpy(data, dev->data, sizeof(*data));
data->dev_private = pmd;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC;
data->numa_node = numa_node;
data->dev_link = pmd_link;
data->mac_addrs = &pmd->eth_addr;
- data->nb_rx_queues = pmd->nb_queues;
- data->nb_tx_queues = pmd->nb_queues;
+ /* Set the number of RX and TX queues */
+ data->nb_rx_queues = 0;
+ data->nb_tx_queues = 0;
dev->data = data;
dev->dev_ops = &ops;
@@ -1241,7 +1303,11 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
}
/* Immediately create the netdevice (this will create the 1st queue). */
- if (tap_setup_queue(dev, pmd, 0) == -1)
+ /* rx queue */
+ if (tap_setup_queue(dev, pmd, 0, 1) == -1)
+ goto error_exit;
+ /* tx queue */
+ if (tap_setup_queue(dev, pmd, 0, 0) == -1)
goto error_exit;
ifr.ifr_mtu = dev->data->mtu;
@@ -1515,9 +1581,16 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
tap_flow_implicit_flush(internals, NULL);
nl_final(internals->nlsk_fd);
}
- for (i = 0; i < internals->nb_queues; i++)
- if (internals->rxq[i].fd != -1)
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ if (internals->rxq[i].fd != -1) {
close(internals->rxq[i].fd);
+ internals->rxq[i].fd = -1;
+ }
+ if (internals->txq[i].fd != -1) {
+ close(internals->txq[i].fd);
+ internals->txq[i].fd = -1;
+ }
+ }
close(internals->ioctl_sock);
rte_free(eth_dev->data->dev_private);
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 928a0454..829f32f3 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -80,9 +80,9 @@ struct tx_queue {
};
struct pmd_internals {
+ struct rte_eth_dev *dev; /* Ethernet device. */
char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
- uint16_t nb_queues; /* Number of queues supported */
struct ether_addr eth_addr; /* Mac address of the device port */
struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
int remote_if_index; /* remote netdevice IF_INDEX */
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 41f73452..ffc0b85b 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1089,13 +1089,29 @@ priv_flow_process(struct pmd_internals *pmd,
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
if (action)
goto exit_action_not_supported;
action = 1;
- if (!queue || (queue->index >= pmd->nb_queues))
+ if (!queue ||
+ (queue->index > pmd->dev->data->nb_rx_queues - 1))
goto exit_action_not_supported;
if (flow)
err = add_action_skbedit(flow, queue->index);
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ /* Fake RSS support. */
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (!rss || rss->num < 1 ||
+ (rss->queue[0] > pmd->dev->data->nb_rx_queues - 1))
+ goto exit_action_not_supported;
+ if (flow)
+ err = add_action_skbedit(flow, rss->queue[0]);
} else {
goto exit_action_not_supported;
}
@@ -1446,7 +1462,7 @@ tap_flow_isolate(struct rte_eth_dev *dev,
return 0;
error:
pmd->flow_isolate = 0;
- return -rte_flow_error_set(
+ return rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"TC rule creation failed");
}
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index 915ae945..e50e1ad8 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -40,12 +40,15 @@ LIB = librte_pmd_thunderx_nicvf.a
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lm
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_thunderx_nicvf_version.map
LIBABIVER := 1
-OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index 2634285e..dc0af1ca 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -509,7 +509,7 @@ nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
struct rbdr_entry_t *desc, *desc0;
struct nicvf_rbdr *rbdr = nic->rbdr;
uint32_t count;
- nicvf_phys_addr_t phy;
+ nicvf_iova_addr_t phy;
assert(rbdr != NULL);
desc = rbdr->desc;
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index b7d0a3dc..698aa487 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -88,7 +88,7 @@ enum nicvf_err_e {
NICVF_ERR_RSS_GET_SZ, /* -8171 */
};
-typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
+typedef nicvf_iova_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
struct nicvf_hw_rx_qstats {
uint64_t q_rx_bytes;
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 0fe673e6..e7e092b6 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -213,7 +213,7 @@
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
#define assert_primary(nic) assert((nic)->sqs_mode == 0)
-typedef uint64_t nicvf_phys_addr_t;
+typedef uint64_t nicvf_iova_addr_t;
/* vNIC HW Enumerations */
@@ -840,7 +840,7 @@ struct rbdr_entry_t {
uint64_t buf_addr:42;
uint64_t cache_align:7;
};
- nicvf_phys_addr_t full_addr;
+ nicvf_iova_addr_t full_addr;
};
#else
union {
@@ -849,7 +849,7 @@ struct rbdr_entry_t {
uint64_t buf_addr:42;
uint64_t rsvd0:15;
};
- nicvf_phys_addr_t full_addr;
+ nicvf_iova_addr_t full_addr;
};
#endif
};
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index edc17f1d..d65d3cee 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -61,6 +61,7 @@
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_tailq.h>
#include "base/nicvf_plat.h"
@@ -242,7 +243,7 @@ nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
return -ENOTSUP;
}
-static void
+static int
nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint16_t qidx;
@@ -332,6 +333,8 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->opackets += port_stats.tx_bcast_frames_ok;
stats->opackets += port_stats.tx_mcast_frames_ok;
stats->oerrors = port_stats.tx_drops;
+
+ return 0;
}
static const uint32_t *
@@ -602,7 +605,7 @@ nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
memset(rz->addr, 0, ring_size);
- rxq->phys = rz->phys_addr;
+ rxq->phys = rz->iova;
rxq->desc = rz->addr;
rxq->qlen_mask = desc_cnt - 1;
@@ -626,7 +629,7 @@ nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
memset(rz->addr, 0, ring_size);
- sq->phys = rz->phys_addr;
+ sq->phys = rz->iova;
sq->desc = rz->addr;
sq->qlen_mask = desc_cnt - 1;
@@ -660,7 +663,7 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
memset(rz->addr, 0, ring_size);
- rbdr->phys = rz->phys_addr;
+ rbdr->phys = rz->iova;
rbdr->tail = 0;
rbdr->next_tail = 0;
rbdr->desc = rz->addr;
@@ -677,7 +680,7 @@ nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
static void
nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
- nicvf_phys_addr_t phy)
+ nicvf_iova_addr_t phy)
{
uint16_t qidx;
void *obj;
@@ -1380,6 +1383,13 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
dev_info->max_rx_queues =
@@ -1419,7 +1429,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
};
}
-static nicvf_phys_addr_t
+static nicvf_iova_addr_t
rbdr_rte_mempool_get(void *dev, void *opaque)
{
uint16_t qidx;
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 3734430f..71bc3cf2 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -78,7 +78,7 @@ nicvf_mempool_phy_offset(struct rte_mempool *mp)
hdr = STAILQ_FIRST(&mp->mem_list);
assert(hdr != NULL);
- return (uint64_t)((uintptr_t)hdr->addr - hdr->phys_addr);
+ return (uint64_t)((uintptr_t)hdr->addr - hdr->iova);
}
static inline uint16_t
@@ -104,7 +104,7 @@ nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
* P = V - offset
*/
static inline uintptr_t
-nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off)
+nicvf_mbuff_phy2virt(rte_iova_t phy, uint64_t mbuf_phys_off)
{
return (uintptr_t)(phy + mbuf_phys_off);
}
@@ -112,7 +112,7 @@ nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off)
static inline uintptr_t
nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
{
- return (phys_addr_t)(virt - mbuf_phys_off);
+ return (rte_iova_t)(virt - mbuf_phys_off);
}
static inline void
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index cd1b754b..a3ccce29 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -60,7 +60,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
sqe.gather.size = pkt->data_len;
- sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);
+ sqe.gather.addr = rte_mbuf_data_iova(pkt);
entry->buff[0] = sqe.buff[0];
entry->buff[1] = sqe.buff[1];
@@ -80,7 +80,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
(uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
pkt->data_len;
- entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
+ entry->buff[1] = rte_mbuf_data_iova(pkt);
}
#endif
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 4ee6c3bb..0f8208ef 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -46,7 +46,7 @@ struct nicvf_rbdr {
uintptr_t rbdr_status;
uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
uint32_t buffsz;
uint32_t tail;
uint32_t next_tail;
@@ -56,7 +56,7 @@ struct nicvf_rbdr {
struct nicvf_txq {
union sq_entry_t *desc;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
struct rte_mbuf **txbuffs;
uintptr_t sq_head;
uintptr_t sq_door;
@@ -87,7 +87,7 @@ struct nicvf_rxq {
uintptr_t cq_status;
uintptr_t cq_door;
union mbuf_initializer mbuf_initializer;
- nicvf_phys_addr_t phys;
+ nicvf_iova_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
struct nicvf *nic;
@@ -100,7 +100,7 @@ struct nicvf_rxq {
uint16_t queue_id;
uint16_t precharge_cnt;
uint8_t rx_drop_en;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t rbptr_offset;
} __rte_cache_aligned;
diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile
index 3ba8ad64..c411745b 100644
--- a/drivers/net/vhost/Makefile
+++ b/drivers/net/vhost/Makefile
@@ -37,13 +37,16 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_vhost.a
LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_vhost
+LDLIBS += -lrte_bus_vdev
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_pmd_vhost_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 0dac5e60..2536ee4a 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -39,7 +39,7 @@
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
@@ -52,6 +52,7 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
+#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
@@ -59,6 +60,7 @@ static const char *valid_arguments[] = {
ETH_VHOST_QUEUES_ARG,
ETH_VHOST_CLIENT_ARG,
ETH_VHOST_DEQUEUE_ZERO_COPY,
+ ETH_VHOST_IOMMU_SUPPORT,
NULL
};
@@ -105,7 +107,7 @@ struct vhost_queue {
rte_atomic32_t while_queuing;
struct pmd_internal *internal;
struct rte_mempool *mb_pool;
- uint8_t port;
+ uint16_t port;
uint16_t virtqueue_id;
struct vhost_stats stats;
};
@@ -705,7 +707,7 @@ static struct vhost_device_ops vhost_ops = {
};
int
-rte_eth_vhost_get_queue_event(uint8_t port_id,
+rte_eth_vhost_get_queue_event(uint16_t port_id,
struct rte_eth_vhost_queue_event *event)
{
struct rte_vhost_vring_state *state;
@@ -742,7 +744,7 @@ rte_eth_vhost_get_queue_event(uint8_t port_id,
}
int
-rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
+rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
struct internal_list *list;
struct rte_eth_dev *eth_dev;
@@ -890,7 +892,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned i;
@@ -928,6 +930,8 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->oerrors = tx_missed_total;
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
+
+ return 0;
}
static void
@@ -1084,8 +1088,7 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
internal->max_queues = queues;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
- data->dev_flags =
- RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+ data->dev_flags = RTE_ETH_DEV_INTR_LSC;
eth_dev->dev_ops = &ops;
@@ -1163,6 +1166,7 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
uint64_t flags = 0;
int client_mode = 0;
int dequeue_zero_copy = 0;
+ int iommu_support = 0;
RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
rte_vdev_device_name(dev));
@@ -1210,6 +1214,16 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
}
+ if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
+ &open_int, &iommu_support);
+ if (ret < 0)
+ goto out_free;
+
+ if (iommu_support)
+ flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
+ }
+
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index 39ca7719..948f3c81 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -69,7 +69,7 @@ struct rte_eth_vhost_queue_event {
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_vhost_get_queue_event(uint8_t port_id,
+int rte_eth_vhost_get_queue_event(uint16_t port_id,
struct rte_eth_vhost_queue_event *event);
/**
@@ -79,7 +79,7 @@ int rte_eth_vhost_get_queue_event(uint8_t port_id,
* - On success, the 'vid' associated with 'port_id'.
* - On failure, a negative value.
*/
-int rte_eth_vhost_get_vid_from_port_id(uint8_t port_id);
+int rte_eth_vhost_get_vid_from_port_id(uint16_t port_id);
#ifdef __cplusplus
}
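
With port ids widened to 16 bits, callers of the vhost-specific API simply switch to uint16_t. A sketch of draining queue-state events, assuming the semantics documented above (zero on success, negative when no event is pending):

#include <stdio.h>
#include <rte_eth_vhost.h>

/* Sketch: poll all pending vhost queue-state events for one port. */
static void
drain_queue_events(uint16_t port_id)
{
	struct rte_eth_vhost_queue_event ev;

	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
		printf("port %u queue %u: %s %s\n", port_id, ev.queue_id,
		       ev.rx ? "rx" : "tx",
		       ev.enable ? "enabled" : "disabled");
}
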
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index b21b8781..f2b5d1c3 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -38,6 +38,12 @@ LIB = librte_pmd_virtio.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
+LDLIBS += -lrte_bus_vdev
+endif
EXPORT_MAP := rte_pmd_virtio_version.map
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e320811e..d2576d5e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -46,9 +46,11 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
+#include <rte_cpuflags.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -72,11 +74,12 @@ static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
-static void virtio_dev_stats_get(struct rte_eth_dev *dev,
+static int virtio_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -162,7 +165,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl result;
+ struct virtio_pmd_ctrl *result;
struct virtqueue *vq;
ctrl->status = status;
@@ -253,10 +256,9 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- memcpy(&result, cvq->virtio_net_hdr_mz->addr,
- sizeof(struct virtio_pmd_ctrl));
+ result = cvq->virtio_net_hdr_mz->addr;
- return result.status;
+ return result->status;
}
static int
@@ -426,10 +428,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_mem = mz->iova;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->phys_addr);
+ (uint64_t)mz->iova);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
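
The phys_addr to iova renames here follow the release-wide switch to IO virtual addresses. A sketch of the accessors this code now relies on; under plain physical addressing they yield the same values as the old fields:

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>

/* Sketch: bus addresses are now expressed as rte_iova_t, whether the
 * platform uses physical addresses or IOMMU-translated ones. */
static rte_iova_t
ring_bus_addr(const struct rte_memzone *mz)
{
	return mz->iova;
}

static rte_iova_t
mbuf_data_bus_addr(struct rte_mbuf *m)
{
	return rte_mbuf_data_iova(m);
}
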
@@ -474,13 +476,13 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
hw->cvq = cvq;
@@ -491,7 +493,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
- vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
vq->offset = offsetof(struct rte_mbuf, buf_addr);
@@ -779,6 +781,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.stats_reset = virtio_dev_stats_reset,
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
+ .vlan_offload_set = virtio_dev_vlan_offload_set,
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
@@ -964,10 +967,12 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
-static void
+static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
virtio_update_stats(dev, stats);
+
+ return 0;
}
static void
@@ -1235,14 +1240,36 @@ virtio_interrupt_handler(void *param)
}
+/* set rx and tx handlers according to what is supported */
static void
-rx_func_get(struct rte_eth_dev *eth_dev)
+set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
- else
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+
+ if (hw->use_simple_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
}
/* Only support 1:1 queue/interrupt mapping so far.
@@ -1360,15 +1387,12 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
- eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
/* If host does not support both status and MSI-X then disable LSC */
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- rx_func_get(eth_dev);
-
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
@@ -1534,7 +1558,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
@@ -1544,12 +1567,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
}
virtio_set_vtpci_ops(hw);
- if (hw->use_simple_rxtx) {
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else {
- rx_func_get(eth_dev);
- }
+ set_rxtx_funcs(eth_dev);
+
return 0;
}
@@ -1659,9 +1678,11 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t req_features;
int ret;
PMD_INIT_LOG(DEBUG, "configure");
+ req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
if (dev->data->dev_conf.intr_conf.rxq) {
ret = virtio_init_device(dev, hw->req_guest_features);
@@ -1669,16 +1690,37 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- /* Virtio does L4 checksum but not L3! */
- if (rxmode->hw_ip_checksum) {
- PMD_DRV_LOG(NOTICE,
- "virtio does not support IP checksum");
+ /* The name hw_ip_checksum is a bit confusing since it can be
+ * set by the application to request L3 and/or L4 checksums. For
+ * virtio, only the L4 checksum is supported.
+ */
+ if (rxmode->hw_ip_checksum)
+ req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+
+ if (rxmode->enable_lro)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+
+ /* if the requested features changed, reinit the device */
+ if (req_features != hw->req_guest_features) {
+ ret = virtio_init_device(dev, req_features);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (rxmode->hw_ip_checksum &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ PMD_DRV_LOG(ERR,
+ "rx checksum not available on this host");
return -ENOTSUP;
}
- if (rxmode->enable_lro) {
- PMD_DRV_LOG(NOTICE,
- "virtio does not support Large Receive Offload");
+ if (rxmode->enable_lro &&
+ (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(ERR,
+ "Large Receive Offload not available on this host");
return -ENOTSUP;
}
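
With this hunk the PMD renegotiates guest features on demand instead of rejecting checksum/LRO requests outright. A sketch (not part of the patch) of how an application asks for these offloads with the 17.11 rxmode flags; the queue counts are examples:

#include <rte_ethdev.h>

/* Sketch: request Rx L4 checksum and LRO before configuring the port.
 * If the host lacks VIRTIO_NET_F_GUEST_CSUM/TSO*, configure fails with
 * -ENOTSUP, matching the checks above. */
static int
configure_rx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_ip_checksum = 1, /* L4 only on virtio */
			.enable_lro = 1,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
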
@@ -1690,7 +1732,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (rxmode->hw_vlan_filter
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
}
@@ -1703,6 +1745,23 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EBUSY;
}
+ hw->use_simple_rx = 1;
+ hw->use_simple_tx = 1;
+
+#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+#endif
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+
+ if (rxmode->hw_ip_checksum)
+ hw->use_simple_rx = 0;
+
return 0;
}
@@ -1714,6 +1773,19 @@ virtio_dev_start(struct rte_eth_dev *dev)
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq __rte_unused;
struct virtio_hw *hw = dev->data->dev_private;
+ int ret;
+
+ /* Finish the initialization of the queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = virtio_dev_rx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret = virtio_dev_tx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
@@ -1751,9 +1823,16 @@ virtio_dev_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
+ /* Flush the old packets */
+ virtqueue_flush(rxvq->vq);
virtqueue_notify(rxvq->vq);
}
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_notify(txvq->vq);
+ }
+
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -1766,6 +1845,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
+ set_rxtx_funcs(dev);
hw->started = 1;
/* Initialize Link state */
@@ -1875,6 +1955,29 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
return (old.link_status == link.link_status) ? -1 : 0;
}
+static int
+virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->hw_vlan_filter &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+
+ PMD_DRV_LOG(NOTICE,
+ "vlan filtering not available on this host");
+
+ return -ENOTSUP;
+ }
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = rxmode->hw_vlan_strip;
+
+ return 0;
+}
+
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
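
The new vlan_offload_set callback makes VLAN stripping and filtering adjustable at runtime through the generic ethdev call. A sketch, assuming the standard rte_eth_dev_set_vlan_offload() API:

#include <rte_ethdev.h>

/* Sketch: turn on VLAN stripping at runtime; the callback above checks
 * VIRTIO_NET_F_CTRL_VLAN before accepting a filtering request. */
static int
enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
			mask | ETH_VLAN_STRIP_OFFLOAD);
}
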
@@ -1904,6 +2007,8 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index c3413c6d..2039bc54 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -92,10 +92,16 @@ int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index e6da6802..55b717c0 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -553,7 +553,7 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
uint32_t offset = cap->offset;
uint8_t *base;
- if (bar > 5) {
+ if (bar >= PCI_MAX_RESOURCE) {
PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
return NULL;
}
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 18caebdd..36d452c0 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -37,6 +37,7 @@
#include <stdint.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
struct virtqueue;
@@ -259,8 +260,9 @@ struct virtio_hw {
uint8_t vlan_strip;
uint8_t use_msix;
uint8_t modern;
- uint8_t use_simple_rxtx;
- uint8_t port_id;
+ uint8_t use_simple_rx;
+ uint8_t use_simple_tx;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e30377c5..390c137c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -39,7 +39,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
@@ -50,7 +49,6 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
-#include <rte_cpuflags.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
@@ -81,7 +79,7 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
return VIRTQUEUE_NUSED(vq) >= offset;
}
-static void
+void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
@@ -300,6 +298,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
/* prepend cannot fail, checked by caller */
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() adds the header size to the packet length,
+ * which is not wanted here; subtract it to restore the correct size.
+ */
+ cookie->pkt_len -= head_size;
/* if offload disabled, it is not zeroed below, do it now */
if (offload == 0) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
@@ -421,9 +423,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_rx *rxvq;
- int error, nbufs;
- struct rte_mbuf *m;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
@@ -440,12 +439,26 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
dev->data->rx_queues[queue_idx] = rxvq;
+ return 0;
+}
+
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_rx *rxvq = &vq->rxq;
+ struct rte_mbuf *m;
+ uint16_t desc_idx;
+ int error, nbufs;
+
+ PMD_INIT_FUNC_TRACE();
/* Allocate blank mbufs for each Rx descriptor */
nbufs = 0;
- error = ENOSPC;
- if (hw->use_simple_rxtx) {
+ if (hw->use_simple_rx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] = desc_idx;
@@ -467,7 +480,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
break;
/* Enqueue allocated buffers */
- if (hw->use_simple_rxtx)
+ if (hw->use_simple_rx)
error = virtqueue_enqueue_recv_refill_simple(vq, m);
else
error = virtqueue_enqueue_recv_refill(vq, m);
@@ -490,31 +503,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-static void
-virtio_update_rxtx_handler(struct rte_eth_dev *dev,
- const struct rte_eth_txconf *tx_conf)
-{
- uint8_t use_simple_rxtx = 0;
- struct virtio_hw *hw = dev->data->dev_private;
-
-#if defined RTE_ARCH_X86
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
- use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
- use_simple_rxtx = 1;
-#endif
- /* Use simple rx/tx func if single segment and no offloads */
- if (use_simple_rxtx &&
- (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO, "Using simple rx/tx path");
- dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- dev->rx_pkt_burst = virtio_recv_pkts_vec;
- hw->use_simple_rxtx = use_simple_rxtx;
- }
-}
-
/*
* struct rte_eth_dev *dev: Used to update dev
* uint16_t nb_desc: Defaults to values read from config space
@@ -534,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_tx *txvq;
uint16_t tx_free_thresh;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- virtio_update_rxtx_handler(dev, tx_conf);
+ /* cannot use the simple Tx path with multi-segment packets or offloads */
+ if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+ hw->use_simple_tx = 0;
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
@@ -563,9 +552,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
vq->vq_free_thresh = tx_free_thresh;
- if (hw->use_simple_rxtx) {
- uint16_t mid_idx = vq->vq_nentries >> 1;
+ dev->data->tx_queues[queue_idx] = txvq;
+ return 0;
+}
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t queue_idx)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ uint16_t mid_idx = vq->vq_nentries >> 1;
+ struct virtnet_tx *txvq = &vq->txq;
+ uint16_t desc_idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->use_simple_tx) {
for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] =
desc_idx + mid_idx;
@@ -587,7 +591,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
VIRTQUEUE_DUMP(vq);
- dev->data->tx_queues[queue_idx] = txvq;
return 0;
}
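
The simple Tx path is now opted out per queue based on txq_flags. A sketch of a queue setup that keeps the simple path eligible; VIRTIO_SIMPLE_FLAGS corresponds to the no-multiseg/no-offload bits, as far as this hunk shows:

#include <rte_ethdev.h>

/* Sketch: disable multi-segment packets and Tx offloads so the driver
 * may keep use_simple_tx set for this queue. */
static int
setup_simple_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id), &txconf);
}
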
@@ -670,7 +673,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
* In case of SCTP, this will be wrong since it's a CRC
* but there's nothing we can do.
*/
- uint16_t csum, off;
+ uint16_t csum = 0, off;
rte_raw_cksum_mbuf(m, hdr->csum_start,
rte_pktmbuf_pkt_len(m) - hdr->csum_start,
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 28f82d6a..54f1e849 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -54,7 +54,7 @@ struct virtnet_rx {
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
@@ -66,10 +66,10 @@ struct virtnet_tx {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
@@ -81,9 +81,9 @@ struct virtnet_ctl {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
- uint8_t port_id; /**< Device port identifier. */
- const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ uint16_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
};
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 542cf805..b5bc1c49 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -39,7 +39,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
@@ -65,6 +64,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
struct vring_desc *start_dp;
uint16_t desc_idx;
+ cookie->port = vq->rxq.port_id;
+
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
dxp = &vq->vq_descx[desc_idx];
dxp->cookie = (void *)cookie;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 6b40c7f7..b8b93551 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -43,7 +43,6 @@
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 7cf0f8b8..94f65143 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -46,7 +46,6 @@
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index f585de8c..689a5cff 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -95,9 +95,9 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
ifr.ifr_flags |= IFF_MULTI_QUEUE;
if (*p_ifname)
- strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ);
+ strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ - 1);
else
- strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
+ strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ - 1);
if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) {
PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno));
goto error;
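
The tightened bound above leaves room for the terminator but still relies on the last byte of the (presumably zeroed) ifreq staying zero. A defensive sketch, not the patch's code, that terminates explicitly:

#include <string.h>
#include <net/if.h>

/* Sketch: copy an interface name with guaranteed NUL termination,
 * even when 'name' is IFNAMSIZ - 1 characters or longer. */
static void
set_ifname(struct ifreq *ifr, const char *name)
{
	strncpy(ifr->ifr_name, name, IFNAMSIZ - 1);
	ifr->ifr_name[IFNAMSIZ - 1] = '\0';
}
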
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 4ad7b21b..97bd8326 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -130,6 +130,10 @@ vhost_user_read(int fd, struct vhost_user_msg *msg)
}
sz_payload = msg->size;
+
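+ /* reject a declared payload size larger than the receive buffer */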
+ if ((size_t)sz_payload > sizeof(msg->payload))
+ goto fail;
+
if (sz_payload) {
ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
if (ret < sz_payload) {
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 79412714..906d7a2b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -270,6 +270,8 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ /* For a virtio vdev, the eventfd counter does not need to be read to clear the event */
+ eth_dev->intr_handle->efd_counter_size = 0;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index c9614443..7be57ce6 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -40,7 +40,7 @@
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include "virtio_ethdev.h"
@@ -86,7 +86,11 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
int flags;
flags = fcntl(dev->vhostfd, F_GETFL);
- fcntl(dev->vhostfd, F_SETFL, flags | O_NONBLOCK);
+ if (fcntl(dev->vhostfd, F_SETFL,
+ flags | O_NONBLOCK) == -1) {
+ PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
+ return;
+ }
r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
if (r == 0 || (r < 0 && errno != EAGAIN)) {
dev->status &= (~VIRTIO_NET_S_LINK_UP);
@@ -369,9 +373,9 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
*/
hw->use_msix = 1;
hw->modern = 0;
- hw->use_simple_rxtx = 0;
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
hw->virtio_user_dev = dev;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
return eth_dev;
}
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 9ad77b8a..c3a536f8 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -59,3 +59,28 @@ virtqueue_detatch_unused(struct virtqueue *vq)
}
return NULL;
}
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_flush(struct virtqueue *vq)
+{
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+ uint16_t used_idx, desc_idx;
+ uint16_t nb_used, i;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ for (i = 0; i < nb_used; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ }
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2e120861..2305d91a 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -38,7 +38,6 @@
#include <rte_atomic.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include "virtio_pci.h"
@@ -80,7 +79,7 @@ struct rte_mbuf;
#define VIRTIO_MBUF_ADDR(mb, vq) \
((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
#endif
/**
@@ -143,8 +142,8 @@ struct virtio_net_ctrl_mac {
} __attribute__((__packed__));
#define VIRTIO_NET_CTRL_MAC 1
- #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
- #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
/**
* Control VLAN filtering
@@ -204,8 +203,8 @@ struct virtqueue {
struct virtnet_ctl cq;
};
- phys_addr_t vq_ring_mem; /**< physical address of vring,
- * or virtual address for virtio_user. */
+ rte_iova_t vq_ring_mem; /**< physical address of vring,
+ * or virtual address for virtio_user. */
/**
* Head of the free chain in the descriptor table. If
@@ -304,6 +303,9 @@ void virtqueue_dump(struct virtqueue *vq);
*/
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+/* Flush the elements in the used ring. */
+void virtqueue_flush(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{
@@ -312,6 +314,8 @@ virtqueue_full(const struct virtqueue *vq)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 84356ae2..f09de96e 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -63,6 +63,9 @@ CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
VPATH += $(SRCDIR)/base
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 39109919..82d59ca8 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -48,6 +48,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
@@ -87,7 +88,7 @@ static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
-static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
+static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats,
@@ -100,7 +101,7 @@ static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
-static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);
@@ -309,7 +310,6 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -484,7 +484,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
memset(mz->addr, 0, mz->len);
hw->shared = mz->addr;
- hw->sharedPA = mz->phys_addr;
+ hw->sharedPA = mz->iova;
/*
* Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
@@ -505,7 +505,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
- hw->queueDescPA = mz->phys_addr;
+ hw->queueDescPA = mz->iova;
hw->queue_desc_len = (uint16_t)size;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
@@ -521,7 +521,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
memset(mz->addr, 0, mz->len);
hw->rss_conf = mz->addr;
- hw->rss_confPA = mz->phys_addr;
+ hw->rss_confPA = mz->iova;
}
return 0;
@@ -537,10 +537,10 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
- val = *(const uint32_t *)addr;
+ memcpy(&val, addr, 4);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
- val = (addr[5] << 8) | addr[4];
+ memcpy(&val, addr + 4, 2);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}
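
The memcpy form above avoids a type-punned, potentially unaligned 32-bit load from the 6-byte MAC array. A standalone sketch of the same pattern:

#include <stdint.h>
#include <string.h>

/* Sketch: split a 6-byte MAC into the two register words without
 * casting the byte array to a wider type (well-defined regardless of
 * alignment; assumes a little-endian host, as the patch does). */
static void
split_mac(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
	*lo = 0;
	*hi = 0;
	memcpy(lo, addr, 4);
	memcpy(hi, addr + 4, 2);
}
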
@@ -569,7 +569,7 @@ vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
}
memset(mz->addr, 0, mz->len);
hw->memRegs = mz->addr;
- hw->memRegsPA = mz->phys_addr;
+ hw->memRegsPA = mz->iova;
}
num = hw->num_rx_queues;
@@ -604,7 +604,7 @@ vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
mr->startPA =
- (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
mr->txQueueBits = index[i];
@@ -730,8 +730,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
devRead->rssConfDesc.confPA = hw->rss_confPA;
}
- vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ ret = vmxnet3_dev_vlan_offload_set(dev,
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ if (ret)
+ return ret;
vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);
@@ -1034,7 +1036,7 @@ vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
-static void
+static int
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned int i;
@@ -1080,6 +1082,8 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ierrors += rxStats.pktsRxError;
stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
}
+
+ return 0;
}
static void
@@ -1144,6 +1148,8 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
+ ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
+ ether_addr_copy(mac_addr, &dev->data->mac_addrs[0]);
vmxnet3_write_mac(hw, mac_addr->addr_bytes);
}
@@ -1275,7 +1281,7 @@ vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
return 0;
}
-static void
+static int
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
@@ -1301,6 +1307,8 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
+
+ return 0;
}
static void
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index d2e8323b..a6fa93ac 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -144,7 +144,7 @@ typedef struct vmxnet3_tx_queue {
const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint16_t txdata_desc_size;
} vmxnet3_tx_queue_t;
@@ -179,7 +179,7 @@ typedef struct vmxnet3_rx_queue {
const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
} vmxnet3_rx_queue_t;
#endif /* _VMXNET3_RING_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index d9cf4373..aa396ab2 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -203,6 +203,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_cmd_ring_release(&tq->cmd_ring);
/* Release the memzone */
rte_memzone_free(tq->mz);
+ /* Release the queue */
+ rte_free(tq);
}
}
@@ -223,6 +225,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release the memzone */
rte_memzone_free(rq->mz);
+
+ /* Release the queue */
+ rte_free(rq);
}
}
@@ -265,11 +270,9 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
- if (rq != NULL) {
- /* Release both the cmd_rings mbufs */
- for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
- vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
- }
+ /* Release both the cmd_rings mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
ring0 = &rq->cmd_ring[0];
ring1 = &rq->cmd_ring[1];
@@ -504,13 +507,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
if (copy_size) {
- uint64 offset = txq->cmd_ring.next2fill *
- txq->txdata_desc_size;
+ uint64 offset =
+ (uint64)txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
gdesc->txd.addr =
rte_cpu_to_le_64(txq->data_ring.basePA +
offset);
} else {
- gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
}
gdesc->dword[2] = dw2 | m_seg->data_len;
@@ -618,7 +622,7 @@ vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
*/
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
@@ -848,7 +852,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* Check for hardware stripped VLAN tag */
if (rcd->ts) {
- start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ start->ol_flags |= (PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED);
start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
}
@@ -880,6 +885,23 @@ rcd_done:
}
}
+ if (unlikely(nb_rxd == 0)) {
+ uint32_t avail;
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
+ if (unlikely(avail > 0)) {
+ /* try to allocate new mbufs and refill the descriptors */
+ vmxnet3_post_rx_bufs(rxq, ring_idx);
+ }
+ }
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+ }
+ }
+
return nb_rx;
}
@@ -962,7 +984,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* cmd_ring initialization */
ring->base = mz->addr;
- ring->basePA = mz->phys_addr;
+ ring->basePA = mz->iova;
/* comp_ring initialization */
comp_ring->base = ring->base + ring->size;
@@ -1073,7 +1095,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* cmd_ring0 initialization */
ring0->base = mz->addr;
- ring0->basePA = mz->phys_addr;
+ ring0->basePA = mz->iova;
/* cmd_ring1 initialization */
ring1->base = ring0->base + ring0->size;
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
deleted file mode 100644
index e404b775..00000000
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ /dev/null
@@ -1,766 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <errno.h>
-#include <sys/user.h>
-#ifndef PAGE_SIZE
-#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
-#endif
-#include <linux/binfmts.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <linux/virtio_ring.h>
-
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-#include <rte_string_fns.h>
-#include <rte_vdev.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_etheraddr.h>
-
-#include "rte_xen_lib.h"
-#include "virtqueue.h"
-#include "rte_eth_xenvirt.h"
-
-#define VQ_DESC_NUM 256
-#define VIRTIO_MBUF_BURST_SZ 64
-
-/* virtio_idx is increased after new device is created.*/
-static int virtio_idx = 0;
-
-static struct rte_eth_link pmd_link = {
- .link_speed = ETH_SPEED_NUM_10G,
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_FIXED
-};
-
-static void
-eth_xenvirt_free_queues(struct rte_eth_dev *dev);
-
-static uint16_t
-eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- struct virtqueue *rxvq = q;
- struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num;
- uint32_t len[VIRTIO_MBUF_BURST_SZ];
- uint32_t i;
- struct pmd_internals *pi = rxvq->internals;
-
- nb_used = VIRTQUEUE_NUSED(rxvq);
-
- rte_smp_rmb();
- num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
- num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
- if (unlikely(num == 0)) return 0;
-
- num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
- PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
- for (i = 0; i < num ; i ++) {
- rxm = rx_pkts[i];
- PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
- rxm->next = NULL;
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
- rxm->nb_segs = 1;
- rxm->port = pi->port_id;
- rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
- }
- /* allocate new mbuf for the used descriptor */
- while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
- if (unlikely(new_mbuf == NULL)) {
- break;
- }
- if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
- rte_pktmbuf_free_seg(new_mbuf);
- break;
- }
- }
- pi->eth_stats.ipackets += num;
- return num;
-}
-
-static uint16_t
-eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct virtqueue *txvq = tx_queue;
- struct rte_mbuf *txm;
- uint16_t nb_used, nb_tx, num, i;
- int error;
- uint32_t len[VIRTIO_MBUF_BURST_SZ];
- struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
- struct pmd_internals *pi = txvq->internals;
-
- nb_tx = 0;
-
- if (unlikely(nb_pkts == 0))
- return 0;
-
- PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
-
- rte_smp_rmb();
-
- num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
- num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
-
- for (i = 0; i < num ; i ++) {
- /* mergable not supported, one segment only */
- rte_pktmbuf_free_seg(snd_pkts[i]);
- }
-
- while (nb_tx < nb_pkts) {
- if (likely(!virtqueue_full(txvq))) {
- /* TODO drop tx_pkts if it contains multiple segments */
- txm = tx_pkts[nb_tx];
- error = virtqueue_enqueue_xmit(txvq, txm);
- if (unlikely(error)) {
- if (error == ENOSPC)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
- else if (error == EMSGSIZE)
- PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
- else
- PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
- break;
- }
- nb_tx++;
- } else {
- PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
- /* virtqueue_notify not needed in our para-virt solution */
- break;
- }
- }
- pi->eth_stats.opackets += nb_tx;
- return nb_tx;
-}
-
-static int
-eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- RTE_LOG(ERR, PMD, "%s\n", __func__);
- return 0;
-}
-
-/*
- * Create a shared page between guest and host.
- * Host monitors this page if it is cleared on unmap, and then
- * do necessary clean up.
- */
-static void
-gntalloc_vring_flag(int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
- uint32_t gref_tmp;
- void *ptr;
-
- if (grefwatch_from_alloc(&gref_tmp, &ptr)) {
- RTE_LOG(ERR, PMD, "grefwatch_from_alloc error\n");
- exit(0);
- }
-
- *(uint8_t *)ptr = MAP_FLAG;
- snprintf(val_str, sizeof(val_str), "%u", gref_tmp);
- snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"VRING_FLAG_STR, vtidx);
- xenstore_write(key_str, val_str);
-}
-
-/*
- * Notify host this virtio device is started.
- * Host could start polling this device.
- */
-static void
-dev_start_notify(int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
-
- RTE_LOG(INFO, PMD, "%s: virtio %d is started\n", __func__, vtidx);
- gntalloc_vring_flag(vtidx);
-
- snprintf(key_str, sizeof(key_str), "%s%s%d",
- DPDK_XENSTORE_PATH, EVENT_TYPE_START_STR,
- vtidx);
- snprintf(val_str, sizeof(val_str), "1");
- xenstore_write(key_str, val_str);
-}
-
-/*
- * Notify host this virtio device is stopped.
- * Host could stop polling this device.
- */
-static void
-dev_stop_notify(int vtidx)
-{
- RTE_SET_USED(vtidx);
-}
-
-
-static int
-update_mac_address(struct ether_addr *mac_addrs, int vtidx)
-{
- char key_str[PATH_MAX];
- char val_str[PATH_MAX];
- int rv;
-
- if (mac_addrs == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer mac specified\n", __func__);
- return -1;
- }
- rv = snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d_ether_addr", vtidx);
- if (rv == -1)
- return rv;
- rv = snprintf(val_str, sizeof(val_str), "%02x:%02x:%02x:%02x:%02x:%02x",
- mac_addrs->addr_bytes[0],
- mac_addrs->addr_bytes[1],
- mac_addrs->addr_bytes[2],
- mac_addrs->addr_bytes[3],
- mac_addrs->addr_bytes[4],
- mac_addrs->addr_bytes[5]);
- if (rv == -1)
- return rv;
- if (xenstore_write(key_str, val_str))
- return rv;
- return 0;
-}
-
-
-static int
-eth_dev_start(struct rte_eth_dev *dev)
-{
- struct virtqueue *rxvq = dev->data->rx_queues[0];
- struct virtqueue *txvq = dev->data->tx_queues[0];
- struct rte_mbuf *m;
- struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
- int rv;
-
- dev->data->dev_link.link_status = ETH_LINK_UP;
- while (!virtqueue_full(rxvq)) {
- m = rte_mbuf_raw_alloc(rxvq->mpool);
- if (m == NULL)
- break;
- /* Enqueue allocated buffers. */
- if (virtqueue_enqueue_recv_refill(rxvq, m)) {
- rte_pktmbuf_free_seg(m);
- break;
- }
- }
-
- rxvq->internals = pi;
- txvq->internals = pi;
-
- rv = update_mac_address(dev->data->mac_addrs, pi->virtio_idx);
- if (rv)
- return -1;
- dev_start_notify(pi->virtio_idx);
-
- return 0;
-}
-
-static void
-eth_dev_stop(struct rte_eth_dev *dev)
-{
- struct pmd_internals *pi = (struct pmd_internals *)dev->data->dev_private;
-
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
- dev_stop_notify(pi->virtio_idx);
-}
-
-/*
- * Notify host this virtio device is closed.
- * Host could do necessary clean up to this device.
- */
-static void
-eth_dev_close(struct rte_eth_dev *dev)
-{
- eth_xenvirt_free_queues(dev);
-}
-
-static void
-eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
-{
- struct pmd_internals *internals = dev->data->dev_private;
-
- RTE_SET_USED(internals);
- dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t)2048;
- dev_info->max_rx_queues = (uint16_t)1;
- dev_info->max_tx_queues = (uint16_t)1;
- dev_info->min_rx_bufsize = 0;
-}
-
-static void
-eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- if(stats)
- rte_memcpy(stats, &internals->eth_stats, sizeof(*stats));
-}
-
-static void
-eth_stats_reset(struct rte_eth_dev *dev)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- /* Reset software totals */
- memset(&internals->eth_stats, 0, sizeof(internals->eth_stats));
-}
-
-static void
-eth_queue_release(void *q)
-{
- rte_free(q);
-}
-
-static int
-eth_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
-{
- return 0;
-}
-
-/*
- * Create shared vring between guest and host.
- * Memory is allocated through grant alloc driver, so it is not physical continuous.
- */
-static void *
-gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
-{
- char key_str[PATH_MAX] = {0};
- char val_str[PATH_MAX] = {0};
- void *va = NULL;
- int pg_size;
- uint32_t pg_num;
- uint32_t *gref_arr = NULL;
- phys_addr_t *pa_arr = NULL;
- uint64_t start_index;
- int rv;
-
- pg_size = getpagesize();
- size = RTE_ALIGN_CEIL(size, pg_size);
- pg_num = size / pg_size;
-
- gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
- pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
-
- if (gref_arr == NULL || pa_arr == NULL) {
- RTE_LOG(ERR, PMD, "%s: calloc failed\n", __func__);
- goto out;
- }
-
- va = gntalloc(size, gref_arr, &start_index);
- if (va == NULL) {
- RTE_LOG(ERR, PMD, "%s: gntalloc failed\n", __func__);
- goto out;
- }
-
- if (get_phys_map(va, pa_arr, pg_num, pg_size))
- goto out;
-
- /* write in xenstore gref and pfn for each page of vring */
- if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
- gntfree(va, size, start_index);
- va = NULL;
- goto out;
- }
-
- if (queue_type == VTNET_RQ)
- rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"RXVRING_XENSTORE_STR, vtidx);
- else
- rv = snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"TXVRING_XENSTORE_STR, vtidx);
- if (rv == -1 || xenstore_write(key_str, val_str) == -1) {
- gntfree(va, size, start_index);
- va = NULL;
- }
-out:
- free(pa_arr);
- free(gref_arr);
-
- return va;
-}
-
-
-
-static struct virtqueue *
-virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
-{
- struct virtqueue *vq = NULL;
- uint16_t vq_size = VQ_DESC_NUM;
- int i = 0;
- char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- size_t size;
- struct vring *vr;
-
- /* Allocate memory for virtqueue. */
- if (queue_type == VTNET_RQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_rvq",
- dev->data->port_id);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
- if (vq == NULL) {
- RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
- return NULL;
- }
- memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- } else if(queue_type == VTNET_TQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_tvq",
- dev->data->port_id);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
- if (vq == NULL) {
- RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
- return NULL;
- }
- memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- }
-
- memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
-
- vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
- vq->vq_nentries = vq_size;
- vq->vq_free_cnt = vq_size;
- /* Calcuate vring size according to virtio spec */
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
- /* Allocate memory for virtio vring through gntalloc driver*/
- vq->vq_ring_virt_mem = gntalloc_vring_create(queue_type, vq->vq_ring_size,
- ((struct pmd_internals *)dev->data->dev_private)->virtio_idx);
- memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
- vr = &vq->vq_ring;
- vring_init(vr, vq_size, vq->vq_ring_virt_mem, vq->vq_alignment);
- /*
- * Locally maintained last consumed index, this idex trails
- * vq_ring.used->idx.
- */
- vq->vq_used_cons_idx = 0;
- vq->vq_desc_head_idx = 0;
- vq->vq_free_cnt = vq->vq_nentries;
- memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- /* Chain all the descriptors in the ring with an END */
- for (i = 0; i < vq_size - 1; i++)
- vr->desc[i].next = (uint16_t)(i + 1);
- vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
-
- return vq;
-}
-
-static int
-eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- struct virtqueue *vq;
- vq = dev->data->rx_queues[rx_queue_id] = virtio_queue_setup(dev, VTNET_RQ);
- vq->mpool = mb_pool;
- return 0;
-}
-
-static int
-eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- dev->data->tx_queues[tx_queue_id] = virtio_queue_setup(dev, VTNET_TQ);
- return 0;
-}
-
-static void
-eth_xenvirt_free_queues(struct rte_eth_dev *dev)
-{
- int i;
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_queue_release(dev->data->rx_queues[i]);
- dev->data->rx_queues[i] = NULL;
- }
- dev->data->nb_rx_queues = 0;
-
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_queue_release(dev->data->tx_queues[i]);
- dev->data->tx_queues[i] = NULL;
- }
- dev->data->nb_tx_queues = 0;
-}
-
-static const struct eth_dev_ops ops = {
- .dev_start = eth_dev_start,
- .dev_stop = eth_dev_stop,
- .dev_close = eth_dev_close,
- .dev_configure = eth_dev_configure,
- .dev_infos_get = eth_dev_info,
- .rx_queue_setup = eth_rx_queue_setup,
- .tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
- .link_update = eth_link_update,
- .stats_get = eth_stats_get,
- .stats_reset = eth_stats_reset,
-};
-
-
-static int
-rte_eth_xenvirt_parse_args(struct xenvirt_dict *dict,
- const char *name, const char *params)
-{
- int i;
- char *pairs[RTE_ETH_XENVIRT_MAX_ARGS];
- int num_of_pairs;
- char *pair[2];
- char *args;
- int ret = -1;
-
- if (params == NULL)
- return 0;
-
- args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
- if (args == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't parse %s device \n", name);
- return -1;
- }
- rte_memcpy(args, params, strlen(params));
-
- num_of_pairs = rte_strsplit(args, strnlen(args, MAX_ARG_STRLEN),
- pairs,
- RTE_ETH_XENVIRT_MAX_ARGS ,
- RTE_ETH_XENVIRT_PAIRS_DELIM);
-
- for (i = 0; i < num_of_pairs; i++) {
- pair[0] = NULL;
- pair[1] = NULL;
- rte_strsplit(pairs[i], strnlen(pairs[i], MAX_ARG_STRLEN),
- pair, 2,
- RTE_ETH_XENVIRT_KEY_VALUE_DELIM);
-
- if (pair[0] == NULL || pair[1] == NULL || pair[0][0] == 0
- || pair[1][0] == 0) {
- RTE_LOG(ERR, PMD,
- "Couldn't parse %s device,"
- "wrong key or value \n", name);
- goto err;
- }
-
- if (!strncmp(pair[0], RTE_ETH_XENVIRT_MAC_PARAM,
- sizeof(RTE_ETH_XENVIRT_MAC_PARAM))) {
- if (cmdline_parse_etheraddr(NULL,
- pair[1],
- &dict->addr,
- sizeof(dict->addr)) < 0) {
- RTE_LOG(ERR, PMD,
- "Invalid %s device ether address\n",
- name);
- goto err;
- }
-
- dict->addr_valid = 1;
- }
- }
-
- ret = 0;
-err:
- rte_free(args);
- return ret;
-}
-
-enum dev_action {
- DEV_CREATE,
- DEV_ATTACH
-};
-
-static struct rte_vdev_driver pmd_xenvirt_drv;
-
-static int
-eth_dev_xenvirt_create(const char *name, const char *params,
- const unsigned numa_node,
- enum dev_action action)
-{
- struct rte_eth_dev_data *data = NULL;
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- struct xenvirt_dict dict;
-
- memset(&dict, 0, sizeof(struct xenvirt_dict));
-
- RTE_LOG(INFO, PMD, "Creating virtio rings backed ethdev on numa socket %u\n",
- numa_node);
- RTE_SET_USED(action);
-
- if (rte_eth_xenvirt_parse_args(&dict, name, params) < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to parse ethdev parameters\n", __func__);
- return -1;
- }
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto err;
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
- if (internals == NULL)
- goto err;
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL)
- goto err;
-
- data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
- data->nb_rx_queues = (uint16_t)1;
- data->nb_tx_queues = (uint16_t)1;
- data->dev_link = pmd_link;
- data->mac_addrs = rte_zmalloc("xen_virtio", ETHER_ADDR_LEN, 0);
-
- if(dict.addr_valid)
- memcpy(&data->mac_addrs->addr_bytes, &dict.addr, sizeof(struct ether_addr));
- else
- eth_random_addr(&data->mac_addrs->addr_bytes[0]);
-
- eth_dev->data = data;
- eth_dev->dev_ops = &ops;
-
- eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->numa_node = numa_node;
-
- eth_dev->rx_pkt_burst = eth_xenvirt_rx;
- eth_dev->tx_pkt_burst = eth_xenvirt_tx;
-
- internals->virtio_idx = virtio_idx++;
- internals->port_id = eth_dev->data->port_id;
-
- return 0;
-
-err:
- rte_free(data);
- rte_free(internals);
-
- return -1;
-}
-
-
-static int
-eth_dev_xenvirt_free(const char *name, const unsigned numa_node)
-{
- struct rte_eth_dev *eth_dev = NULL;
-
- RTE_LOG(DEBUG, PMD,
- "Free virtio rings backed ethdev on numa socket %u\n",
- numa_node);
-
- /* find an ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev == NULL)
- return -1;
-
- if (eth_dev->data->dev_started == 1) {
- eth_dev_stop(eth_dev);
- eth_dev_close(eth_dev);
- }
-
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
- eth_dev->dev_ops = NULL;
-
-	rte_free(eth_dev->data->dev_private);
-	rte_free(eth_dev->data->mac_addrs);
-	rte_free(eth_dev->data);
-
- virtio_idx--;
-
- return 0;
-}
-
-/*TODO: Support multiple process model */
-static int
-rte_pmd_xenvirt_probe(struct rte_vdev_device *dev)
-{
- if (virtio_idx == 0) {
- if (xenstore_init() != 0) {
- RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
- return -1;
- }
- if (gntalloc_open() != 0) {
- RTE_LOG(ERR, PMD, "%s: grant init failed\n", __func__);
- return -1;
- }
- }
- eth_dev_xenvirt_create(rte_vdev_device_name(dev),
- rte_vdev_device_args(dev), rte_socket_id(), DEV_CREATE);
- return 0;
-}
-
-static int
-rte_pmd_xenvirt_remove(struct rte_vdev_device *dev)
-{
- eth_dev_xenvirt_free(rte_vdev_device_name(dev), rte_socket_id());
-
- if (virtio_idx == 0) {
- if (xenstore_uninit() != 0)
- RTE_LOG(ERR, PMD, "%s: xenstore uninit failed\n", __func__);
-
- gntalloc_close();
- }
- return 0;
-}
-
-static struct rte_vdev_driver pmd_xenvirt_drv = {
- .probe = rte_pmd_xenvirt_probe,
- .remove = rte_pmd_xenvirt_remove,
-};
-
-RTE_PMD_REGISTER_VDEV(net_xenvirt, pmd_xenvirt_drv);
-RTE_PMD_REGISTER_ALIAS(net_xenvirt, eth_xenvirt);
-RTE_PMD_REGISTER_PARAM_STRING(net_xenvirt,
- "mac=<mac addr>");
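
For context, the driver deleted above was instantiated through the standard
EAL --vdev mechanism using the "mac" parameter it registers; a minimal
illustrative invocation (MAC value hypothetical) would have been:

    testpmd -l 0-1 --vdev 'net_xenvirt0,mac=00:11:22:33:44:55' -- -i
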
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt_version.map b/drivers/net/xenvirt/rte_eth_xenvirt_version.map
deleted file mode 100644
index dd636f72..00000000
--- a/drivers/net/xenvirt/rte_eth_xenvirt_version.map
+++ /dev/null
@@ -1,7 +0,0 @@
-DPDK_2.0 {
- global:
-
- rte_mempool_gntalloc_create;
-
- local: *;
-};
diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c
deleted file mode 100644
index 73e82f80..00000000
--- a/drivers/net/xenvirt/rte_mempool_gntalloc.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <string.h>
-#include <xen/sys/gntalloc.h>
-
-#include <rte_common.h>
-#include <rte_mempool.h>
-#include <rte_memory.h>
-#include <rte_errno.h>
-
-#include "rte_xen_lib.h"
-#include "rte_eth_xenvirt.h"
-
-struct _gntarr {
- uint32_t gref;
- phys_addr_t pa;
- uint64_t index;
- void *va;
-};
-
-struct _mempool_gntalloc_info {
- struct rte_mempool *mp;
- uint32_t pg_num;
- uint32_t *gref_arr;
- phys_addr_t *pa_arr;
- void *va;
- uint32_t mempool_idx;
- uint64_t start_index;
-};
-
-
-static rte_atomic32_t global_xenvirt_mempool_idx = RTE_ATOMIC32_INIT(-1);
-
-static int
-compare(const void *p1, const void *p2)
-{
- return ((const struct _gntarr *)p1)->pa - ((const struct _gntarr *)p2)->pa;
-}
-
-
-static struct _mempool_gntalloc_info
-_create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- struct _mempool_gntalloc_info mgi;
- struct rte_mempool *mp = NULL;
- struct rte_mempool_objsz objsz;
- uint32_t pg_num, rpg_num, pg_shift, pg_sz;
- char *va, *orig_va, *uv; /* uv: from which, the pages could be freed */
- ssize_t sz, usz; /* usz: unused size */
-	/*
-	 * for each page allocated through the xen_gntalloc driver:
-	 * gref_arr: stores grant references,
-	 * pa_arr:  stores physical addresses,
-	 * gnt_arr: stores all metadata
-	 */
- uint32_t *gref_arr = NULL;
- phys_addr_t *pa_arr = NULL;
- struct _gntarr *gnt_arr = NULL;
-	/* start index of the grant references, used for dealloc */
- uint64_t start_index;
- uint32_t i, j;
- int rv = 0;
- struct ioctl_gntalloc_dealloc_gref arg;
-
- mgi.mp = NULL;
- va = orig_va = uv = NULL;
- pg_num = rpg_num = 0;
- sz = 0;
-
- pg_sz = getpagesize();
- if (rte_is_power_of_2(pg_sz) == 0) {
- goto out;
- }
- pg_shift = rte_bsf32(pg_sz);
-
- rte_mempool_calc_obj_size(elt_size, flags, &objsz);
- sz = rte_mempool_xmem_size(elt_num, objsz.total_size, pg_shift);
- pg_num = sz >> pg_shift;
-
- pa_arr = calloc(pg_num, sizeof(pa_arr[0]));
- gref_arr = calloc(pg_num, sizeof(gref_arr[0]));
- gnt_arr = calloc(pg_num, sizeof(gnt_arr[0]));
- if ((gnt_arr == NULL) || (gref_arr == NULL) || (pa_arr == NULL))
- goto out;
-
- /* grant index is continuous in ascending order */
- orig_va = gntalloc(sz, gref_arr, &start_index);
- if (orig_va == NULL)
- goto out;
-
- get_phys_map(orig_va, pa_arr, pg_num, pg_sz);
- for (i = 0; i < pg_num; i++) {
- gnt_arr[i].index = start_index + i * pg_sz;
- gnt_arr[i].gref = gref_arr[i];
- gnt_arr[i].pa = pa_arr[i];
- gnt_arr[i].va = RTE_PTR_ADD(orig_va, i * pg_sz);
- }
- qsort(gnt_arr, pg_num, sizeof(struct _gntarr), compare);
-
- va = get_xen_virtual(sz, pg_sz);
- if (va == NULL) {
- goto out;
- }
-
-	/*
-	 * map one by one, as the indices aren't continuous after sorting.
-	 * This creates pg_num VMAs; doesn't Linux have a limit on this?
-	 */
- for (i = 0; i < pg_num; i++) {
- /* update gref_arr and pa_arr after sort */
- gref_arr[i] = gnt_arr[i].gref;
- pa_arr[i] = gnt_arr[i].pa;
- gnt_arr[i].va = mmap(va + i * pg_sz, pg_sz, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED, gntalloc_fd, gnt_arr[i].index);
- if ((gnt_arr[i].va == MAP_FAILED) || (gnt_arr[i].va != (va + i * pg_sz))) {
- RTE_LOG(ERR, PMD, "failed to map %d pages\n", i);
- goto mmap_failed;
- }
- }
-
-	/*
-	 * Check that the allocated size is big enough to hold elt_num
-	 * objects and calculate how many bytes are actually required.
-	 */
- usz = rte_mempool_xmem_usage(va, elt_num, objsz.total_size, pa_arr, pg_num, pg_shift);
- if (usz < 0) {
- mp = NULL;
- i = pg_num;
- goto mmap_failed;
- } else {
- /* unmap unused pages if any */
- uv = RTE_PTR_ADD(va, usz);
- if ((usz = va + sz - uv) > 0) {
-
- RTE_LOG(ERR, PMD,
- "%s(%s): unmap unused %zu of %zu "
-				"mmapped bytes @%p orig:%p\n",
- __func__, name, usz, sz, uv, va);
- munmap(uv, usz);
- i = (sz - usz) / pg_sz;
- for (; i < pg_num; i++) {
- arg.count = 1;
- arg.index = gnt_arr[i].index;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- if (rv) {
- /* shouldn't fail here */
-					RTE_LOG(ERR, PMD, "va=%p pa=0x%"PRIx64" index=%"PRIu64" %s\n",
- gnt_arr[i].va,
- gnt_arr[i].pa,
- arg.index, strerror(errno));
- rte_panic("gntdealloc failed when freeing pages\n");
- }
- }
-
- rpg_num = (sz - usz) >> pg_shift;
- } else
- rpg_num = pg_num;
-
- mp = rte_mempool_xmem_create(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags, va, pa_arr, rpg_num, pg_shift);
-
- RTE_ASSERT(elt_num == mp->size);
- }
- mgi.mp = mp;
- mgi.pg_num = rpg_num;
- mgi.gref_arr = gref_arr;
- mgi.pa_arr = pa_arr;
- if (mp)
- mgi.mempool_idx = rte_atomic32_add_return(&global_xenvirt_mempool_idx, 1);
- mgi.start_index = start_index;
- mgi.va = va;
-
-	if (mp == NULL) {
-		i = pg_num;
-		goto mmap_failed;
-	}
-	/* success: nothing to unmap below, fall through only for cleanup */
-	i = 0;
-
-/*
- * Unmap only, without deallocating the grant references.
- * Unused pages have already been unmapped; unmapping them
- * twice will fail, but that is safe.
- */
-mmap_failed:
-	for (j = 0; j < i; j++) {
-		if (gnt_arr[j].va)
-			munmap(gnt_arr[j].va, pg_sz);
-	}
-out:
- free(gnt_arr);
- if (orig_va)
- munmap(orig_va, sz);
- if (mp == NULL) {
- free(gref_arr);
- free(pa_arr);
-
- /* some gref has already been de-allocated from the list in the driver,
- * so dealloc one by one, and it is safe to deallocate twice
- */
- if (orig_va) {
- for (i = 0; i < pg_num; i++) {
- arg.index = start_index + i * pg_sz;
-				rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- }
- }
- }
- return mgi;
-}
-
-struct rte_mempool *
-rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- int rv;
- uint32_t i;
- struct _mempool_gntalloc_info mgi;
- struct ioctl_gntalloc_dealloc_gref arg;
- int pg_sz = getpagesize();
-
- mgi = _create_mempool(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
- if (mgi.mp) {
- rv = grant_gntalloc_mbuf_pool(mgi.mp,
- mgi.pg_num,
- mgi.gref_arr,
- mgi.pa_arr,
- mgi.mempool_idx);
- free(mgi.gref_arr);
- free(mgi.pa_arr);
- if (rv == 0)
- return mgi.mp;
-		/*
-		 * in _create_mempool, unused pages have already been unmapped and
-		 * deallocated; unmap and dealloc the remaining ones here.
-		 */
- munmap(mgi.va, pg_sz * mgi.pg_num);
- for (i = 0; i < mgi.pg_num; i++) {
- arg.index = mgi.start_index + i * pg_sz;
-			rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
- }
- return NULL;
- }
-	return NULL;
-}
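
A minimal caller sketch for the constructor removed above (element count,
size and cache values are illustrative, not taken from an in-tree user):

    struct rte_mempool *mp;

    mp = rte_mempool_gntalloc_create("xen_mp", 4096, 2048,
                                     32, 0,      /* cache, private size */
                                     NULL, NULL, /* no pool constructor */
                                     NULL, NULL, /* no per-object init */
                                     rte_socket_id(), 0);
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "gntalloc mempool creation failed\n");
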
diff --git a/drivers/net/xenvirt/rte_xen_lib.c b/drivers/net/xenvirt/rte_xen_lib.c
deleted file mode 100644
index 6c9a1d49..00000000
--- a/drivers/net/xenvirt/rte_xen_lib.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <xen/sys/gntalloc.h>
-
-#include <rte_common.h>
-#include <rte_string_fns.h>
-#include <rte_malloc.h>
-
-#include "rte_xen_lib.h"
-
-/*
- * The grant node format in xenstore for vring/mpool is:
- * 0_rx_vring_gref = "gref1#, gref2#, gref3#"
- * 0_mempool_gref = "gref1#, gref2#, gref3#"
- * each gref# is a grant reference for a shared page.
- * In each shared page, we store the grant_node_item items.
- */
-struct grant_node_item {
- uint32_t gref;
- uint32_t pfn;
-} __attribute__((packed));
-
-/* fd for xen_gntalloc driver, used to allocate grant pages*/
-int gntalloc_fd = -1;
-
-/* xenstore path for local domain, now it is '/local/domain/domid/' */
-static char *dompath = NULL;
-/* handle to xenstore read/write operations */
-static struct xs_handle *xs = NULL;
-/* flag to indicate if xenstore cleanup is required */
-static bool is_xenstore_cleaned_up;
-
-/*
- * Reserve a virtual address space.
- * On success, returns the pointer. On failure, returns NULL.
- */
-void *
-get_xen_virtual(size_t size, size_t page_sz)
-{
- void *addr;
- uintptr_t aligned_addr;
-
- addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
-		RTE_LOG(ERR, PMD, "failed to get a virtual area\n");
- return NULL;
- }
-
- aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
- addr = (void *)(aligned_addr);
-
- return addr;
-}
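-
-/*
- * Illustrative example: with page_sz = 0x1000, an mmap() result of
- * 0x7f0000001234 rounds up via RTE_ALIGN_CEIL to 0x7f0000002000; the
- * extra page requested above guarantees the aligned range still fits
- * inside the reservation.
- */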
-
-/*
- * Get the physical address for virtual memory starting at va.
- */
-int
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
-{
- int32_t fd, rc = 0;
- uint32_t i, nb;
- off_t ofs;
-
- ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
- nb = pg_num * sizeof(*pa);
-
- if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0 ||
- (rc = pread(fd, pa, nb, ofs)) < 0 ||
- (rc -= nb) != 0) {
- RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\' "
- "at offset %lu, error code: %d\n",
- __func__, nb, PAGEMAP_FNAME, (unsigned long)ofs, errno);
- rc = ENOENT;
- }
-
-	if (fd >= 0)
-		close(fd);
- for (i = 0; i != pg_num; i++)
- pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
-
- return rc;
-}
-
-int
-gntalloc_open(void)
-{
- gntalloc_fd = open(XEN_GNTALLOC_FNAME, O_RDWR);
- return (gntalloc_fd != -1) ? 0 : -1;
-}
-
-void
-gntalloc_close(void)
-{
- if (gntalloc_fd != -1)
- close(gntalloc_fd);
- gntalloc_fd = -1;
-}
-
-void *
-gntalloc(size_t size, uint32_t *gref, uint64_t *start_index)
-{
- int page_size = getpagesize();
- uint32_t i, pg_num;
- void *va;
- int rv;
- struct ioctl_gntalloc_alloc_gref *arg;
- struct ioctl_gntalloc_dealloc_gref arg_d;
-
- if (size % page_size) {
- RTE_LOG(ERR, PMD, "%s: %zu isn't multiple of page size\n",
- __func__, size);
- return NULL;
- }
-
- pg_num = size / page_size;
- arg = malloc(sizeof(*arg) + (pg_num - 1) * sizeof(uint32_t));
- if (arg == NULL)
- return NULL;
- arg->domid = DOM0_DOMID;
- arg->flags = GNTALLOC_FLAG_WRITABLE;
- arg->count = pg_num;
-
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, arg);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
- free(arg);
- return NULL;
- }
-
- va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gntalloc_fd, arg->index);
- if (va == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
- arg_d.count = pg_num;
- arg_d.index = arg->index;
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, arg_d);
- free(arg);
- return NULL;
- }
-
- if (gref) {
- for (i = 0; i < pg_num; i++) {
- gref[i] = arg->gref_ids[i];
- }
- }
- if (start_index)
- *start_index = arg->index;
-
- free(arg);
-
- return va;
-}
-
-int
-grefwatch_from_alloc(uint32_t *gref, void **pptr)
-{
- int rv;
- void *ptr;
- int pg_size = getpagesize();
- struct ioctl_gntalloc_alloc_gref arg = {
- .domid = DOM0_DOMID,
- .flags = GNTALLOC_FLAG_WRITABLE,
- .count = 1
- };
- struct ioctl_gntalloc_dealloc_gref arg_d;
- struct ioctl_gntalloc_unmap_notify notify = {
- .action = UNMAP_NOTIFY_CLEAR_BYTE
- };
-
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, &arg);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: ioctl error\n", __func__);
- return -1;
- }
-
- ptr = (void *)mmap(NULL, pg_size, PROT_READ|PROT_WRITE, MAP_SHARED, gntalloc_fd, arg.index);
- arg_d.index = arg.index;
- arg_d.count = 1;
- if (ptr == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "%s: mmap failed\n", __func__);
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- return -1;
- }
- if (pptr)
- *pptr = ptr;
- if (gref)
- *gref = arg.gref_ids[0];
-
- notify.index = arg.index;
- rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_SET_UNMAP_NOTIFY, &notify);
- if (rv) {
- RTE_LOG(ERR, PMD, "%s: unmap notify failed\n", __func__);
- munmap(ptr, pg_size);
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- return -1;
- }
-
- return 0;
-}
-
-void
-gntfree(void *va, size_t sz, uint64_t start_index)
-{
- struct ioctl_gntalloc_dealloc_gref arg_d;
-
- if (va && sz) {
- munmap(va, sz);
- arg_d.count = sz / getpagesize();
- arg_d.index = start_index;
- ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg_d);
- }
-}
-
-static int
-xenstore_cleanup(void)
-{
- char store_path[PATH_MAX] = {0};
-
- if (snprintf(store_path, sizeof(store_path),
- "%s%s", dompath, DPDK_XENSTORE_NODE) == -1)
- return -1;
-
- if (xs_rm(xs, XBT_NULL, store_path) == false) {
- RTE_LOG(ERR, PMD, "%s: failed cleanup node\n", __func__);
- return -1;
- }
-
- return 0;
-}
-
-int
-xenstore_init(void)
-{
- unsigned int len, domid;
- char *buf;
- char *end;
-
- xs = xs_domain_open();
- if (xs == NULL) {
-		RTE_LOG(ERR, PMD, "%s: xs_domain_open failed\n", __func__);
- return -1;
- }
- buf = xs_read(xs, XBT_NULL, "domid", &len);
- if (buf == NULL) {
- RTE_LOG(ERR, PMD, "%s: failed read domid\n", __func__);
- return -1;
- }
-	errno = 0;
-	domid = strtoul(buf, &end, 0);
-	if (errno != 0 || end == NULL || end == buf || domid == 0) {
-		free(buf);
-		return -1;
-	}
-	free(buf);
-
- RTE_LOG(INFO, PMD, "retrieved dom ID = %d\n", domid);
-
- dompath = xs_get_domain_path(xs, domid);
- if (dompath == NULL)
- return -1;
-
- xs_transaction_start(xs); /* When to stop transaction */
-
- if (is_xenstore_cleaned_up == 0) {
- if (xenstore_cleanup())
- return -1;
- is_xenstore_cleaned_up = 1;
- }
-
- return 0;
-}
-
-int
-xenstore_uninit(void)
-{
- xs_close(xs);
-
- if (is_xenstore_cleaned_up == 0) {
- if (xenstore_cleanup())
- return -1;
- is_xenstore_cleaned_up = 1;
- }
- free(dompath);
- dompath = NULL;
-
- return 0;
-}
-
-int
-xenstore_write(const char *key_str, const char *val_str)
-{
- char grant_path[PATH_MAX];
- int rv, len;
-
- if (xs == NULL) {
- RTE_LOG(ERR, PMD, "%s: xenstore init failed\n", __func__);
- return -1;
- }
- rv = snprintf(grant_path, sizeof(grant_path), "%s%s", dompath, key_str);
- if (rv == -1) {
- RTE_LOG(ERR, PMD, "%s: snprintf %s %s failed\n",
- __func__, dompath, key_str);
- return -1;
- }
- len = strnlen(val_str, PATH_MAX);
-
- if (xs_write(xs, XBT_NULL, grant_path, val_str, len) == false) {
- RTE_LOG(ERR, PMD, "%s: xs_write failed\n", __func__);
- return -1;
- }
-
- return 0;
-}
-
-int
-grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size)
-{
- uint64_t start_index;
- int pg_size;
- uint32_t pg_shift;
- void *ptr = NULL;
- uint32_t count, entries_per_pg;
- uint32_t i, j = 0, k = 0;
- uint32_t *gref_tmp;
- int first = 1;
- char tmp_str[PATH_MAX] = {0};
- int rv = -1;
-
- pg_size = getpagesize();
- if (rte_is_power_of_2(pg_size) == 0) {
- return -1;
- }
- pg_shift = rte_bsf32(pg_size);
- if (pg_size % sizeof(struct grant_node_item)) {
- RTE_LOG(ERR, PMD, "pg_size isn't a multiple of grant node item\n");
- return -1;
- }
-
- entries_per_pg = pg_size / sizeof(struct grant_node_item);
-	count = (pg_num + entries_per_pg - 1) / entries_per_pg;
- gref_tmp = malloc(count * sizeof(uint32_t));
- if (gref_tmp == NULL)
- return -1;
- ptr = gntalloc(pg_size * count, gref_tmp, &start_index);
- if (ptr == NULL) {
- RTE_LOG(ERR, PMD, "%s: gntalloc error of %d pages\n", __func__, count);
- free(gref_tmp);
- return -1;
- }
-
- while (j < pg_num) {
- if (first) {
- rv = snprintf(val_str, str_size, "%u", gref_tmp[k]);
- first = 0;
- } else {
- snprintf(tmp_str, PATH_MAX, "%s", val_str);
- rv = snprintf(val_str, str_size, "%s,%u", tmp_str, gref_tmp[k]);
- }
- k++;
- if (rv == -1)
- break;
-
- for (i = 0; i < entries_per_pg && j < pg_num ; i++) {
- ((struct grant_node_item *)ptr)->gref = gref_arr[j];
- ((struct grant_node_item *)ptr)->pfn = pa_arr[j] >> pg_shift;
- ptr = RTE_PTR_ADD(ptr, sizeof(struct grant_node_item));
- j++;
- }
- }
-	if (rv == -1) {
-		/* ptr was advanced while filling items; rewind to the base */
-		gntfree(RTE_PTR_SUB(ptr, j * sizeof(struct grant_node_item)),
-			pg_size * count, start_index);
-	} else
- rv = 0;
- free(gref_tmp);
- return rv;
-}
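-
-/*
- * Example: for metadata pages whose grant refs are 1537, 1524 and 1533,
- * the loop above leaves val_str as "1537,1524,1533", the comma-separated
- * gref list format that is later written to xenstore.
- */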
-
-
-int
-grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx)
-{
- char key_str[PATH_MAX] = {0};
- char val_str[PATH_MAX] = {0};
- void *mempool_obj_va;
-
- if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
- return -1;
- }
-
- if (snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"MEMPOOL_XENSTORE_STR, mempool_idx) == -1)
- return -1;
- if (xenstore_write(key_str, val_str) == -1)
- return -1;
-
- if (snprintf(key_str, sizeof(key_str),
- DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1)
- return -1;
- if (mpool->nb_mem_chunks != 1) {
- RTE_LOG(ERR, PMD,
- "mempool with more than 1 chunk is not supported\n");
- return -1;
- }
- mempool_obj_va = STAILQ_FIRST(&mpool->mem_list)->addr;
- if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR,
- (uintptr_t)mempool_obj_va) == -1)
- return -1;
- if (xenstore_write(key_str, val_str) == -1)
- return -1;
-
- return 0;
-}
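
Putting the helpers above together: for mempool_idx 0, grant_gntalloc_mbuf_pool()
builds the key "/control/dpdk/0_mempool_gref", and xenstore_write() prefixes it
with the domain path, so the node finally written is, e.g.,
/local/domain/<domid>/control/dpdk/0_mempool_gref = "1537,1524,1533".
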
diff --git a/drivers/net/xenvirt/rte_xen_lib.h b/drivers/net/xenvirt/rte_xen_lib.h
deleted file mode 100644
index d973eacb..00000000
--- a/drivers/net/xenvirt/rte_xen_lib.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_XEN_DUMMY_PMD_H
-#define _RTE_XEN_DUMMY_PMD_H
-
-#include <stdint.h>
-
-#include <rte_common.h>
-#include <rte_mempool.h>
-#include <rte_ether.h>
-
-#define PAGEMAP_FNAME "/proc/self/pagemap"
-#define XEN_GNTALLOC_FNAME "/dev/xen/gntalloc"
-#define DPDK_XENSTORE_PATH "/control/dpdk/"
-#define DPDK_XENSTORE_NODE "/control/dpdk"
-/*format 0_mempool_gref = "1537,1524,1533" */
-#define MEMPOOL_XENSTORE_STR "_mempool_gref"
-/*format 0_mempool_va = 0x80340000 */
-#define MEMPOOL_VA_XENSTORE_STR "_mempool_va"
-/*format 0_rx_vring_gref = "1537,1524,1533" */
-#define RXVRING_XENSTORE_STR "_rx_vring_gref"
-/*format 0_tx_vring_gref = "1537,1524,1533" */
-#define TXVRING_XENSTORE_STR "_tx_vring_gref"
-#define VRING_FLAG_STR "_vring_flag"
-/*format: event_type_start_0 = 1*/
-#define EVENT_TYPE_START_STR "event_type_start_"
-
-#define DOM0_DOMID 0
-/*
- * the pfn (page frame number) are bits 0-54 (see pagemap.txt in linux
- * Documentation).
- */
-#define PAGEMAP_PFN_BITS 54
-#define PAGEMAP_PFN_MASK RTE_LEN2MASK(PAGEMAP_PFN_BITS, phys_addr_t)
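-/*
- * Worked example (illustrative values): with 4 KiB pages, a raw pagemap
- * entry of 0x86000000000482f6 masks down to PFN 0x482f6, i.e. physical
- * address 0x482f6 * 0x1000 = 0x482f6000.
- */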
-
-#define MAP_FLAG 0xA5
-
-#define RTE_ETH_XENVIRT_PAIRS_DELIM ';'
-#define RTE_ETH_XENVIRT_KEY_VALUE_DELIM '='
-#define RTE_ETH_XENVIRT_MAX_ARGS 1
-#define RTE_ETH_XENVIRT_MAC_PARAM "mac"
-struct xenvirt_dict {
- uint8_t addr_valid;
- struct ether_addr addr;
-};
-
-extern int gntalloc_fd;
-
-int
-gntalloc_open(void);
-
-void
-gntalloc_close(void);
-
-void *
-gntalloc(size_t sz, uint32_t *gref, uint64_t *start_index);
-
-void
-gntfree(void *va, size_t sz, uint64_t start_index);
-
-int
-xenstore_init(void);
-
-int
-xenstore_uninit(void);
-
-int
-xenstore_write(const char *key_str, const char *val_str);
-
-int
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz);
-
-void *
-get_xen_virtual(size_t size, size_t page_sz);
-
-int
-grefwatch_from_alloc(uint32_t *gref, void **pptr);
-
-
-int grant_node_create(uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, char *val_str, size_t str_size);
-
-int
-grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *gref_arr, phys_addr_t *pa_arr, int mempool_idx);
-
-#endif
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
deleted file mode 100644
index 1bb6877c..00000000
--- a/drivers/net/xenvirt/virtqueue.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTQUEUE_H_
-#define _VIRTQUEUE_H_
-
-#include <stdint.h>
-#include <linux/virtio_ring.h>
-#include <linux/virtio_net.h>
-
-#include <rte_atomic.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_mempool.h>
-
-#include "virtio_logs.h"
-
-struct rte_mbuf;
-
-/* The alignment to use between consumer and producer parts of vring. */
-#define VIRTIO_PCI_VRING_ALIGN 4096
-
-enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
-
-/**
- * The maximum virtqueue size is 2^15. Use that value as the end of
- * descriptor chain terminator since it will never be a valid index
- * in the descriptor table. This is used to verify we are correctly
- * handling vq_free_cnt.
- */
-#define VQ_RING_DESC_CHAIN_END 32768
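-/*
- * E.g. a 256-entry ring only has valid descriptor indices 0..255, so
- * 32768 can never alias a real descriptor and safely marks "no head".
- */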
-
-#define VIRTQUEUE_MAX_NAME_SZ 32
-
-struct pmd_internals {
- struct rte_eth_stats eth_stats;
- int port_id;
- int virtio_idx;
-};
-
-
-struct virtqueue {
- char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- struct rte_mempool *mpool; /**< mempool for mbuf allocation */
- uint16_t queue_id; /**< DPDK queue index. */
- uint16_t vq_queue_index; /**< PCI queue index */
- uint8_t port_id; /**< Device port identifier. */
-
- void *vq_ring_virt_mem; /**< virtual address of vring*/
- int vq_alignment;
- int vq_ring_size;
-
- struct vring vq_ring; /**< vring keeping desc, used and avail */
- struct pmd_internals *internals; /**< virtio device internal info. */
- uint16_t vq_nentries; /**< vring desc numbers */
- uint16_t vq_desc_head_idx;
- uint16_t vq_free_cnt; /**< num of desc available */
- uint16_t vq_used_cons_idx; /**< Last consumed desc in used table, trails vq_ring.used->idx*/
-
- struct vq_desc_extra {
- void *cookie;
- uint16_t ndescs;
- } vq_descx[0] __rte_cache_aligned;
-};
-
-
-#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP
-#define VIRTQUEUE_DUMP(vq) do { \
- uint16_t used_idx, nused; \
- used_idx = (vq)->vq_ring.used->idx; \
- nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
- PMD_INIT_LOG(DEBUG, \
- "VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
- " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
- " avail.flags=0x%x; used.flags=0x%x\n", \
- (vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
- (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
- (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
- (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
-} while (0)
-#else
-#define VIRTQUEUE_DUMP(vq) do { } while (0)
-#endif
-
-
-/**
- * Dump virtqueue internal structures, for debug purpose only.
- */
-void virtqueue_dump(struct virtqueue *vq);
-
-/**
- * Get all mbufs to be freed.
- */
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
-
-static __rte_always_inline int
-virtqueue_full(const struct virtqueue *vq)
-{
- return vq->vq_free_cnt == 0;
-}
-
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
-
-static __rte_always_inline void
-vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
-{
- uint16_t avail_idx;
- /*
- * Place the head of the descriptor chain into the next slot and make
- * it usable to the host. The chain is made available now rather than
- * deferring to virtqueue_notify() in the hopes that if the host is
- * currently running on another CPU, we can keep it processing the new
- * descriptor.
- */
- avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
- vq->vq_ring.avail->ring[avail_idx] = desc_idx;
- rte_smp_wmb();
- vq->vq_ring.avail->idx++;
-}
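-
-/*
- * Worked example: with vq_nentries = 256 and a free-running avail->idx
- * of 259, the head is written to slot 259 & 255 = 3; the increment
- * after the write barrier publishes it as entry 260.
- */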
-
-static __rte_always_inline void
-vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
-{
- struct vring_desc *dp;
- struct vq_desc_extra *dxp;
-
- dp = &vq->vq_ring.desc[desc_idx];
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
- while (dp->flags & VRING_DESC_F_NEXT) {
- dp = &vq->vq_ring.desc[dp->next];
- }
- dxp->ndescs = 0;
-
- /*
- * We must append the existing free chain, if any, to the end of
- * newly freed chain. If the virtqueue was completely used, then
- * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
- */
- dp->next = vq->vq_desc_head_idx;
- vq->vq_desc_head_idx = desc_idx;
-}
-
-static __rte_always_inline int
-virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
-{
- const uint16_t needed = 1;
- const uint16_t head_idx = rxvq->vq_desc_head_idx;
- struct vring_desc *start_dp = rxvq->vq_ring.desc;
- struct vq_desc_extra *dxp;
-
- if (unlikely(rxvq->vq_free_cnt == 0))
- return -ENOSPC;
- if (unlikely(rxvq->vq_free_cnt < needed))
- return -EMSGSIZE;
- if (unlikely(head_idx >= rxvq->vq_nentries))
- return -EFAULT;
-
- dxp = &rxvq->vq_descx[head_idx];
- dxp->cookie = (void *)cookie;
- dxp->ndescs = needed;
-
- start_dp[head_idx].addr =
- (uint64_t) ((uintptr_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
- start_dp[head_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
- start_dp[head_idx].flags = VRING_DESC_F_WRITE;
- rxvq->vq_desc_head_idx = start_dp[head_idx].next;
- rxvq->vq_free_cnt = (uint16_t)(rxvq->vq_free_cnt - needed);
- vq_ring_update_avail(rxvq, head_idx);
-
- return 0;
-}
-
-static __rte_always_inline int
-virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
-{
-
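-	/*
-	 * Each packet uses two chained descriptors: one for the (empty)
-	 * virtio_net_hdr, then one pointing at the mbuf data.
-	 */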
- const uint16_t needed = 2;
- struct vring_desc *start_dp = txvq->vq_ring.desc;
- uint16_t head_idx = txvq->vq_desc_head_idx;
- uint16_t idx = head_idx;
- struct vq_desc_extra *dxp;
-
- if (unlikely(txvq->vq_free_cnt == 0))
- return -ENOSPC;
- if (unlikely(txvq->vq_free_cnt < needed))
- return -EMSGSIZE;
- if (unlikely(head_idx >= txvq->vq_nentries))
- return -EFAULT;
-
- dxp = &txvq->vq_descx[idx];
- dxp->cookie = (void *)cookie;
- dxp->ndescs = needed;
-
-/*
- * TODO: save one desc here?
- */
-	start_dp[idx].addr = (uintptr_t)NULL;
-	start_dp[idx].len = sizeof(struct virtio_net_hdr);
-	start_dp[idx].flags = VRING_DESC_F_NEXT;
- idx = start_dp[idx].next;
- start_dp[idx].addr = (uint64_t)rte_pktmbuf_mtod(cookie, uintptr_t);
- start_dp[idx].len = cookie->data_len;
- start_dp[idx].flags = 0;
- idx = start_dp[idx].next;
- txvq->vq_desc_head_idx = idx;
- txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
- vq_ring_update_avail(txvq, head_idx);
-
- return 0;
-}
-
-static __rte_always_inline uint16_t
-virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
-{
- struct vring_used_elem *uep;
- struct rte_mbuf *cookie;
- uint16_t used_idx, desc_idx;
- uint16_t i;
- /* Caller does the check */
-	for (i = 0; i < num; i++) {
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t) uep->id;
- cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
- if (unlikely(cookie == NULL)) {
- PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
- vq->vq_used_cons_idx);
-			RTE_LOG(ERR, PMD, "%s: inconsistent (%u, %u)\n", __func__, used_idx, desc_idx);
- break;
- }
- len[i] = uep->len;
- rx_pkts[i] = cookie;
- vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
- vq->vq_descx[desc_idx].cookie = NULL;
- }
- return i;
-}
-
-#endif /* _VIRTQUEUE_H_ */
diff --git a/examples/Makefile b/examples/Makefile
index 28354ff8..9f7974a1 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -43,13 +43,15 @@ DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += distributor
DIRS-y += ethtool
DIRS-y += exception_path
DIRS-$(CONFIG_RTE_LIBRTE_EFD) += server_node_efd
+DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += flow_classify
+DIRS-y += flow_filtering
DIRS-y += helloworld
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += ip_pipeline
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
DIRS-$(CONFIG_RTE_IP_FRAG) += ip_reassembly
DIRS-$(CONFIG_RTE_IP_FRAG) += ip_fragmentation
endif
-ifeq ($(CONFIG_RTE_LIBRTE_ACL)$(CONFIG_RTE_LIBRTE_HASH)$(CONFIG_RTE_LIBRTE_LPM),yyy)
+ifeq ($(CONFIG_RTE_LIBRTE_ACL)$(CONFIG_RTE_LIBRTE_HASH)$(CONFIG_RTE_LIBRTE_LPM)$(CONFIG_RTE_LIBRTE_SECURITY),yyyy)
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += ipsec-secgw
endif
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += ipv4_multicast
@@ -83,13 +85,13 @@ DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
+DIRS-y += service_cores
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi
-DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += vhost_xen
DIRS-y += vmdq
DIRS-y += vmdq_dcb
ifeq ($(CONFIG_RTE_LIBRTE_POWER), y)
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 2d019d43..8e3b1f34 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -51,7 +51,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -61,7 +60,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -141,10 +139,10 @@
addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
-uint8_t slaves[RTE_MAX_ETHPORTS];
-uint8_t slaves_count;
+uint16_t slaves[RTE_MAX_ETHPORTS];
+uint16_t slaves_count;
-static uint8_t BOND_PORT = 0xff;
+static uint16_t BOND_PORT = 0xffff;
static struct rte_mempool *mbuf_pool;
@@ -171,7 +169,7 @@ static struct rte_eth_conf port_conf = {
};
static void
-slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
+slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
@@ -215,7 +213,7 @@ slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
struct ether_addr addr;
rte_eth_macaddr_get(portid, &addr);
- printf("Port %u MAC: ", (unsigned)portid);
+ printf("Port %u MAC: ", portid);
PRINT_MAC(addr);
printf("\n");
}
@@ -234,7 +232,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)
rte_exit(EXIT_FAILURE,
"Faled to create bond port\n");
- BOND_PORT = (uint8_t)retval;
+ BOND_PORT = retval;
retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
if (retval != 0)
@@ -675,10 +673,10 @@ static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- uint8_t slaves[16] = {0};
+ uint16_t slaves[16] = {0};
uint8_t len = 16;
struct ether_addr addr;
- uint8_t i = 0;
+ uint16_t i = 0;
while (i < slaves_count) {
rte_eth_macaddr_get(i, &addr);
diff --git a/examples/cmdline/main.c b/examples/cmdline/main.c
index c966df03..c6de944e 100644
--- a/examples/cmdline/main.c
+++ b/examples/cmdline/main.c
@@ -71,7 +71,6 @@
#include <cmdline.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_debug.h>
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 87603d03..61e6e6b9 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -132,7 +132,7 @@ static void print_stats(void);
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
@@ -175,13 +175,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
while (!link.link_status) {
- printf("Waiting for Link up on port %"PRIu8"\n", port);
+ printf("Waiting for Link up on port %"PRIu16"\n", port);
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
if (!link.link_status) {
- printf("Link down on port %"PRIu8"\n", port);
+ printf("Link down on port %"PRIu16"\n", port);
return 0;
}
@@ -189,7 +189,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -210,9 +210,9 @@ struct lcore_params {
static int
lcore_rx(struct lcore_params *p)
{
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
- uint8_t port;
+ uint16_t port;
struct rte_mbuf *bufs[BURST_SIZE*2];
for (port = 0; port < nb_ports; port++) {
@@ -312,9 +312,9 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
}
static inline void
-flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
+flush_all_ports(struct output_buffer *tx_buffers, uint16_t nb_ports)
{
- uint8_t outp;
+ uint16_t outp;
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
@@ -384,9 +384,9 @@ static int
lcore_tx(struct rte_ring *in_r)
{
static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
- uint8_t port;
+ uint16_t port;
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
@@ -668,8 +668,8 @@ main(int argc, char *argv[])
struct rte_ring *rx_dist_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
- uint8_t portid;
- uint8_t nb_ports_available;
+ uint16_t portid;
+ uint16_t nb_ports_available;
uint64_t t, freq;
/* catch ctrl-c so we can print on exit */
@@ -719,10 +719,10 @@ main(int argc, char *argv[])
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) portid);
+ printf("Initializing port %u... done\n", portid);
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
+ rte_exit(EXIT_FAILURE, "Cannot initialize port %u\n",
portid);
}
diff --git a/examples/ethtool/Makefile b/examples/ethtool/Makefile
index 30b42b70..e86d68ac 100644
--- a/examples/ethtool/Makefile
+++ b/examples/ethtool/Makefile
@@ -47,6 +47,5 @@ DIRS-y += lib ethtool-app
endif
DEPDIRS-ethtool-app := lib
-DEPDIRS-lib := librte_eal librte_ether
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index 266babad..cabd82a0 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -59,5 +59,6 @@ ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
LDLIBS += -lrte_pmd_ixgbe
endif
endif
+LDLIBS += -lrte_eal -lrte_ethdev
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 252382cb..c70c5478 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -36,7 +36,7 @@
#include <rte_version.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
@@ -47,7 +47,7 @@
int
-rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
+rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo)
{
struct rte_eth_dev_info dev_info;
struct rte_dev_reg_info reg_info;
@@ -106,7 +106,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
}
int
-rte_ethtool_get_regs_len(uint8_t port_id)
+rte_ethtool_get_regs_len(uint16_t port_id)
{
struct rte_dev_reg_info reg_info;
int ret;
@@ -121,7 +121,7 @@ rte_ethtool_get_regs_len(uint8_t port_id)
}
int
-rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs, void *data)
+rte_ethtool_get_regs(uint16_t port_id, struct ethtool_regs *regs, void *data)
{
struct rte_dev_reg_info reg_info;
int status;
@@ -141,7 +141,7 @@ rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs, void *data)
}
int
-rte_ethtool_get_link(uint8_t port_id)
+rte_ethtool_get_link(uint16_t port_id)
{
struct rte_eth_link link;
@@ -151,13 +151,13 @@ rte_ethtool_get_link(uint8_t port_id)
}
int
-rte_ethtool_get_eeprom_len(uint8_t port_id)
+rte_ethtool_get_eeprom_len(uint16_t port_id)
{
return rte_eth_dev_get_eeprom_length(port_id);
}
int
-rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+rte_ethtool_get_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words)
{
struct rte_dev_eeprom_info eeprom_info;
@@ -180,7 +180,7 @@ rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
}
int
-rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words)
{
struct rte_dev_eeprom_info eeprom_info;
@@ -203,7 +203,7 @@ rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
}
int
-rte_ethtool_get_pauseparam(uint8_t port_id,
+rte_ethtool_get_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param)
{
struct rte_eth_fc_conf fc_conf;
@@ -238,7 +238,7 @@ rte_ethtool_get_pauseparam(uint8_t port_id,
}
int
-rte_ethtool_set_pauseparam(uint8_t port_id,
+rte_ethtool_set_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param)
{
struct rte_eth_fc_conf fc_conf;
@@ -281,7 +281,7 @@ rte_ethtool_set_pauseparam(uint8_t port_id,
}
int
-rte_ethtool_net_open(uint8_t port_id)
+rte_ethtool_net_open(uint16_t port_id)
{
rte_eth_dev_stop(port_id);
@@ -289,7 +289,7 @@ rte_ethtool_net_open(uint8_t port_id)
}
int
-rte_ethtool_net_stop(uint8_t port_id)
+rte_ethtool_net_stop(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_stop(port_id);
@@ -298,7 +298,7 @@ rte_ethtool_net_stop(uint8_t port_id)
}
int
-rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
+rte_ethtool_net_get_mac_addr(uint16_t port_id, struct ether_addr *addr)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (addr == NULL)
@@ -309,7 +309,7 @@ rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
}
int
-rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr)
+rte_ethtool_net_set_mac_addr(uint16_t port_id, struct ether_addr *addr)
{
if (addr == NULL)
return -EINVAL;
@@ -317,7 +317,7 @@ rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr)
}
int
-rte_ethtool_net_validate_addr(uint8_t port_id __rte_unused,
+rte_ethtool_net_validate_addr(uint16_t port_id __rte_unused,
struct ether_addr *addr)
{
if (addr == NULL)
@@ -326,7 +326,7 @@ rte_ethtool_net_validate_addr(uint8_t port_id __rte_unused,
}
int
-rte_ethtool_net_change_mtu(uint8_t port_id, int mtu)
+rte_ethtool_net_change_mtu(uint16_t port_id, int mtu)
{
if (mtu < 0 || mtu > UINT16_MAX)
return -EINVAL;
@@ -334,7 +334,7 @@ rte_ethtool_net_change_mtu(uint8_t port_id, int mtu)
}
int
-rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats)
+rte_ethtool_net_get_stats64(uint16_t port_id, struct rte_eth_stats *stats)
{
if (stats == NULL)
return -EINVAL;
@@ -342,13 +342,13 @@ rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats)
}
int
-rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid)
+rte_ethtool_net_vlan_rx_add_vid(uint16_t port_id, uint16_t vid)
{
return rte_eth_dev_vlan_filter(port_id, vid, 1);
}
int
-rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid)
+rte_ethtool_net_vlan_rx_kill_vid(uint16_t port_id, uint16_t vid)
{
return rte_eth_dev_vlan_filter(port_id, vid, 0);
}
@@ -361,7 +361,7 @@ rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid)
* driver can register device-specific implementation
*/
int
-rte_ethtool_net_set_rx_mode(uint8_t port_id)
+rte_ethtool_net_set_rx_mode(uint16_t port_id)
{
uint16_t num_vfs;
struct rte_eth_dev_info dev_info;
@@ -387,7 +387,7 @@ rte_ethtool_net_set_rx_mode(uint8_t port_id)
int
-rte_ethtool_get_ringparam(uint8_t port_id,
+rte_ethtool_get_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param)
{
struct rte_eth_dev_info dev_info;
@@ -419,7 +419,7 @@ rte_ethtool_get_ringparam(uint8_t port_id,
int
-rte_ethtool_set_ringparam(uint8_t port_id,
+rte_ethtool_set_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param)
{
struct rte_eth_rxq_info rx_qinfo;
diff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h
index 18f44404..1cd8f4d2 100644
--- a/examples/ethtool/lib/rte_ethtool.h
+++ b/examples/ethtool/lib/rte_ethtool.h
@@ -79,7 +79,7 @@ extern "C" {
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo);
+int rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo);
/**
* Retrieve the Ethernet device register length in bytes.
@@ -93,7 +93,7 @@ int rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_regs_len(uint8_t port_id);
+int rte_ethtool_get_regs_len(uint16_t port_id);
/**
* Retrieve the Ethernet device register information according to
@@ -111,7 +111,7 @@ int rte_ethtool_get_regs_len(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs,
+int rte_ethtool_get_regs(uint16_t port_id, struct ethtool_regs *regs,
void *data);
/**
@@ -127,7 +127,7 @@ int rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_link(uint8_t port_id);
+int rte_ethtool_get_link(uint16_t port_id);
/**
* Retrieve the Ethernet device EEPROM size
@@ -141,7 +141,7 @@ int rte_ethtool_get_link(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_eeprom_len(uint8_t port_id);
+int rte_ethtool_get_eeprom_len(uint16_t port_id);
/**
* Retrieve EEPROM content based upon eeprom range described in ethtool
@@ -159,7 +159,7 @@ int rte_ethtool_get_eeprom_len(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+int rte_ethtool_get_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words);
/**
@@ -179,7 +179,7 @@ int rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+int rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words);
/**
@@ -199,7 +199,7 @@ int rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_pauseparam(uint8_t port_id,
+int rte_ethtool_get_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param);
/**
@@ -217,7 +217,7 @@ int rte_ethtool_get_pauseparam(uint8_t port_id,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_set_pauseparam(uint8_t port_id,
+int rte_ethtool_set_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *param);
/**
@@ -231,7 +231,7 @@ int rte_ethtool_set_pauseparam(uint8_t port_id,
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_open(uint8_t port_id);
+int rte_ethtool_net_open(uint16_t port_id);
/**
* Stop the Ethernet device.
@@ -242,7 +242,7 @@ int rte_ethtool_net_open(uint8_t port_id);
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_net_stop(uint8_t port_id);
+int rte_ethtool_net_stop(uint16_t port_id);
/**
* Get the Ethernet device MAC address.
@@ -255,7 +255,7 @@ int rte_ethtool_net_stop(uint8_t port_id);
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_get_mac_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Setting the Ethernet device MAC address.
@@ -271,7 +271,7 @@ int rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_set_mac_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Validate if the provided MAC address is valid unicast address
@@ -287,7 +287,7 @@ int rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_validate_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_validate_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Setting the Ethernet device maximum Tx unit.
@@ -303,7 +303,7 @@ int rte_ethtool_net_validate_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_change_mtu(uint8_t port_id, int mtu);
+int rte_ethtool_net_change_mtu(uint16_t port_id, int mtu);
/**
* Retrieve the Ethernet device traffic statistics
@@ -319,7 +319,7 @@ int rte_ethtool_net_change_mtu(uint8_t port_id, int mtu);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats);
+int rte_ethtool_net_get_stats64(uint16_t port_id, struct rte_eth_stats *stats);
/**
* Update the Ethernet device VLAN filter with new vid
@@ -334,7 +334,7 @@ int rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid);
+int rte_ethtool_net_vlan_rx_add_vid(uint16_t port_id, uint16_t vid);
/**
* Remove VLAN id from Ethernet device.
@@ -349,7 +349,7 @@ int rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid);
+int rte_ethtool_net_vlan_rx_kill_vid(uint16_t port_id, uint16_t vid);
/**
* Setting the Ethernet device rx mode.
@@ -362,7 +362,7 @@ int rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_set_rx_mode(uint8_t port_id);
+int rte_ethtool_net_set_rx_mode(uint16_t port_id);
/**
* Getting ring parameters for Ethernet device.
@@ -380,7 +380,7 @@ int rte_ethtool_net_set_rx_mode(uint8_t port_id);
* Only the tx_pending and rx_pending fields of struct ethtool_ringparam
* are used, and the function only gets parameters for queue 0.
*/
-int rte_ethtool_get_ringparam(uint8_t port_id,
+int rte_ethtool_get_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param);
/**
@@ -399,7 +399,7 @@ int rte_ethtool_get_ringparam(uint8_t port_id,
* Only the tx_pending and rx_pending fields of struct ethtool_ringparam
* are used, and the function only sets parameters for queue 0.
*/
-int rte_ethtool_set_ringparam(uint8_t port_id,
+int rte_ethtool_set_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param);
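
The signature changes above follow the DPDK 17.11 widening of ethdev port
ids from uint8_t to uint16_t; callers should iterate with 16-bit counters,
e.g. (minimal sketch):

    uint16_t port;

    for (port = 0; port < rte_eth_dev_count(); port++)
        rte_eth_macaddr_get(port, &addr);
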
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index dd75cb7a..5f431d87 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,6 +46,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_service.h>
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
@@ -76,6 +77,7 @@ struct fastpath_data {
uint32_t rx_lock;
uint32_t tx_lock;
uint32_t sched_lock;
+ uint32_t evdev_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -108,7 +110,7 @@ struct config_data {
static struct config_data cdata = {
.num_packets = (1L << 25), /* do ~32M packets */
.num_fids = 512,
- .queue_type = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .queue_type = RTE_SCHED_TYPE_ATOMIC,
.next_qid = {-1},
.qid = {-1},
.num_stages = 1,
@@ -233,7 +235,7 @@ producer(void)
}
static inline void
-schedule_devices(uint8_t dev_id, unsigned int lcore_id)
+schedule_devices(unsigned int lcore_id)
{
if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
@@ -243,7 +245,7 @@ schedule_devices(uint8_t dev_id, unsigned int lcore_id)
if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
- rte_event_schedule(dev_id);
+ rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
if (cdata.dump_dev_signal) {
rte_event_dev_dump(0, stdout);
cdata.dump_dev_signal = 0;
@@ -294,7 +296,7 @@ worker(void *arg)
while (!fdata->done) {
uint16_t i;
- schedule_devices(dev_id, lcore_id);
+ schedule_devices(lcore_id);
if (!fdata->worker_core[lcore_id]) {
rte_pause();
@@ -490,10 +492,10 @@ parse_app_args(int argc, char **argv)
cdata.enable_queue_priorities = 1;
break;
case 'o':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
break;
case 'p':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
break;
case 'q':
cdata.quiet = 1;
@@ -684,7 +686,7 @@ setup_eventdev(struct prod_data *prod_data,
.new_event_threshold = 4096,
};
struct rte_event_queue_conf wkr_q_conf = {
- .event_queue_cfg = cdata.queue_type,
+ .schedule_type = cdata.queue_type,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -696,11 +698,7 @@ setup_eventdev(struct prod_data *prod_data,
};
const struct rte_event_queue_conf tx_q_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
- .event_queue_cfg =
- RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
- RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
};
struct port_link worker_queues[MAX_NUM_STAGES];
@@ -755,11 +753,11 @@ setup_eventdev(struct prod_data *prod_data,
}
const char *type_str = "Atomic";
- switch (wkr_q_conf.event_queue_cfg) {
- case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
type_str = "Ordered";
break;
- case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ case RTE_SCHED_TYPE_PARALLEL:
type_str = "Parallel";
break;
}
@@ -843,6 +841,14 @@ setup_eventdev(struct prod_data *prod_data,
*cons_data = (struct cons_data){.dev_id = dev_id,
.port_id = i };
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID for sw eventdev\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
if (rte_event_dev_start(dev_id) < 0) {
printf("Error starting eventdev\n");
return -1;
@@ -911,9 +917,9 @@ main(int argc, char **argv)
printf("\tworkers: %u\n", cdata.num_workers);
printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
printf("\tqid0 type: ordered\n");
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
printf("\tqid0 type: atomic\n");
printf("\tCores available: %u\n", rte_lcore_count());
printf("\tCores used: %u\n", cores_needed);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index e551e6d1..f8f5bbdf 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -55,7 +55,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
@@ -63,7 +62,6 @@
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -135,7 +133,7 @@ static uint64_t input_cores_mask = 0;
static uint64_t output_cores_mask = 0;
/* Array storing port_id that is associated with each lcore */
-static uint8_t port_ids[RTE_MAX_LCORE];
+static uint16_t port_ids[RTE_MAX_LCORE];
/* Structure type for recording lcore-specific stats */
struct stats {
@@ -360,8 +358,8 @@ static void
setup_port_lcore_affinities(void)
{
unsigned long i;
- uint8_t tx_port = 0;
- uint8_t rx_port = 0;
+ uint16_t tx_port = 0;
+ uint16_t rx_port = 0;
/* Setup port_ids[] array, and check masks were ok */
RTE_LCORE_FOREACH(i) {
@@ -444,24 +442,23 @@ parse_args(int argc, char **argv)
/* Initialise a single port on an Ethernet device */
static void
-init_port(uint8_t port)
+init_port(uint16_t port)
{
int ret;
uint16_t nb_rxd = NB_RXD;
uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
- PRINT_INFO("Initialising port %u ...", (unsigned)port);
+ PRINT_INFO("Initialising port %u ...", port);
fflush(stdout);
ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
if (ret < 0)
- FATAL_ERROR("Could not configure port%u (%d)",
- (unsigned)port, ret);
+ FATAL_ERROR("Could not configure port%u (%d)", port, ret);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
if (ret < 0)
FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
rte_eth_dev_socket_id(port),
@@ -469,29 +466,30 @@ init_port(uint8_t port)
pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_dev_start(port);
if (ret < 0)
- FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret);
+ FATAL_ERROR("Could not start port%u (%d)", port, ret);
rte_eth_promiscuous_enable(port);
}
/* Check the link status of all ports in up to 9s, and print their status at the end */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -506,14 +504,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -546,7 +543,7 @@ main(int argc, char** argv)
{
int ret;
	unsigned i, high_port;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
	/* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
diff --git a/examples/flow_classify/Makefile b/examples/flow_classify/Makefile
new file mode 100644
index 00000000..eecdde14
--- /dev/null
+++ b/examples/flow_classify/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = flow_classify
+
+
+# all source are stored in SRCS-y
+SRCS-y := flow_classify.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
new file mode 100644
index 00000000..766f1dd0
--- /dev/null
+++ b/examples/flow_classify/flow_classify.c
@@ -0,0 +1,852 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_flow.h>
+#include <rte_flow_classify.h>
+#include <rte_table_acl.h>
+
+#define RX_RING_SIZE 128
+#define TX_RING_SIZE 512
+
+#define NUM_MBUFS 8191
+#define MBUF_CACHE_SIZE 250
+#define BURST_SIZE 32
+
+#define MAX_NUM_CLASSIFY 30
+#define FLOW_CLASSIFY_MAX_RULE_NUM 91
+#define FLOW_CLASSIFY_MAX_PRIORITY 8
+#define FLOW_CLASSIFIER_NAME_SIZE 64
+
+#define COMMENT_LEAD_CHAR ('#')
+#define OPTION_RULE_IPV4 "rule_ipv4"
+#define RTE_LOGTYPE_FLOW_CLASSIFY RTE_LOGTYPE_USER3
+#define flow_classify_log(format, ...) \
+ RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)
+
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+ *a = (unsigned char)(ip >> 24 & 0xff);\
+ *b = (unsigned char)(ip >> 16 & 0xff);\
+ *c = (unsigned char)(ip >> 8 & 0xff);\
+ *d = (unsigned char)(ip & 0xff);\
+ } while (0)
+
+enum {
+ CB_FLD_SRC_ADDR,
+ CB_FLD_DST_ADDR,
+ CB_FLD_SRC_PORT,
+ CB_FLD_SRC_PORT_DLM,
+ CB_FLD_SRC_PORT_MASK,
+ CB_FLD_DST_PORT,
+ CB_FLD_DST_PORT_DLM,
+ CB_FLD_DST_PORT_MASK,
+ CB_FLD_PROTO,
+ CB_FLD_PRIORITY,
+ CB_FLD_NUM,
+};
+
+static struct {
+ const char *rule_ipv4_name;
+} parm_config;
+const char cb_port_delim[] = ":";
+
+static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+};
+
+struct flow_classifier {
+ struct rte_flow_classifier *cls;
+ uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
+};
+
+struct flow_classifier_acl {
+ struct flow_classifier cls;
+} __rte_cache_aligned;
+
+/* ACL field definitions for IPv4 5 tuple rule */
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+enum {
+ PROTO_INPUT_IPV4,
+ SRC_INPUT_IPV4,
+ DST_INPUT_IPV4,
+ SRCP_DESTP_INPUT_IPV4
+};
+
+static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+ /* first input field - always one byte long. */
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = PROTO_FIELD_IPV4,
+ .input_index = PROTO_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+ /* next input field (IPv4 source address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = SRC_FIELD_IPV4,
+ .input_index = SRC_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+ /* next input field (IPv4 destination address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = DST_FIELD_IPV4,
+ .input_index = DST_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+ /*
+ * Next 2 fields (src & dst ports) form 4 consecutive bytes.
+ * They share the same input index.
+ */
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = SRCP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = DSTP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+/* flow classify data */
+static int num_classify_rules;
+static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
+static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
+static struct rte_flow_classify_stats classify_stats = {
+ .stats = (void **)&ntuple_stats
+};
+
+/* parameters for rte_flow_classify_validate and
+ * rte_flow_classify_table_entry_add functions
+ */
+
+static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
+ 0, 0, 0 };
+static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
+ 0, 0, 0 };
+
+/* sample actions:
+ * "actions count / end"
+ */
+static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
+static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
+static struct rte_flow_action actions[2];
+
+/* sample attributes */
+static struct rte_flow_attr attr;
+
+/* flow_classify.c: based on the DPDK skeleton forwarding example. */
+
+/*
+ * Initializes a given port using global settings and with the RX buffers
+ * coming from the mbuf_pool passed as a parameter.
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+ struct rte_eth_conf port_conf = port_conf_default;
+ struct ether_addr addr;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+ return 0;
+}
+
+/*
+ * The lcore main. This is the main thread that does the work, reading from
+ * an input port, classifying the packets and writing to an output port.
+ */
+static __attribute__((noreturn)) void
+lcore_main(struct flow_classifier *cls_app)
+{
+ const uint8_t nb_ports = rte_eth_dev_count();
+ uint8_t port;
+ int ret;
+ int i = 0;
+
+ ret = rte_flow_classify_table_entry_delete(cls_app->cls,
+ cls_app->table_id[0], rules[7]);
+ if (ret)
+ printf("table_entry_delete failed [7] %d\n\n", ret);
+ else
+ printf("table_entry_delete succeeded [7]\n\n");
+
+ /*
+ * Check that the port is on the same NUMA node as the polling thread
+ * for best performance.
+ */
+	for (port = 0; port < nb_ports; port++)
+		if (rte_eth_dev_socket_id(port) > 0 &&
+			rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
+			printf("\n\nWARNING: port %u is on remote NUMA node "
+				"to polling thread.\n", port);
+			printf("Performance will not be optimal.\n");
+		}
+
+	printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
+		rte_lcore_id());
+ /* Run until the application is quit or killed. */
+ for (;;) {
+ /*
+ * Receive packets on a port, classify them and forward them
+ * on the paired port.
+ * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
+ */
+ for (port = 0; port < nb_ports; port++) {
+ /* Get burst of RX packets, from first port of pair. */
+ struct rte_mbuf *bufs[BURST_SIZE];
+ const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
+ bufs, BURST_SIZE);
+
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
+ if (rules[i]) {
+ ret = rte_flow_classifier_query(
+ cls_app->cls,
+ cls_app->table_id[0],
+ bufs, nb_rx, rules[i],
+ &classify_stats);
+ if (ret)
+ printf(
+ "rule [%d] query failed ret [%d]\n\n",
+ i, ret);
+ else {
+ printf(
+ "rule[%d] count=%"PRIu64"\n",
+ i, ntuple_stats.counter1);
+
+ printf("proto = %d\n",
+ ntuple_stats.ipv4_5tuple.proto);
+ }
+ }
+ }
+
+ /* Send burst of TX packets, to second port of pair. */
+ const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
+ bufs, nb_rx);
+
+ /* Free any unsent packets. */
+ if (unlikely(nb_tx < nb_rx)) {
+ uint16_t buf;
+
+ for (buf = nb_tx; buf < nb_rx; buf++)
+ rte_pktmbuf_free(bufs[buf]);
+ }
+ }
+ }
+}
+
+/*
+ * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
+ * Expected format:
+ * <src_ipv4_addr>'/'<masklen> <space> \
+ * <dst_ipv4_addr>'/'<masklen> <space> \
+ * <src_port> <space> ":" <src_port_mask> <space> \
+ * <dst_port> <space> ":" <dst_port_mask> <space> \
+ * <proto>'/'<proto_mask> <space> \
+ * <priority>
+ */
+
+static int
+get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
+ char dlm)
+{
+ unsigned long val;
+ char *end;
+
+ errno = 0;
+ val = strtoul(*in, &end, base);
+ if (errno != 0 || end[0] != dlm || val > lim)
+ return -EINVAL;
+ *fd = (uint32_t)val;
+ *in = end + 1;
+ return 0;
+}
+
+static int
+parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
+{
+ uint32_t a, b, c, d, m;
+
+ if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
+ return -EINVAL;
+ if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
+ return -EINVAL;
+
+ addr[0] = IPv4(a, b, c, d);
+ mask_len[0] = m;
+ return 0;
+}
+
+static int
+parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ int i, ret;
+ char *s, *sp, *in[CB_FLD_NUM];
+ static const char *dlm = " \t\n";
+ int dim = CB_FLD_NUM;
+ uint32_t temp;
+
+ s = str;
+ for (i = 0; i != dim; i++, s = NULL) {
+ in[i] = strtok_r(s, dlm, &sp);
+ if (in[i] == NULL)
+ return -EINVAL;
+ }
+
+ ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
+ &ntuple_filter->src_ip,
+ &ntuple_filter->src_ip_mask);
+ if (ret != 0) {
+ flow_classify_log("failed to read source address/mask: %s\n",
+ in[CB_FLD_SRC_ADDR]);
+ return ret;
+ }
+
+ ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
+ &ntuple_filter->dst_ip,
+ &ntuple_filter->dst_ip_mask);
+ if (ret != 0) {
+		flow_classify_log("failed to read destination address/mask: %s\n",
+ in[CB_FLD_DST_ADDR]);
+ return ret;
+ }
+
+ if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->src_port = (uint16_t)temp;
+
+ if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
+ sizeof(cb_port_delim)) != 0)
+ return -EINVAL;
+
+ if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->src_port_mask = (uint16_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->dst_port = (uint16_t)temp;
+
+ if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
+ sizeof(cb_port_delim)) != 0)
+ return -EINVAL;
+
+ if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->dst_port_mask = (uint16_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
+ return -EINVAL;
+ ntuple_filter->proto = (uint8_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->proto_mask = (uint8_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->priority = (uint16_t)temp;
+ if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* Bypass comment and empty lines */
+static inline int
+is_bypass_line(char *buff)
+{
+ int i = 0;
+
+ /* comment line */
+ if (buff[0] == COMMENT_LEAD_CHAR)
+ return 1;
+ /* empty line */
+ while (buff[i] != '\0') {
+ if (!isspace(buff[i]))
+ return 0;
+ i++;
+ }
+ return 1;
+}
+
+static uint32_t
+convert_depth_to_bitmask(uint32_t depth_val)
+{
+ uint32_t bitmask = 0;
+ int i, j;
+
+ for (i = depth_val, j = 0; i > 0; i--, j++)
+ bitmask |= (1 << (31 - j));
+ return bitmask;
+}
+
+static int
+add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
+ struct flow_classifier *cls_app)
+{
+ int ret = -1;
+ int key_found;
+ struct rte_flow_error error;
+ struct rte_flow_item_ipv4 ipv4_spec;
+ struct rte_flow_item_ipv4 ipv4_mask;
+ struct rte_flow_item ipv4_udp_item;
+ struct rte_flow_item ipv4_tcp_item;
+ struct rte_flow_item ipv4_sctp_item;
+ struct rte_flow_item_udp udp_spec;
+ struct rte_flow_item_udp udp_mask;
+ struct rte_flow_item udp_item;
+ struct rte_flow_item_tcp tcp_spec;
+ struct rte_flow_item_tcp tcp_mask;
+ struct rte_flow_item tcp_item;
+ struct rte_flow_item_sctp sctp_spec;
+ struct rte_flow_item_sctp sctp_mask;
+ struct rte_flow_item sctp_item;
+ struct rte_flow_item pattern_ipv4_5tuple[4];
+ struct rte_flow_classify_rule *rule;
+ uint8_t ipv4_proto;
+
+ if (num_classify_rules >= MAX_NUM_CLASSIFY) {
+ printf(
+ "\nINFO: classify rule capacity %d reached\n",
+ num_classify_rules);
+ return ret;
+ }
+
+ /* set up parameters for validate and add */
+ memset(&ipv4_spec, 0, sizeof(ipv4_spec));
+ ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
+ ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
+ ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
+ ipv4_proto = ipv4_spec.hdr.next_proto_id;
+
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+ ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
+ ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
+ ipv4_mask.hdr.src_addr =
+ convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
+ ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
+ ipv4_mask.hdr.dst_addr =
+ convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);
+
+ switch (ipv4_proto) {
+ case IPPROTO_UDP:
+ ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_udp_item.spec = &ipv4_spec;
+ ipv4_udp_item.mask = &ipv4_mask;
+ ipv4_udp_item.last = NULL;
+
+ udp_spec.hdr.src_port = ntuple_filter->src_port;
+ udp_spec.hdr.dst_port = ntuple_filter->dst_port;
+ udp_spec.hdr.dgram_len = 0;
+ udp_spec.hdr.dgram_cksum = 0;
+
+ udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+ udp_mask.hdr.dgram_len = 0;
+ udp_mask.hdr.dgram_cksum = 0;
+
+ udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
+ udp_item.spec = &udp_spec;
+ udp_item.mask = &udp_mask;
+ udp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_udp_item;
+ pattern_ipv4_5tuple[2] = udp_item;
+ break;
+ case IPPROTO_TCP:
+ ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_tcp_item.spec = &ipv4_spec;
+ ipv4_tcp_item.mask = &ipv4_mask;
+ ipv4_tcp_item.last = NULL;
+
+ memset(&tcp_spec, 0, sizeof(tcp_spec));
+ tcp_spec.hdr.src_port = ntuple_filter->src_port;
+ tcp_spec.hdr.dst_port = ntuple_filter->dst_port;
+
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+
+ tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
+ tcp_item.spec = &tcp_spec;
+ tcp_item.mask = &tcp_mask;
+ tcp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_tcp_item;
+ pattern_ipv4_5tuple[2] = tcp_item;
+ break;
+ case IPPROTO_SCTP:
+ ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_sctp_item.spec = &ipv4_spec;
+ ipv4_sctp_item.mask = &ipv4_mask;
+ ipv4_sctp_item.last = NULL;
+
+ sctp_spec.hdr.src_port = ntuple_filter->src_port;
+ sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
+ sctp_spec.hdr.cksum = 0;
+ sctp_spec.hdr.tag = 0;
+
+ sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+ sctp_mask.hdr.cksum = 0;
+ sctp_mask.hdr.tag = 0;
+
+ sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
+ sctp_item.spec = &sctp_spec;
+ sctp_item.mask = &sctp_mask;
+ sctp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_sctp_item;
+ pattern_ipv4_5tuple[2] = sctp_item;
+ break;
+ default:
+ return ret;
+ }
+
+ attr.ingress = 1;
+ pattern_ipv4_5tuple[0] = eth_item;
+ pattern_ipv4_5tuple[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(
+ cls_app->cls, cls_app->table_id[0], &key_found,
+ &attr, pattern_ipv4_5tuple, actions, &error);
+ if (rule == NULL) {
+ printf("table entry add failed ipv4_proto = %u\n",
+ ipv4_proto);
+ ret = -1;
+ return ret;
+ }
+
+ rules[num_classify_rules] = rule;
+ num_classify_rules++;
+ return 0;
+}
+
+static int
+add_rules(const char *rule_path, struct flow_classifier *cls_app)
+{
+ FILE *fh;
+ char buff[LINE_MAX];
+ unsigned int i = 0;
+ unsigned int total_num = 0;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ int ret;
+
+ fh = fopen(rule_path, "rb");
+ if (fh == NULL)
+ rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
+ rule_path);
+
+ ret = fseek(fh, 0, SEEK_SET);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
+ ret);
+
+ i = 0;
+ while (fgets(buff, LINE_MAX, fh) != NULL) {
+ i++;
+
+ if (is_bypass_line(buff))
+ continue;
+
+ if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
+ printf("\nINFO: classify rule capacity %d reached\n",
+ total_num);
+ break;
+ }
+
+ if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
+ rte_exit(EXIT_FAILURE,
+ "%s Line %u: parse rules error\n",
+ rule_path, i);
+
+ if (add_classify_rule(&ntuple_filter, cls_app) != 0)
+ rte_exit(EXIT_FAILURE, "add rule error\n");
+
+ total_num++;
+ }
+
+ fclose(fh);
+ return 0;
+}
+
+/* display usage */
+static void
+print_usage(const char *prgname)
+{
+ printf("%s usage:\n", prgname);
+ printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
+ printf("specify the ipv4 rules file.\n");
+ printf("Each rule occupies one line in the file.\n");
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {OPTION_RULE_IPV4, 1, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* long options */
+ case 0:
+ if (!strncmp(lgopts[option_index].name,
+ OPTION_RULE_IPV4,
+ sizeof(OPTION_RULE_IPV4)))
+ parm_config.rule_ipv4_name = optarg;
+ break;
+ default:
+ print_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+/*
+ * The main function, which does initialization and calls the lcore_main
+ * function.
+ */
+int
+main(int argc, char *argv[])
+{
+ struct rte_mempool *mbuf_pool;
+ uint8_t nb_ports;
+ uint8_t portid;
+ int ret;
+ int socket_id;
+ struct rte_table_acl_params table_acl_params;
+ struct rte_flow_classify_table_params cls_table_params;
+ struct flow_classifier *cls_app;
+ struct rte_flow_classifier_params cls_params;
+ uint32_t size;
+
+ /* Initialize the Environment Abstraction Layer (EAL). */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
+
+ /* Check that there is an even number of ports to send/receive on. */
+ nb_ports = rte_eth_dev_count();
+ if (nb_ports < 2 || (nb_ports & 1))
+ rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
+
+ /* Creates a new mempool in memory to hold the mbufs. */
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* Initialize all ports. */
+ for (portid = 0; portid < nb_ports; portid++)
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ portid);
+
+ if (rte_lcore_count() > 1)
+ printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
+
+ socket_id = rte_eth_dev_socket_id(0);
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
+ cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (cls_app == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");
+
+ cls_params.name = "flow_classifier";
+ cls_params.socket_id = socket_id;
+ cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;
+
+ cls_app->cls = rte_flow_classifier_create(&cls_params);
+ if (cls_app->cls == NULL) {
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
+ }
+
+ /* initialise ACL table params */
+ table_acl_params.name = "table_acl_ipv4_5tuple";
+ table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
+ table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
+ memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
+
+ /* initialise table create params */
+	cls_table_params.ops = &rte_table_acl_ops;
+	cls_table_params.arg_create = &table_acl_params;
+
+ ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
+ &cls_app->table_id[0]);
+ if (ret) {
+ rte_flow_classifier_free(cls_app->cls);
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
+ }
+
+ /* read file of IPv4 5 tuple rules and initialize parameters
+ * for rte_flow_classify_validate and rte_flow_classify_table_entry_add
+	 * APIs.
+ */
+ if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
+ rte_flow_classifier_free(cls_app->cls);
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to add rules\n");
+ }
+
+ /* Call lcore_main on the master core only. */
+ lcore_main(cls_app);
+
+ return 0;
+}
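As a worked illustration of convert_depth_to_bitmask() above (re-stated standalone; not part of the patch): a /24 prefix length expands to the contiguous bitmask 0xffffff00, the form rte_flow expects in ipv4_mask.hdr.src_addr and dst_addr:

#include <assert.h>
#include <stdint.h>

static uint32_t
depth_to_bitmask(uint32_t depth)	/* same loop as the example app */
{
	uint32_t bitmask = 0;
	int i, j;

	for (i = depth, j = 0; i > 0; i--, j++)
		bitmask |= (1u << (31 - j));
	return bitmask;
}

int
main(void)
{
	assert(depth_to_bitmask(24) == 0xffffff00u);
	assert(depth_to_bitmask(32) == 0xffffffffu);
	assert(depth_to_bitmask(0) == 0x00000000u);
	return 0;
}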
diff --git a/examples/flow_classify/ipv4_rules_file.txt b/examples/flow_classify/ipv4_rules_file.txt
new file mode 100644
index 00000000..dfa0631f
--- /dev/null
+++ b/examples/flow_classify/ipv4_rules_file.txt
@@ -0,0 +1,14 @@
+#file format:
+#src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
+#
+2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
+9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
+9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
+9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
+6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
+6.7.8.9/32 192.168.0.36/32 10 : 0xffff 11 : 0xffff 6/0xfe 5
+6.7.8.9/24 192.168.0.36/24 10 : 0xffff 11 : 0xffff 6/0xfe 6
+6.7.8.9/16 192.168.0.36/16 10 : 0xffff 11 : 0xffff 6/0xfe 7
+6.7.8.9/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 8
+#error rules
+#9.8.7.6/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 9
\ No newline at end of file
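For illustration, the first rule line above parses into an rte_eth_ntuple_filter equivalent to the initializer below; the IP masks are stored as prefix lengths at this stage and expanded to bitmasks by add_classify_rule(). This is a sketch, not code from the patch:

#include <rte_eth_ctrl.h>
#include <rte_ip.h>

/* "2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0" */
static const struct rte_eth_ntuple_filter example_rule = {
	.src_ip = IPv4(2, 2, 2, 3),
	.src_ip_mask = 24,		/* prefix length, widened later */
	.dst_ip = IPv4(2, 2, 2, 7),
	.dst_ip_mask = 24,
	.src_port = 32,
	.src_port_mask = 0xffff,
	.dst_port = 33,
	.dst_port_mask = 0xffff,
	.proto = 17,			/* UDP */
	.proto_mask = 0xff,
	.priority = 0,
};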
diff --git a/examples/flow_filtering/Makefile b/examples/flow_filtering/Makefile
new file mode 100644
index 00000000..70b82fe3
--- /dev/null
+++ b/examples/flow_filtering/Makefile
@@ -0,0 +1,49 @@
+#
+# BSD LICENSE
+#
+# Copyright 2017 Mellanox.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Mellanox nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = flow
+
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/flow_filtering/flow_blocks.c b/examples/flow_filtering/flow_blocks.c
new file mode 100644
index 00000000..f92df102
--- /dev/null
+++ b/examples/flow_filtering/flow_blocks.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Mellanox nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define MAX_PATTERN_NUM 4
+
+struct rte_flow *
+generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+ uint32_t src_ip, uint32_t src_mask,
+ uint32_t dest_ip, uint32_t dest_mask,
+ struct rte_flow_error *error);
+
+
+/**
+ * Create a flow rule that sends packets with matching src and dest IP
+ * addresses to the selected queue.
+ *
+ * @param port_id
+ * The selected port.
+ * @param rx_q
+ * The selected target queue.
+ * @param src_ip
+ * The src ip value to match the input packet.
+ * @param src_mask
+ * The mask to apply to the src ip.
+ * @param dest_ip
+ * The dest ip value to match the input packet.
+ * @param dest_mask
+ * The mask to apply to the dest ip.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created else return NULL.
+ */
+struct rte_flow *
+generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+ uint32_t src_ip, uint32_t src_mask,
+ uint32_t dest_ip, uint32_t dest_mask,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_attr attr;
+ struct rte_flow_item pattern[MAX_PATTERN_NUM];
+ struct rte_flow_action action[MAX_PATTERN_NUM];
+ struct rte_flow *flow = NULL;
+ struct rte_flow_action_queue queue = { .index = rx_q };
+ struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_mask;
+ struct rte_flow_item_vlan vlan_spec;
+ struct rte_flow_item_vlan vlan_mask;
+ struct rte_flow_item_ipv4 ip_spec;
+ struct rte_flow_item_ipv4 ip_mask;
+ int res;
+
+ memset(pattern, 0, sizeof(pattern));
+ memset(action, 0, sizeof(action));
+
+ /*
+ * set the rule attribute.
+ * in this case only ingress packets will be checked.
+ */
+ memset(&attr, 0, sizeof(struct rte_flow_attr));
+ attr.ingress = 1;
+
+ /*
+ * create the action sequence.
+ * one action only, move packet to queue
+ */
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ action[0].conf = &queue;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /*
+ * set the first level of the pattern (eth).
+ * since in this example we just want to get the
+ * ipv4 we set this level to allow all.
+ */
+ memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
+ memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
+ eth_spec.type = 0;
+ eth_mask.type = 0;
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[0].spec = &eth_spec;
+ pattern[0].mask = &eth_mask;
+
+ /*
+ * setting the second level of the pattern (vlan).
+ * since in this example we just want to get the
+ * ipv4 we also set this level to allow all.
+ */
+ memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
+ memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+ pattern[1].spec = &vlan_spec;
+ pattern[1].mask = &vlan_mask;
+
+ /*
+ * setting the third level of the pattern (ip).
+ * in this example this is the level we care about
+ * so we set it according to the parameters.
+ */
+ memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
+ memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
+ ip_spec.hdr.dst_addr = htonl(dest_ip);
+ ip_mask.hdr.dst_addr = dest_mask;
+ ip_spec.hdr.src_addr = htonl(src_ip);
+ ip_mask.hdr.src_addr = src_mask;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].spec = &ip_spec;
+ pattern[2].mask = &ip_mask;
+
+ /* the final level must be always type end */
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ res = rte_flow_validate(port_id, &attr, pattern, action, error);
+ if (!res)
+ flow = rte_flow_create(port_id, &attr, pattern, action, error);
+
+ return flow;
+}
+
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
new file mode 100644
index 00000000..7d739b4a
--- /dev/null
+++ b/examples/flow_filtering/main.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Mellanox. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_net.h>
+#include <rte_flow.h>
+
+static volatile bool force_quit;
+
+static uint8_t port_id;
+static uint16_t nr_queues = 5;
+static uint8_t selected_queue = 1;
+struct rte_mempool *mbuf_pool;
+struct rte_flow *flow;
+
+#define SRC_IP ((0<<24) + (0<<16) + (0<<8) + 0) /* src ip = 0.0.0.0 */
+#define DEST_IP ((192<<24) + (168<<16) + (1<<8) + 1) /* dest ip = 192.168.1.1 */
+#define FULL_MASK 0xffffffff /* full mask */
+#define EMPTY_MASK 0x0 /* empty mask */
+
+#include "flow_blocks.c"
+
+static inline void
+print_ether_addr(const char *what, struct ether_addr *eth_addr)
+{
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", what, buf);
+}
+
+static void
+main_loop(void)
+{
+ struct rte_mbuf *mbufs[32];
+ struct ether_hdr *eth_hdr;
+ struct rte_flow_error error;
+ uint16_t nb_rx;
+ uint16_t i;
+ uint16_t j;
+
+ while (!force_quit) {
+ for (i = 0; i < nr_queues; i++) {
+ nb_rx = rte_eth_rx_burst(port_id,
+ i, mbufs, 32);
+ if (nb_rx) {
+ for (j = 0; j < nb_rx; j++) {
+ struct rte_mbuf *m = mbufs[j];
+
+ eth_hdr = rte_pktmbuf_mtod(m,
+ struct ether_hdr *);
+ print_ether_addr("src=",
+ &eth_hdr->s_addr);
+ print_ether_addr(" - dst=",
+ &eth_hdr->d_addr);
+ printf(" - queue=0x%x",
+ (unsigned int)i);
+ printf("\n");
+
+ rte_pktmbuf_free(m);
+ }
+ }
+ }
+ }
+
+ /* closing and releasing resources */
+ rte_flow_flush(port_id, &error);
+ rte_eth_dev_stop(port_id);
+ rte_eth_dev_close(port_id);
+}
+
+static void
+assert_link_status(void)
+{
+ struct rte_eth_link link;
+
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get(port_id, &link);
+ if (link.link_status == ETH_LINK_DOWN)
+ rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
+}
+
+static void
+init_port(void)
+{
+ int ret;
+ uint16_t i;
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+			.header_split = 0, /**< Header Split disabled */
+			.hw_ip_checksum = 0, /**< IP checksum offload disabled */
+			.hw_vlan_filter = 0, /**< VLAN filtering disabled */
+			.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
+			.hw_strip_crc = 1, /**< CRC stripped by hardware */
+ },
+ };
+
+ printf(":: initializing port: %d\n", port_id);
+ ret = rte_eth_dev_configure(port_id,
+ nr_queues, nr_queues, &port_conf);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: cannot configure device: err=%d, port=%u\n",
+ ret, port_id);
+ }
+
+	/* set up Rx queues only: they are all this example needs so far */
+ for (i = 0; i < nr_queues; i++) {
+ ret = rte_eth_rx_queue_setup(port_id, i, 512,
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ ":: Rx queue setup failed: err=%d, port=%u\n",
+ ret, port_id);
+ }
+ }
+
+ rte_eth_promiscuous_enable(port_id);
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0) {
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, port_id);
+ }
+
+ assert_link_status();
+
+ printf(":: initializing port: %d done\n", port_id);
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret;
+ uint8_t nr_ports;
+ struct rte_flow_error error;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, ":: invalid EAL arguments\n");
+
+ force_quit = false;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ nr_ports = rte_eth_dev_count();
+ if (nr_ports == 0)
+ rte_exit(EXIT_FAILURE, ":: no Ethernet ports found\n");
+ port_id = 0;
+ if (nr_ports != 1) {
+ printf(":: warn: %d ports detected, but we use only one: port %u\n",
+ nr_ports, port_id);
+ }
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 4096, 128, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+ init_port();
+
+	/* create a flow rule that steers packets matching DEST_IP to selected_queue */
+ flow = generate_ipv4_flow(port_id, selected_queue,
+ SRC_IP, EMPTY_MASK,
+ DEST_IP, FULL_MASK, &error);
+ if (!flow) {
+		printf("Flow can't be created: error type %d, message: %s\n",
+ error.type,
+ error.message ? error.message : "(no stated reason)");
+ rte_exit(EXIT_FAILURE, "error in creating flow");
+ }
+
+ main_loop();
+
+ return 0;
+}
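The SRC_IP and DEST_IP macros above build addresses in host byte order, and generate_ipv4_flow() converts them with htonl() before matching. A standalone check of that convention (illustrative only):

#include <assert.h>
#include <stdint.h>
#include <arpa/inet.h>

int
main(void)
{
	/* same arithmetic as the DEST_IP macro: 192.168.1.1 */
	uint32_t dest_ip = (192u << 24) + (168u << 16) + (1u << 8) + 1;

	assert(dest_ip == 0xC0A80101u);
	/* after htonl() it matches the on-wire representation */
	assert(htonl(dest_ip) == inet_addr("192.168.1.1"));
	return 0;
}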
diff --git a/examples/helloworld/main.c b/examples/helloworld/main.c
index 8b7a2de0..916b6ad0 100644
--- a/examples/helloworld/main.c
+++ b/examples/helloworld/main.c
@@ -38,7 +38,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 8c0e1791..5aefe098 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -58,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -154,7 +152,7 @@ struct rx_queue {
struct rte_mempool *indirect_pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
#define MAX_RX_QUEUE_PER_LCORE 16
@@ -240,7 +238,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -261,11 +259,12 @@ send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
- uint8_t queueid, uint8_t port_in)
+ uint8_t queueid, uint16_t port_in)
{
struct rx_queue *rxq;
uint32_t i, len, next_hop;
- uint8_t port_out, ipv6;
+ uint8_t ipv6;
+ uint16_t port_out;
int32_t len2;
ipv6 = 0;
@@ -403,7 +402,7 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -423,7 +422,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].portid;
RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
- (int) portid);
+ portid);
}
while (1) {
@@ -600,11 +599,12 @@ print_ethaddr(const char *name, struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print their status at the end */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -619,14 +619,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+				"Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -708,7 +707,7 @@ parse_ptype(struct rte_mbuf *m)
/* callback function to detect packet type for a queue of a port */
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -876,7 +875,7 @@ main(int argc, char **argv)
uint16_t queueid = 0;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1035,7 +1034,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
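The cb_parse_ptype change above tracks the rte_rx_callback_fn prototype, whose port argument is now uint16_t. A minimal registration sketch under stated assumptions: count_cb and install_count_cb are hypothetical names, port/queue 0 are already configured, and the build has Rx/Tx callbacks enabled:

#include <stdio.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint64_t rx_count;

static uint16_t
count_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
	struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
	uint16_t max_pkts __rte_unused, void *user_param)
{
	*(uint64_t *)user_param += nb_pkts;	/* tally received packets */
	return nb_pkts;				/* keep the whole burst */
}

static void
install_count_cb(uint16_t port_id)
{
	if (rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count) == NULL)
		printf("could not add Rx callback on port %u\n", port_id);
}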
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index dc7e0ddd..12ce0a1d 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -43,7 +43,7 @@ APP = ip_pipeline
VPATH += $(SRCDIR)/pipeline
-INC += $(wildcard *.h) $(wildcard pipeline/*.h)
+INC += $(sort $(wildcard *.h)) $(sort $(wildcard pipeline/*.h))
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index e41290e7..94e7a6df 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -428,10 +428,6 @@ struct app_eal_params {
/* Interrupt mode for VFIO (legacy|msi|msix) */
char *vfio_intr;
- /* Support running on Xen dom0 without hugetlbfs */
- uint32_t xen_dom0_present;
- int xen_dom0;
-
uint32_t parsed;
};
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 0b761346..3211c6ab 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -809,21 +809,6 @@ parse_eal(struct app_params *app,
continue;
}
- /* xen_dom0 */
- if (strcmp(entry->name, "xen_dom0") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->xen_dom0_present == 0),
- section_name,
- entry->name);
- p->xen_dom0_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->xen_dom0 = val;
- continue;
- }
-
/* unrecognized */
PARSE_ERROR_INVALID(0, section_name, entry->name);
}
@@ -2643,10 +2628,6 @@ save_eal_params(struct app_params *app, FILE *f)
if (p->vfio_intr)
fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
- if (p->xen_dom0_present)
- fprintf(f, "%s = %s\n", "xen_dom0",
- (p->xen_dom0) ? "yes" : "no");
-
fputc('\n', f);
}
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 7cde49a4..e56e4048 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -49,6 +49,7 @@
#include <rte_ip.h>
#include <rte_eal.h>
#include <rte_malloc.h>
+#include <rte_bus_pci.h>
#include "app.h"
#include "pipeline.h"
@@ -296,11 +297,6 @@ app_init_eal(struct app_params *app)
app->eal_argv[n_args++] = strdup(buffer);
}
- if ((p->xen_dom0_present) && (p->xen_dom0)) {
- snprintf(buffer, sizeof(buffer), "--xen-dom0");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
snprintf(buffer, sizeof(buffer), "--");
app->eal_argv[n_args++] = strdup(buffer);
@@ -1236,7 +1232,7 @@ app_init_tap(struct app_params *app)
#ifdef RTE_LIBRTE_KNI
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
int ret = 0;
if (port_id >= rte_eth_dev_count())
@@ -1250,7 +1246,7 @@ kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
}
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
int ret;
if (port_id >= rte_eth_dev_count())
diff --git a/examples/ip_pipeline/pipeline/hash_func.h b/examples/ip_pipeline/pipeline/hash_func.h
index b112369c..42128c1f 100644
--- a/examples/ip_pipeline/pipeline/hash_func.h
+++ b/examples/ip_pipeline/pipeline/hash_func.h
@@ -34,48 +34,56 @@
#define __INCLUDE_HASH_FUNC_H__
static inline uint64_t
-hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = seed ^ k[0];
+ xor0 = seed ^ (k[0] & m[0]);
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = (k[0] ^ seed) ^ k[1];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = (k[0] ^ seed) ^ k[1];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor0 ^= k[2];
+ xor0 ^= k[2] & m[2];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
xor0 ^= xor1;
@@ -83,30 +91,34 @@ hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
xor0 ^= xor1;
- xor0 ^= k[4];
+ xor0 ^= k[4] & m[4];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
xor0 ^= xor1;
@@ -116,17 +128,19 @@ hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
xor0 ^= xor1;
- xor2 ^= k[6];
+ xor2 ^= k[6] & m[6];
xor0 ^= xor2;
@@ -134,15 +148,17 @@ hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2, xor3;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
- xor3 = k[6] ^ k[7];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
xor0 ^= xor1;
xor2 ^= xor3;
@@ -157,26 +173,30 @@ hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
#include <x86intrin.h>
static inline uint64_t
-hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t crc0;
- crc0 = _mm_crc32_u64(seed, k[0]);
+ crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
return crc0;
}
static inline uint64_t
-hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, crc0, crc1;
- k0 = k[0];
+ k0 = k[0] & m[0];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
crc0 ^= crc1;
@@ -184,16 +204,18 @@ hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
crc0 = _mm_crc32_u64(crc0, k2);
@@ -203,18 +225,20 @@ hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
crc3 = k2 >> 32;
crc0 = _mm_crc32_u64(crc0, crc1);
@@ -226,19 +250,21 @@ hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
crc0 = _mm_crc32_u64(crc0, crc1);
crc1 = _mm_crc32_u64(crc2, crc3);
@@ -249,20 +275,22 @@ hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, k5);
@@ -273,22 +301,24 @@ hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
- crc4 = _mm_crc32_u64(k5, k[6]);
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
crc5 = k5 >> 32;
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
@@ -300,23 +330,25 @@ hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
- crc4 = _mm_crc32_u64(k5, k[6]);
- crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
@@ -335,6 +367,8 @@ hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
#define hash_default_key56 hash_crc_key56
#define hash_default_key64 hash_crc_key64
+#elif defined(RTE_ARCH_ARM64)
+#include "hash_func_arm64.h"
#else
#define hash_default_key8 hash_xor_key8
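
Every hash helper above gains a mask argument and folds it in as k[i] & m[i] before mixing, matching the reworked librte_table hash API whose f_hash callback now carries the key mask; a caller that wants the old unmasked behaviour passes an all-ones mask, as pipeline_flow_classification.c does below. A generic sketch of the masked XOR-fold scheme for any multiple-of-8 key size, assuming only <stdint.h>:

    #include <stdint.h>

    /* Sketch: XOR the seed with every masked 64-bit word of the key,
     * then fold the upper half onto the lower, as the fixed-size
     * hash_xor_key* variants above do with unrolled statements. */
    static inline uint64_t
    hash_xor_generic(void *key, void *mask, uint32_t key_size,
        uint64_t seed)
    {
        uint64_t *k = key;
        uint64_t *m = mask;
        uint64_t x = seed;
        uint32_t i;

        for (i = 0; i < key_size / 8; i++)
            x ^= k[i] & m[i];

        return (x >> 32) ^ x;
    }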
diff --git a/examples/ip_pipeline/pipeline/hash_func_arm64.h b/examples/ip_pipeline/pipeline/hash_func_arm64.h
new file mode 100644
index 00000000..ae6c0f41
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/hash_func_arm64.h
@@ -0,0 +1,261 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Linaro Limited. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __HASH_FUNC_ARM64_H__
+#define __HASH_FUNC_ARM64_H__
+
+#define _CRC32CX(crc, val) \
+ __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint32_t crc0;
+
+ crc0 = seed;
+ _CRC32CX(crc0, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ _CRC32CX(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, k5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+ _CRC32CX(crc5, k[7] & m[7]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#endif
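
The new header mirrors the x86 SSE4.2 variants using the AArch64 CRC32C instructions: _CRC32CX updates a 32-bit accumulator ("%w" register) with a 64-bit operand ("%x" register), and the 64-bit temporaries are deliberately truncated when seeding each accumulator. The same instruction is reachable through the ACLE intrinsic __crc32cd() when building with -march=armv8-a+crc; a sketch of the key8 case in that style (signature simplified, intrinsic availability assumed):

    #include <stdint.h>
    #include <arm_acle.h>   /* needs -march=armv8-a+crc */

    /* Same computation as hash_crc_key8 above, without inline asm. */
    static inline uint64_t
    hash_crc_key8_acle(void *key, void *mask, uint64_t seed)
    {
        uint64_t *k = key;
        uint64_t *m = mask;

        return __crc32cd((uint32_t)seed, k[0] & m[0]);
    }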
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 9ef50cc9..70b19381 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -88,8 +88,10 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
uint32_t *signature)
{
uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint8_t m[PIPELINE_FC_FLOW_KEY_MAX_SIZE]; /* key mask */
void *key_buffer = (key_out) ? key_out : buffer;
+ memset(m, 0xFF, sizeof(m));
switch (key_in->type) {
case FLOW_KEY_QINQ:
{
@@ -101,7 +103,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
if (signature)
- *signature = (uint32_t) hash_default_key8(qinq, 8, 0);
+ *signature = (uint32_t) hash_default_key8(qinq, m, 8, 0);
return 0;
}
@@ -118,7 +120,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
if (signature)
- *signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
+ *signature = (uint32_t) hash_default_key16(ipv4, m, 16, 0);
return 0;
}
@@ -136,7 +138,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
if (signature)
- *signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
+ *signature = (uint32_t) hash_default_key64(ipv6, m, 64, 0);
return 0;
}
@@ -832,12 +834,12 @@ app_pipeline_fc_add_bulk(struct app_params *app,
}
/* Free resources */
- app_msg_free(app, rsp);
for (i = rsp->n_keys; i < n_keys; i++)
if (new_flow[i])
rte_free(flow[i]);
+ app_msg_free(app, rsp);
rte_free(flow_rsp);
rte_free(flow_req);
rte_free(new_flow);
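
Two fixes in this file: app_pipeline_fc_key_convert() now passes an all-ones mask so its signatures agree with the mask-aware hash functions, and in app_pipeline_fc_add_bulk() the app_msg_free(app, rsp) call is moved below the cleanup loop, which still reads rsp->n_keys, closing a use-after-free. The shape of that fix, as a sketch:

    /* Sketch: finish every read of the response before releasing it;
     * freeing first left rsp->n_keys as a dangling access. */
    for (i = rsp->n_keys; i < n_keys; i++)
        if (new_flow[i])
            rte_free(flow[i]);

    app_msg_free(app, rsp);   /* safe: rsp is no longer referenced */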
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 026f00cd..929d81cb 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -492,40 +492,16 @@ static void *pipeline_fc_init(struct pipeline_params *params,
/* Tables */
p->n_tables = 1;
{
- struct rte_table_hash_key8_ext_params
- table_hash_key8_params = {
- .n_entries = p_fc->n_flows,
- .n_entries_ext = p_fc->n_flows,
- .signature_offset = p_fc->hash_offset,
+ struct rte_table_hash_params table_hash_params = {
+ .name = p->name,
+ .key_size = p_fc->key_size,
.key_offset = p_fc->key_offset,
- .f_hash = hash_func[(p_fc->key_size / 8) - 1],
.key_mask = (p_fc->key_mask_present) ?
p_fc->key_mask : NULL,
- .seed = 0,
- };
-
- struct rte_table_hash_key16_ext_params
- table_hash_key16_params = {
- .n_entries = p_fc->n_flows,
- .n_entries_ext = p_fc->n_flows,
- .signature_offset = p_fc->hash_offset,
- .key_offset = p_fc->key_offset,
- .f_hash = hash_func[(p_fc->key_size / 8) - 1],
- .key_mask = (p_fc->key_mask_present) ?
- p_fc->key_mask : NULL,
- .seed = 0,
- };
-
- struct rte_table_hash_ext_params
- table_hash_params = {
- .key_size = p_fc->key_size,
.n_keys = p_fc->n_flows,
- .n_buckets = p_fc->n_flows / 4,
- .n_buckets_ext = p_fc->n_flows / 4,
+ .n_buckets = rte_align32pow2(p_fc->n_flows / 4),
.f_hash = hash_func[(p_fc->key_size / 8) - 1],
.seed = 0,
- .signature_offset = p_fc->hash_offset,
- .key_offset = p_fc->key_offset,
};
struct rte_pipeline_table_params table_params = {
@@ -542,32 +518,19 @@ static void *pipeline_fc_init(struct pipeline_params *params,
switch (p_fc->key_size) {
case 8:
- if (p_fc->hash_offset != 0) {
- table_params.ops =
- &rte_table_hash_key8_ext_ops;
- } else {
- table_params.ops =
- &rte_table_hash_key8_ext_dosig_ops;
- }
- table_params.arg_create = &table_hash_key8_params;
+ table_params.ops = &rte_table_hash_key8_ext_ops;
break;
case 16:
- if (p_fc->hash_offset != 0) {
- table_params.ops =
- &rte_table_hash_key16_ext_ops;
- } else {
- table_params.ops =
- &rte_table_hash_key16_ext_dosig_ops;
- }
- table_params.arg_create = &table_hash_key16_params;
+ table_params.ops = &rte_table_hash_key16_ext_ops;
break;
default:
table_params.ops = &rte_table_hash_ext_ops;
- table_params.arg_create = &table_hash_params;
}
+ table_params.arg_create = &table_hash_params;
+
status = rte_pipeline_table_create(p->p,
&table_params,
&p->table_id[0]);
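
The flow-classification back end moves to the consolidated 17.11 librte_table hash API: the per-key-size parameter structs and the *_dosig_ops variants disappear, one struct rte_table_hash_params serves every key size, and the bucket count must be a power of two. A sketch of the unified parameter block, using only fields visible in this hunk (the table name, offset and flow count are placeholders):

    #include <rte_common.h>
    #include <rte_table_hash.h>

    struct rte_table_hash_params params = {
        .name = "fc_table",                  /* hypothetical table name */
        .key_size = 16,
        .key_offset = 64,                    /* hypothetical offset */
        .key_mask = NULL,                    /* NULL: match all key bits */
        .n_keys = n_flows,
        .n_buckets = rte_align32pow2(n_flows / 4),
        .f_hash = hash_default_key16,
        .seed = 0,
    };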
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
index 8cb2f0c7..93eedbe8 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -102,7 +102,7 @@ pkt_work_dma(
/* Read (dma_dst), compute (hash), write (hash) */
if (hash_enabled) {
- uint32_t hash = p->f_hash(dma_dst, dma_size, 0);
+ uint32_t hash = p->f_hash(dma_src, dma_mask, dma_size, 0);
*dma_hash = hash;
if (lb_hash) {
@@ -173,10 +173,10 @@ pkt4_work_dma(
/* Read (dma_dst), compute (hash), write (hash) */
if (hash_enabled) {
- uint32_t hash0 = p->f_hash(dma_dst0, dma_size, 0);
- uint32_t hash1 = p->f_hash(dma_dst1, dma_size, 0);
- uint32_t hash2 = p->f_hash(dma_dst2, dma_size, 0);
- uint32_t hash3 = p->f_hash(dma_dst3, dma_size, 0);
+ uint32_t hash0 = p->f_hash(dma_src0, dma_mask, dma_size, 0);
+ uint32_t hash1 = p->f_hash(dma_src1, dma_mask, dma_size, 0);
+ uint32_t hash2 = p->f_hash(dma_src2, dma_mask, dma_size, 0);
+ uint32_t hash3 = p->f_hash(dma_src3, dma_mask, dma_size, 0);
*dma_hash0 = hash0;
*dma_hash1 = hash1;
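
The passthrough DMA path previously hashed dma_dst, the already-masked copy; with mask-aware hash functions the mask is applied inside f_hash, so the raw source buffer and the DMA mask are passed instead. A sketch of the resulting fast path:

    /* Sketch: masked copy into metadata, then hash the raw source and
     * let f_hash apply the same mask word by word. */
    for (i = 0; i < dma_size / 8; i++)
        dst64[i] = src64[i] & mask64[i];

    if (hash_enabled)
        *dma_hash = p->f_hash(dma_src, dma_mask, dma_size, 0);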
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
index 78317165..0414f248 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -1349,17 +1349,20 @@ pipeline_routing_init(struct pipeline_params *params,
/* ARP table configuration */
if (p_rt->params.n_arp_entries) {
- struct rte_table_hash_key8_ext_params table_arp_params = {
- .n_entries = p_rt->params.n_arp_entries,
- .n_entries_ext = p_rt->params.n_arp_entries,
+ struct rte_table_hash_params table_arp_params = {
+ .name = p->name,
+ .key_size = 8,
+ .key_offset = p_rt->params.arp_key_offset,
+ .key_mask = NULL,
+ .n_keys = p_rt->params.n_arp_entries,
+ .n_buckets =
+ rte_align32pow2(p_rt->params.n_arp_entries / 4),
.f_hash = hash_default_key8,
.seed = 0,
- .signature_offset = 0, /* Unused */
- .key_offset = p_rt->params.arp_key_offset,
};
struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_hash_key8_ext_dosig_ops,
+ .ops = &rte_table_hash_key8_ext_ops,
.arg_create = &table_arp_params,
.f_action_hit = get_arp_table_ah_hit(p_rt),
.f_action_miss = NULL,
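
The routing ARP table takes the same migration: rte_table_hash_key8_ext_dosig_ops becomes the unified rte_table_hash_key8_ext_ops, and the bucket count is rounded up because the new implementation requires a power of two. A one-line sketch of the rounding helper from <rte_common.h>:

    #include <rte_common.h>

    /* rte_align32pow2() rounds up to the next power of two:
     * e.g. 1000 ARP entries / 4 = 250 buckets, rounded up to 256. */
    uint32_t n_buckets = rte_align32pow2(n_arp_entries / 4);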
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index e62636cb..756f90ef 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -49,7 +49,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -59,7 +58,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -84,10 +82,10 @@
#define MAX_JUMBO_PKT_LEN 9600
#define BUF_SIZE RTE_MBUF_DEFAULT_DATAROOM
-#define MBUF_SIZE \
- (BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_MBUF 8192
+#define MEMPOOL_CACHE_SIZE 256
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600
@@ -166,7 +164,7 @@ struct rx_queue {
struct rte_mempool *pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
struct tx_lcore_stat {
@@ -277,7 +275,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
* send burst of packets on an output interface.
*/
static inline uint32_t
-send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
uint32_t fill, len, k, n;
struct mbuf_table *txmb;
@@ -307,7 +305,7 @@ send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t fill, lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -337,7 +335,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
}
static inline void
-reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
+reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
struct lcore_queue_conf *qconf, uint64_t tms)
{
struct ether_hdr *eth_hdr;
@@ -346,7 +344,7 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
struct rx_queue *rxq;
void *d_addr_bytes;
uint32_t next_hop;
- uint8_t dst_port;
+ uint16_t dst_port;
rxq = &qconf->rx_queue_list[queue];
@@ -454,7 +452,7 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t diff_tsc, cur_tsc, prev_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -473,7 +471,7 @@ main_loop(__attribute__((unused)) void *dummy)
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].portid;
- RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
+ RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
portid);
}
@@ -732,11 +730,12 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -751,14 +750,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -909,11 +907,11 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
- if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
- RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
+ rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
+ MBUF_DATA_SIZE, socket);
+ if (rxq->pool == NULL) {
+ RTE_LOG(ERR, IP_RSMBL,
+ "rte_pktmbuf_pool_create(%s) failed", buf);
return -1;
}
@@ -987,7 +985,7 @@ queue_dump_stat(void)
qconf = &lcore_queue_conf[lcore];
for (i = 0; i < qconf->n_rx_queue; i++) {
- fprintf(stdout, " -- lcoreid=%u portid=%hhu "
+ fprintf(stdout, " -- lcoreid=%u portid=%u "
"frag tbl stat:\n",
lcore, qconf->rx_queue_list[i].portid);
rte_ip_frag_table_statistics_dump(stdout,
@@ -1024,7 +1022,7 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1177,7 +1175,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
signal(SIGUSR1, signal_handler);
signal(SIGTERM, signal_handler);
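
ip_reassembly retires the hand-rolled rte_mempool_create() call and its MBUF_SIZE arithmetic in favour of the rte_pktmbuf_pool_create() helper, which computes the element size and private area itself, and the pool gains a per-lcore cache. A sketch of the replacement call with this file's constants ("socket" is the rx queue's NUMA socket):

    #include <rte_mbuf.h>

    /* Sketch: the helper internally sizes each element as
     * sizeof(struct rte_mbuf) + priv_size + data_room_size. */
    struct rte_mempool *pool = rte_pktmbuf_pool_create(
        "mbuf_pool_0_0",            /* per lcore/queue pool name */
        NB_MBUF,                    /* 8192 mbufs */
        MEMPOOL_CACHE_SIZE,         /* 256-entry per-lcore cache */
        0,                          /* no app private area */
        MBUF_DATA_SIZE,             /* RTE_MBUF_DEFAULT_BUF_SIZE */
        socket);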
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index 17e91551..9fd33cb7 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -38,6 +38,12 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build ipsec-secgw")
+endif
+endif
+
APP = ipsec-secgw
CFLAGS += -O3 -gdwarf-2
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 70bb81f7..c3efe52b 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,8 +58,11 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_sym_op *sym_cop;
int32_t payload_len, ip_hdr_len;
- RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ return 0;
+
+ RTE_ASSERT(m != NULL);
RTE_ASSERT(cop != NULL);
ip4 = rte_pktmbuf_mtod(m, struct ip *);
@@ -103,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
@@ -154,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
@@ -175,29 +178,44 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ }
+
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
- nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
- rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
- pad_len = nexthdr - 1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
+ nexthdr = &m->inner_esp_next_proto;
+ } else {
+ nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
+ pad_len = nexthdr - 1;
+
+ padding = pad_len - *pad_len;
+ for (i = 0; i < *pad_len; i++) {
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+ return -EINVAL;
+ }
+ }
- padding = pad_len - *pad_len;
- for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i + 1) {
- RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+ if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "failed to remove pad_len + digest\n");
return -EINVAL;
}
}
- if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- RTE_LOG(ERR, IPSEC_ESP,
- "failed to remove pad_len + digest\n");
- return -EINVAL;
- }
-
if (unlikely(sa->flags == TRANSPORT)) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
@@ -211,7 +229,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
/* XXX No option headers supported */
memmove(ip6, ip, sizeof(struct ip6_hdr));
ip6->ip6_nxt = *nexthdr;
- ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
}
} else
ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -226,14 +245,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct ip *ip4;
struct ip6_hdr *ip6;
struct esp_hdr *esp = NULL;
- uint8_t *padding, *new_ip, nlp;
+ uint8_t *padding = NULL, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
int32_t i;
uint16_t pad_payload_len, pad_len, ip_hdr_len;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
- RTE_ASSERT(cop != NULL);
ip_hdr_len = 0;
@@ -283,12 +301,19 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
return -EINVAL;
}
- padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
- if (unlikely(padding == NULL)) {
- RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
- return -ENOSPC;
+ /* Add trailer padding if it is not constructed by HW */
+ if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
+ sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
}
- rte_prefetch0(padding);
switch (sa->flags) {
case IP4_TUNNEL:
@@ -306,14 +331,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sizeof(struct esp_hdr) + sa->iv_len);
memmove(new_ip, ip4, ip_hdr_len);
esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ ip4 = (struct ip *)new_ip;
if (likely(ip4->ip_v == IPVERSION)) {
- ip4 = (struct ip *)new_ip;
ip4->ip_p = IPPROTO_ESP;
ip4->ip_len = htons(rte_pktmbuf_data_len(m));
} else {
ip6 = (struct ip6_hdr *)new_ip;
ip6->ip6_nxt = IPPROTO_ESP;
- ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
}
}
@@ -321,15 +347,46 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
esp->spi = rte_cpu_to_be_32(sa->spi);
esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
+ /* set iv */
uint64_t *iv = (uint64_t *)(esp + 1);
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ *iv = rte_cpu_to_be_64(sa->seq);
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ memset(iv, 0, sa->iv_len);
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ *iv = rte_cpu_to_be_64(sa->seq);
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP,
+ "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+ }
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
+ /* Set the inner esp next protocol for HW trailer */
+ m->inner_esp_next_proto = nlp;
+ m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ } else {
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+ }
+ goto done;
+ }
+
+ RTE_ASSERT(cop != NULL);
sym_cop = get_sym_cop(cop);
sym_cop->m_src = m;
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
uint8_t *aad;
- *iv = sa->seq;
sym_cop->aead.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = pad_payload_len;
@@ -342,30 +399,28 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct cnt_blk *icb = get_cnt_blk(m);
icb->salt = sa->salt;
- icb->iv = sa->seq;
+ icb->iv = rte_cpu_to_be_64(sa->seq);
icb->cnt = rte_cpu_to_be_32(1);
aad = get_aad(m);
memcpy(aad, esp, 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_AES_CBC:
- memset(iv, 0, sa->iv_len);
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- *iv = sa->seq;
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
@@ -384,7 +439,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct cnt_blk *icb = get_cnt_blk(m);
icb->salt = sa->salt;
- icb->iv = sa->seq;
+ icb->iv = rte_cpu_to_be_64(sa->seq);
icb->cnt = rte_cpu_to_be_32(1);
switch (sa->auth_algo) {
@@ -403,25 +458,30 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
+done:
return 0;
}
int
-esp_outbound_post(struct rte_mbuf *m __rte_unused,
- struct ipsec_sa *sa __rte_unused,
- struct rte_crypto_op *cop)
+esp_outbound_post(struct rte_mbuf *m,
+ struct ipsec_sa *sa,
+ struct rte_crypto_op *cop)
{
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
- RTE_ASSERT(cop != NULL);
- if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
- return -1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ } else {
+ RTE_ASSERT(cop != NULL);
+ if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ return -1;
+ }
}
return 0;
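
esp.c grows the inline-crypto path: for RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO SAs no crypto op is built, inbound status is derived from the PKT_RX_SEC_OFFLOAD and PKT_RX_SEC_OFFLOAD_FAILED mbuf flags, trailer padding and the next-protocol byte may be delegated to hardware, and IV generation is centralised (big-endian sequence number for AES-GCM and AES-CTR, zero IV for CBC and NULL). The rte_pktmbuf_mtophys_offset() to rte_pktmbuf_iova_offset() renames follow the 17.11 physical-address-to-IOVA API change. A sketch of the inbound status mapping, assuming the 17.11 mbuf offload flags:

    #include <rte_mbuf.h>
    #include <rte_crypto.h>

    /* Sketch: translate inline-crypto RX flags into the crypto-op
     * status that the rest of the datapath already checks. */
    static void
    inline_rx_status(const struct rte_mbuf *m, struct rte_crypto_op *cop)
    {
        if (m->ol_flags & PKT_RX_SEC_OFFLOAD)
            cop->status = (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) ?
                RTE_CRYPTO_OP_STATUS_ERROR :
                RTE_CRYPTO_OP_STATUS_SUCCESS;
        else
            cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
    }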
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index fa5cc8af..23601e37 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -35,16 +35,6 @@
struct mbuf;
-/* RFC4303 */
-struct esp_hdr {
- uint32_t spi;
- uint32_t seq;
- /* Payload */
- /* Padding */
- /* Pad Length */
- /* Next Header */
- /* Integrity Check Value - ICV */
-};
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
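
The local RFC4303 header definition is dropped; the definition now comes from librte_net's rte_esp.h, pulled in through rte_flow.h, which ipsec.h starts including below. For reference, a sketch matching the shared layout (fixed part only; payload, padding, pad length, next header and ICV follow on the wire):

    #include <rte_byteorder.h>

    struct esp_hdr {
        rte_be32_t spi;   /* security parameters index */
        rte_be32_t seq;   /* anti-replay sequence number */
    } __attribute__((__packed__));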
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index ff1dccdb..93393d52 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -72,7 +72,8 @@ ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
/* Per RFC4301 5.1.2.1 */
outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
- outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
outip6->ip6_nxt = IPPROTO_ESP;
outip6->ip6_hops = IPDEFTTL;
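
This corrects the IPv6 payload length on encapsulation: unlike IPv4's ip_len, which covers the whole datagram, ip6_plen counts only the bytes after the fixed 40-byte header, so sizeof(struct ip6_hdr) must be subtracted. Sketch:

    #include <netinet/ip6.h>

    /* For a 1000-byte segment, ip6_plen is 960, not 1000. */
    outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
        sizeof(struct ip6_hdr));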
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 99dc270c..c98454a9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -57,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -161,14 +160,15 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
+static uint32_t frame_size;
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -204,11 +204,9 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP,
+ .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -290,7 +288,7 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
}
static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
{
struct ip *ip;
struct ether_hdr *ethhdr;
@@ -320,7 +318,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
}
static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
{
int32_t i;
const int32_t prefetch_offset = 2;
@@ -336,7 +334,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
/* Send burst of packets on an output interface */
static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int32_t ret;
@@ -359,7 +357,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
@@ -646,7 +644,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint8_t portid)
+ uint8_t nb_pkts, uint16_t portid)
{
struct ipsec_traffic traffic;
@@ -691,7 +689,8 @@ main_loop(__attribute__((unused)) void *dummy)
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int32_t i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int32_t socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -728,7 +727,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -759,7 +758,8 @@ main_loop(__attribute__((unused)) void *dummy)
static int32_t
check_params(void)
{
- uint8_t lcore, portid, nb_ports;
+ uint8_t lcore;
+ uint16_t portid, nb_ports;
uint16_t i;
int32_t socket_id;
@@ -797,7 +797,7 @@ check_params(void)
}
static uint8_t
-get_port_nb_rx_queues(const uint8_t port)
+get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
uint16_t i;
@@ -843,6 +843,7 @@ print_usage(const char *prgname)
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
+ " -j FRAMESIZE: jumbo frame maximum size\n"
" --"OPTION_CONFIG": (port,queue,lcore): "
"rx queues configuration\n"
" --single-sa SAIDX: use single SA index for outbound, "
@@ -981,7 +982,7 @@ parse_args(int32_t argc, char **argv)
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:Pu:f:",
+ while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -1020,6 +1021,23 @@ parse_args(int32_t argc, char **argv)
}
f_present = 1;
break;
+ case 'j':
+ {
+ int32_t size = parse_decimal(optarg);
+ if (size <= 1518) {
+ printf("Invalid jumbo frame size\n");
+ if (size < 0) {
+ print_usage(prgname);
+ return -1;
+ }
+ printf("Using default value 9000\n");
+ frame_size = 9000;
+ } else {
+ frame_size = size;
+ }
+ }
+ printf("Enabled jumbo frames size %u\n", frame_size);
+ break;
case 0:
if (parse_args_long_options(lgopts, option_index)) {
print_usage(prgname);
@@ -1055,11 +1073,12 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1074,14 +1093,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (uint32_t)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %u Mbps -%s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -1113,7 +1131,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
uint16_t qp, struct lcore_params *params,
struct ipsec_ctx *ipsec_ctx,
const struct rte_cryptodev_capabilities *cipher,
- const struct rte_cryptodev_capabilities *auth)
+ const struct rte_cryptodev_capabilities *auth,
+ const struct rte_cryptodev_capabilities *aead)
{
int32_t ret = 0;
unsigned long i;
@@ -1124,6 +1143,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
key.cipher_algo = cipher->sym.cipher.algo;
if (auth)
key.auth_algo = auth->sym.auth.algo;
+ if (aead)
+ key.aead_algo = aead->sym.aead.algo;
ret = rte_hash_lookup(map, &key);
if (ret != -ENOENT)
@@ -1192,6 +1213,12 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
continue;
+ if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret |= add_mapping(map, str, cdev_id, qp, params,
+ ipsec_ctx, NULL, NULL, i);
+ continue;
+ }
+
if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
continue;
@@ -1204,7 +1231,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
continue;
ret |= add_mapping(map, str, cdev_id, qp, params,
- ipsec_ctx, i, j);
+ ipsec_ctx, i, j, NULL);
}
}
@@ -1322,7 +1349,7 @@ cryptodevs_init(void)
}
static void
-port_init(uint8_t portid)
+port_init(uint16_t portid)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
@@ -1357,6 +1384,16 @@ port_init(uint8_t portid)
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
+ if (frame_size) {
+ port_conf.rxmode.max_rx_pkt_len = frame_size;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
&port_conf);
if (ret < 0)
@@ -1421,11 +1458,14 @@ static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
char s[64];
+ uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+ RTE_MBUF_DEFAULT_BUF_SIZE;
+
snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- RTE_MBUF_DEFAULT_BUF_SIZE,
+ buff_size,
socket_id);
if (ctx->mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
@@ -1438,8 +1478,9 @@ int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
- uint32_t lcore_id, nb_ports;
- uint8_t portid, socket_id;
+ uint32_t lcore_id;
+ uint8_t socket_id;
+ uint16_t portid, nb_ports;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1522,7 +1563,7 @@ main(int32_t argc, char **argv)
rte_eth_promiscuous_enable(portid);
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
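
ipsec-secgw moves to the 17.11 ethdev offload model (a per-port offloads bitfield plus the transitional ignore_offload_bitfield flag), widens port IDs to 16 bits, enables RX/TX security offload wherever the device advertises it, adds AEAD algorithms to the cryptodev mapping, and gains a -j option that sizes both the maximum RX frame and the mbuf data room. A sketch of the new port configuration, using only fields shown in this hunk:

    #include <rte_ethdev.h>

    struct rte_eth_conf conf = {
        .rxmode = {
            .mq_mode = ETH_MQ_RX_RSS,
            .max_rx_pkt_len = ETHER_MAX_LEN,
            .offloads = DEV_RX_OFFLOAD_CHECKSUM |
                DEV_RX_OFFLOAD_CRC_STRIP,
            .ignore_offload_bitfield = 1, /* legacy flag fields unused */
        },
    };

    /* Turn on inline security offload only where the HW reports it. */
    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
        conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;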
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 0afb9d67..c24284d6 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -37,7 +37,9 @@
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
+#include <rte_security.h>
#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>
@@ -49,21 +51,28 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
- int32_t ret;
+ int32_t ret = 0;
struct cdev_key key = { 0 };
key.lcore_id = (uint8_t)rte_lcore_id();
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
+ key.aead_algo = (uint8_t)sa->aead_algo;
- ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
- (void **)&cdev_id_qp);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
- "auth_algo %u\n", key.lcore_id, key.cipher_algo,
- key.auth_algo);
- return -1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+ (void **)&cdev_id_qp);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "No cryptodev: core %u, cipher_algo %u, "
+ "auth_algo %u, aead_algo %u\n",
+ key.lcore_id,
+ key.cipher_algo,
+ key.auth_algo,
+ key.aead_algo);
+ return -1;
+ }
}
RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -71,23 +80,153 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
- sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->session_pool);
- rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
- sa->crypto_session, sa->xforms,
- ipsec_ctx->session_pool);
-
- rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
- if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
- ret = rte_cryptodev_queue_pair_attach_sym_session(
- ipsec_ctx->tbl[cdev_id_qp].id,
- ipsec_ctx->tbl[cdev_id_qp].qp,
- sa->crypto_session);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC,
- "Session cannot be attached to qp %u ",
- ipsec_ctx->tbl[cdev_id_qp].qp);
- return -1;
+ if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+ struct rte_security_session_conf sess_conf = {
+ .action_type = sa->type,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .spi = sa->spi,
+ .salt = sa->salt,
+ .options = { 0 },
+ .direction = sa->direction,
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = (sa->flags == IP4_TUNNEL ||
+ sa->flags == IP6_TUNNEL) ?
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ },
+ .crypto_xform = sa->xforms
+
+ };
+
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_cryptodev_get_sec_ctx(
+ ipsec_ctx->tbl[cdev_id_qp].id);
+
+ if (sess_conf.ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ struct rte_security_ipsec_tunnel_param *tunnel =
+ &sess_conf.ipsec.tunnel;
+ if (sa->flags == IP4_TUNNEL) {
+ tunnel->type =
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4;
+ tunnel->ipv4.ttl = IPDEFTTL;
+
+ memcpy((uint8_t *)&tunnel->ipv4.src_ip,
+ (uint8_t *)&sa->src.ip.ip4, 4);
+
+ memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
+ (uint8_t *)&sa->dst.ip.ip4, 4);
+ }
+ /* TODO support for Transport and IPV6 tunnel */
+ }
+
+ sa->sec_session = rte_security_session_create(ctx,
+ &sess_conf, ipsec_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ struct rte_flow_error err;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ sa->portid);
+ const struct rte_security_capability *sec_cap;
+
+ sa->sec_session = rte_security_session_create(ctx,
+ &sess_conf, ipsec_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
+
+ sec_cap = rte_security_capabilities_get(ctx);
+
+ /* iterate until ESP tunnel*/
+ /* iterate until the ESP tunnel capability is found */
+ while (sec_cap->action !=
+ RTE_SECURITY_ACTION_TYPE_NONE) {
+
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
+
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
+ "No suitable security capability found\n");
+ return -1;
+ }
+
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = ctx;
+ sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ if (sa->flags & IP6_TUNNEL) {
+ sa->pattern[1].spec = &sa->ipv6_spec;
+ memcpy(sa->ipv6_spec.hdr.dst_addr,
+ sa->dst.ip.ip6.ip6_b, 16);
+ memcpy(sa->ipv6_spec.hdr.src_addr,
+ sa->src.ip.ip6.ip6_b, 16);
+ } else {
+ sa->pattern[1].spec = &sa->ipv4_spec;
+ sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+ sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+ }
+
+ sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+ sa->pattern[2].spec = &sa->esp_spec;
+ sa->pattern[2].mask = &rte_flow_item_esp_mask;
+ sa->esp_spec.hdr.spi = sa->spi;
+
+ sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ sa->action[0].conf = sa->sec_session;
+
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ sa->attr.egress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+ sa->flow = rte_flow_create(sa->portid,
+ &sa->attr, sa->pattern, sa->action, &err);
+ if (sa->flow == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "Failed to create ipsec flow msg: %s\n",
+ err.message);
+ return -1;
+ }
+ }
+ } else {
+ sa->crypto_session = rte_cryptodev_sym_session_create(
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_pool);
+
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+ &cdev_info);
+ if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+ ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].id,
+ ipsec_ctx->tbl[cdev_id_qp].qp,
+ sa->crypto_session);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Session cannot be attached to qp %u\n",
+ ipsec_ctx->tbl[cdev_id_qp].qp);
+ return -1;
+ }
}
}
sa->cdev_id_qp = cdev_id_qp;
@@ -125,7 +264,9 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
{
int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
+ struct rte_crypto_sym_op *sym_cop;
struct ipsec_sa *sa;
+ struct cdev_qp *cqp;
for (i = 0; i < nb_pkts; i++) {
if (unlikely(sas[i] == NULL)) {
@@ -140,23 +281,76 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa = sas[i];
priv->sa = sa;
- priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
- rte_crypto_op_attach_sym_session(&priv->cop,
- sa->crypto_session);
-
- ret = xform_func(pkts[i], sa, &priv->cop);
- if (unlikely(ret)) {
- rte_pktmbuf_free(pkts[i]);
+ switch (sa->type) {
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->sec_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ sym_cop = get_sym_cop(&priv->cop);
+ sym_cop->m_src = pkts[i];
+
+ rte_security_attach_session(&priv->cop,
+ sa->sec_session);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_NONE:
+
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->crypto_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ rte_crypto_op_attach_sym_session(&priv->cop,
+ sa->crypto_session);
+
+ ret = xform_func(pkts[i], sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->sec_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ rte_security_attach_session(&priv->cop,
+ sa->sec_session);
+
+ ret = xform_func(pkts[i], sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
+ cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
+ if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
+ rte_security_set_pkt_metadata(
+ sa->security_ctx,
+ sa->sec_session, pkts[i], NULL);
continue;
}
@@ -167,7 +361,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
- struct rte_mbuf *pkts[], uint16_t max_pkts)
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
{
int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
struct ipsec_mbuf_metadata *priv;
@@ -182,6 +376,19 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
+ while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+ pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
+ rte_prefetch0(pkt);
+ priv = get_priv(pkt);
+ sa = priv->sa;
+ ret = xform_func(pkt, sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ pkts[nb_pkts++] = pkt;
+ }
+
if (cqp->in_flight == 0)
continue;
@@ -199,11 +406,14 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
RTE_ASSERT(sa != NULL);
- ret = xform_func(pkt, sa, cops[j]);
- if (unlikely(ret))
- rte_pktmbuf_free(pkt);
- else
- pkts[nb_pkts++] = pkt;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ ret = xform_func(pkt, sa, cops[j]);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ }
+ pkts[nb_pkts++] = pkt;
}
}
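
create_session() now branches on the SA action type: RTE_SECURITY_ACTION_TYPE_NONE keeps the classic cryptodev symmetric session, while lookaside-protocol and inline-crypto SAs fill a struct rte_security_session_conf and call rte_security_session_create() against the cryptodev's or the ethdev's security context. Inline-crypto SAs additionally install an rte_flow rule (ETH / IPv4-or-IPv6 / ESP pattern with a SECURITY action) that steers matching ESP traffic into the session. A condensed sketch of that rule, using only items and actions present in this hunk (spi_be and sec_sess stand in for the SA's fields):

    #include <rte_flow.h>

    struct rte_flow_item_esp esp_spec = { .hdr = { .spi = spi_be } };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .mask = &rte_flow_item_ipv4_mask }, /* spec: SA tunnel IPs */
        { .type = RTE_FLOW_ITEM_TYPE_ESP,
          .spec = &esp_spec, .mask = &rte_flow_item_esp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action action[] = {
        { .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sec_sess },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };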
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index da1fb1b2..775b316f 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -38,6 +38,8 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_security.h>
+#include <rte_flow.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
@@ -99,7 +101,10 @@ struct ipsec_sa {
uint32_t cdev_id_qp;
uint64_t seq;
uint32_t salt;
- struct rte_cryptodev_sym_session *crypto_session;
+ union {
+ struct rte_cryptodev_sym_session *crypto_session;
+ struct rte_security_session *sec_session;
+ };
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
enum rte_crypto_aead_algorithm aead_algo;
@@ -117,7 +122,28 @@ struct ipsec_sa {
uint8_t auth_key[MAX_KEY_SIZE];
uint16_t auth_key_len;
uint16_t aad_len;
- struct rte_crypto_sym_xform *xforms;
+ union {
+ struct rte_crypto_sym_xform *xforms;
+ struct rte_security_ipsec_xform *sec_xform;
+ };
+ enum rte_security_session_action_type type;
+ enum rte_security_ipsec_sa_direction direction;
+ uint16_t portid;
+ struct rte_security_ctx *security_ctx;
+ uint32_t ol_flags;
+
+#define MAX_RTE_FLOW_PATTERN (4)
+#define MAX_RTE_FLOW_ACTIONS (2)
+ struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
+ struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
+ struct rte_flow_attr attr;
+ union {
+ struct rte_flow_item_ipv4 ipv4_spec;
+ struct rte_flow_item_ipv6 ipv6_spec;
+ };
+ struct rte_flow_item_esp esp_spec;
+ struct rte_flow *flow;
+ struct rte_security_session_conf sess_conf;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
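
Since crypto_session and sec_session now overlay each other in a union, the pointer is only meaningful after consulting sa->type; the dispatch in ipsec_enqueue() boils down to:

	if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE)
		rte_crypto_op_attach_sym_session(&priv->cop,
				sa->crypto_session);
	else
		rte_security_attach_session(&priv->cop, sa->sec_session);
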
@@ -133,6 +159,8 @@ struct cdev_qp {
uint16_t in_flight;
uint16_t len;
struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ uint16_t ol_pkts_cnt;
};
struct ipsec_ctx {
@@ -150,6 +178,7 @@ struct cdev_key {
uint16_t lcore_id;
uint8_t cipher_algo;
uint8_t auth_algo;
+ uint8_t aead_algo;
};
struct socket_ctx {
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 7be0e628..4c448e5c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -41,16 +41,20 @@
#include <rte_memzone.h>
#include <rte_crypto.h>
+#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
+#include <rte_ethdev.h>
#include "ipsec.h"
#include "esp.h"
#include "parser.h"
+#define IPDEFTTL 64
+
struct supported_cipher_algo {
const char *keyword;
enum rte_crypto_cipher_algorithm algo;
@@ -238,6 +242,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
+ uint32_t type_p = 0;
+ uint32_t portid_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
@@ -375,7 +381,6 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
rule->auth_algo = algo->algo;
rule->auth_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
- rule->aad_len = algo->key_len;
/* NULL algorithm and combined algos do not
* require auth key
@@ -431,7 +436,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
rule->aead_algo = algo->algo;
rule->cipher_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
- rule->aad_len = algo->key_len;
+ rule->aad_len = algo->aad_len;
rule->block_size = algo->block_size;
rule->iv_len = algo->iv_len;
@@ -550,6 +555,52 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
+ if (strcmp(tokens[ti], "type") == 0) {
+ APP_CHECK_PRESENCE(type_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
+ else if (strcmp(tokens[ti],
+ "inline-protocol-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
+ else if (strcmp(tokens[ti],
+ "lookaside-protocol-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
+ else if (strcmp(tokens[ti], "no-offload") == 0)
+ rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ else {
+ APP_CHECK(0, status, "Invalid input \"%s\"",
+ tokens[ti]);
+ return;
+ }
+
+ type_p = 1;
+ continue;
+ }
+
+ if (strcmp(tokens[ti], "port_id") == 0) {
+ APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+ rule->portid = atoi(tokens[ti]);
+ if (status->status < 0)
+ return;
+ portid_p = 1;
+ continue;
+ }
+
 /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
@@ -580,6 +631,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
+ if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+ printf("Missing portid option, falling back to non-offload\n");
+
+ if (!type_p || !portid_p) {
+ rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ rule->portid = -1;
+ }
+
*ri = *ri + 1;
}
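
With the two new tokens an SA rule can request an offload directly from the configuration file; an illustrative line (key and addresses are placeholders, token order follows the parser above):

	sa out 1 aead_algo aes-128-gcm aead_key <hex bytes, colon separated> \
		mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
		type inline-crypto-offload port_id 0
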
@@ -647,9 +706,11 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
struct sa_ctx {
struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
- struct {
- struct rte_crypto_sym_xform a;
- struct rte_crypto_sym_xform b;
+ union {
+ struct {
+ struct rte_crypto_sym_xform a;
+ struct rte_crypto_sym_xform b;
+ };
} xf[IPSEC_SA_MAX_ENTRIES];
};
@@ -682,6 +743,33 @@ sa_create(const char *name, int32_t socket_id)
}
static int
+check_eth_dev_caps(uint16_t portid, uint32_t inbound)
+{
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(portid, &dev_info);
+
+ if (inbound) {
+ if ((dev_info.rx_offload_capa &
+ DEV_RX_OFFLOAD_SECURITY) == 0) {
+ RTE_LOG(WARNING, PORT,
+ "hardware RX IPSec offload is not supported\n");
+ return -EINVAL;
+ }
+
+ } else { /* outbound */
+ if ((dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_SECURITY) == 0) {
+ RTE_LOG(WARNING, PORT,
+ "hardware TX IPSec offload is not supported\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+
+static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, uint32_t inbound)
{
@@ -700,6 +788,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
*sa = entries[i];
sa->seq = 0;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (check_eth_dev_caps(sa->portid, inbound))
+ return -EINVAL;
+ }
+
+ sa->direction = (inbound == 1) ?
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
+
switch (sa->flags) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -709,37 +807,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
iv_length = 16;
- if (inbound) {
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
- sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.aead.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.aead.op =
- RTE_CRYPTO_AEAD_OP_DECRYPT;
- sa_ctx->xf[idx].a.next = NULL;
- sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
- sa_ctx->xf[idx].a.aead.iv.length = iv_length;
- sa_ctx->xf[idx].a.aead.aad_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.aead.digest_length =
- sa->digest_len;
- } else { /* outbound */
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
- sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.aead.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.aead.op =
- RTE_CRYPTO_AEAD_OP_ENCRYPT;
- sa_ctx->xf[idx].a.next = NULL;
- sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
- sa_ctx->xf[idx].a.aead.iv.length = iv_length;
- sa_ctx->xf[idx].a.aead.aad_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.aead.digest_length =
- sa->digest_len;
- }
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
+ RTE_CRYPTO_AEAD_OP_DECRYPT :
+ RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
sa->xforms = &sa_ctx->xf[idx].a;
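
The rewrite collapses two branches that differed only in the AEAD op into one block keyed on the direction. The same pattern would serve the cipher/auth chain case as well (a sketch only; it ignores the a/b chain ordering, which differs between inbound and outbound):

	sa_ctx->xf[idx].a.cipher.op = (inbound == 1) ?
			RTE_CRYPTO_CIPHER_OP_DECRYPT :
			RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	sa_ctx->xf[idx].b.auth.op = (inbound == 1) ?
			RTE_CRYPTO_AUTH_OP_VERIFY :
			RTE_CRYPTO_AUTH_OP_GENERATE;
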
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 9a13d353..83ac0d80 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -116,7 +114,7 @@ static struct ether_addr ports_eth_addr[MAX_PORTS];
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
-static uint8_t nb_ports = 0;
+static uint16_t nb_ports;
static int rx_queue_per_lcore = 1;
@@ -195,7 +193,7 @@ static struct mcast_group_params mcast_group_table[] = {
/* Send burst of packets on an output interface */
static void
-send_burst(struct lcore_queue_conf *qconf, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t port)
{
struct rte_mbuf **m_table;
uint16_t n, queueid;
@@ -312,7 +310,7 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
*/
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
- struct lcore_queue_conf *qconf, uint8_t port)
+ struct lcore_queue_conf *qconf, uint16_t port)
{
struct ether_hdr *ethdr;
uint16_t len;
@@ -343,7 +341,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
struct ipv4_hdr *iphdr;
uint32_t dest_addr, port_mask, port_num, use_clone;
int32_t hash;
- uint8_t port;
+ uint16_t port;
union {
uint64_t as_int;
struct ether_addr as_addr;
@@ -407,7 +405,7 @@ static inline void
send_timeout_burst(struct lcore_queue_conf *qconf)
{
uint64_t cur_tsc;
- uint8_t portid;
+ uint16_t portid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
cur_tsc = rte_rdtsc();
@@ -428,7 +426,7 @@ main_loop(__rte_unused void *dummy)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
lcore_id = rte_lcore_id();
@@ -448,7 +446,7 @@ main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i];
RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
- lcore_id, (int) portid);
+ lcore_id, portid);
}
while (1) {
@@ -610,11 +608,12 @@ init_mcast_hash(void)
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -629,14 +628,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -673,7 +671,7 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
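
As elsewhere in this release, port ids widen from uint8_t to uint16_t to match the new ethdev port id type. Any loop over ports should follow suit (sketch; the 32-bit mask covers the first 32 ports, as in the examples):

	uint16_t portid;
	uint16_t nb_ports = rte_eth_dev_count();

	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;
		/* per-port init goes here */
	}
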
diff --git a/examples/kni/main.c b/examples/kni/main.c
index e3bc2fb7..3f173854 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -53,7 +53,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
@@ -61,7 +60,7 @@
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -110,7 +109,7 @@
* Structure of port parameters
*/
struct kni_port_params {
- uint8_t port_id;/* Port ID */
+ uint16_t port_id;/* Port ID */
unsigned lcore_rx; /* lcore ID for RX */
unsigned lcore_tx; /* lcore ID for TX */
uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
@@ -162,8 +161,8 @@ struct kni_interface_stats {
/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
-static int kni_change_mtu(uint8_t port_id, unsigned new_mtu);
-static int kni_config_network_interface(uint8_t port_id, uint8_t if_up);
+static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
+static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
@@ -171,7 +170,7 @@ static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
static void
print_stats(void)
{
- uint8_t i;
+ uint16_t i;
printf("\n**KNI example application statistics**\n"
"====== ============== ============ ============ ============ ============\n"
@@ -238,7 +237,8 @@ kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
static void
kni_ingress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_rx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
@@ -274,7 +274,8 @@ kni_ingress(struct kni_port_params *p)
static void
kni_egress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_tx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
@@ -416,7 +417,7 @@ parse_config(const char *arg)
int i, j, nb_token;
char *str_fld[_NUM_FLD];
unsigned long int_fld[_NUM_FLD];
- uint8_t port_id, nb_kni_port_params = 0;
+ uint16_t port_id, nb_kni_port_params = 0;
memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
while (((p = strchr(p0, '(')) != NULL) &&
@@ -445,7 +446,7 @@ parse_config(const char *arg)
}
i = 0;
- port_id = (uint8_t)int_fld[i++];
+ port_id = int_fld[i++];
if (port_id >= RTE_MAX_ETHPORTS) {
printf("Port ID %d could not exceed the maximum %d\n",
port_id, RTE_MAX_ETHPORTS);
@@ -601,7 +602,7 @@ init_kni(void)
/* Initialise a single port on an Ethernet device */
static void
-init_port(uint8_t port)
+init_port(uint16_t port)
{
int ret;
uint16_t nb_rxd = NB_RXD;
@@ -643,11 +644,12 @@ init_port(uint8_t port)
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status\n");
@@ -662,14 +664,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %uMbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -698,7 +699,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* Callback for request of changing MTU */
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
int ret;
struct rte_eth_conf conf;
@@ -741,7 +742,7 @@ kni_change_mtu(uint8_t port_id, unsigned new_mtu)
/* Callback for request of configuring network interface up/down */
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up)
+kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
int ret = 0;
@@ -766,7 +767,7 @@ kni_config_network_interface(uint8_t port_id, uint8_t if_up)
}
static int
-kni_alloc(uint8_t port_id)
+kni_alloc(uint16_t port_id)
{
uint8_t i;
struct rte_kni *kni;
@@ -790,7 +791,7 @@ kni_alloc(uint8_t port_id)
} else
snprintf(conf.name, RTE_KNI_NAMESIZE,
"vEth%u", port_id);
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
/*
* The first KNI device associated to a port
@@ -803,8 +804,11 @@ kni_alloc(uint8_t port_id)
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
- conf.addr = dev_info.pci_dev->addr;
- conf.id = dev_info.pci_dev->id;
+
+ if (dev_info.pci_dev) {
+ conf.addr = dev_info.pci_dev->addr;
+ conf.id = dev_info.pci_dev->id;
+ }
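
With the PCI bus moved out of the EAL, dev_info.pci_dev may now be NULL for ports that are not PCI devices (virtual devices in particular), so it must be checked before copying the address and id. A minimal probe illustrating the same guard:

	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.pci_dev == NULL)
		printf("port %u has no PCI address (virtual device?)\n",
				port_id);
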
memset(&ops, 0, sizeof(ops));
ops.port_id = port_id;
@@ -825,7 +829,7 @@ kni_alloc(uint8_t port_id)
}
static int
-kni_free_kni(uint8_t port_id)
+kni_free_kni(uint16_t port_id)
{
uint8_t i;
struct kni_port_params **p = kni_port_params_array;
@@ -848,7 +852,7 @@ int
main(int argc, char** argv)
{
int ret;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
unsigned i;
 	/* Associate signal_handler function with USR signals */
@@ -919,9 +923,6 @@ main(int argc, char** argv)
continue;
kni_free_kni(port);
}
-#ifdef RTE_LIBRTE_XEN_DOM0
- rte_kni_close();
-#endif
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
if (kni_port_params_array[i]) {
rte_free(kni_port_params_array[i]);
diff --git a/examples/l2fwd-cat/Makefile b/examples/l2fwd-cat/Makefile
index ae921ade..a7fe6d68 100644
--- a/examples/l2fwd-cat/Makefile
+++ b/examples/l2fwd-cat/Makefile
@@ -40,9 +40,6 @@ endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
-# Location of PQoS library and includes,
-PQOS_LIBRARY_PATH = $(PQOS_INSTALL_PATH)/libpqos.a
-
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
@@ -65,6 +62,6 @@ CFLAGS += -I$(PQOS_INSTALL_PATH)/../include
CFLAGS_cat.o := -D_GNU_SOURCE
LDLIBS += -L$(PQOS_INSTALL_PATH)
-LDLIBS += $(PQOS_LIBRARY_PATH)
+LDLIBS += -lpqos
include $(RTE_SDK)/mk/rte.extapp.mk
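
Linking now uses -lpqos instead of the static archive, so only the linker search path matters; a typical invocation (paths illustrative):

	make RTE_SDK=/path/to/dpdk RTE_TARGET=x86_64-native-linuxapp-gcc \
		PQOS_INSTALL_PATH=/usr/local/lib
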
diff --git a/examples/l2fwd-cat/cat.c b/examples/l2fwd-cat/cat.c
index 6133bf5b..689dcb11 100644
--- a/examples/l2fwd-cat/cat.c
+++ b/examples/l2fwd-cat/cat.c
@@ -53,7 +53,11 @@
static const struct pqos_cap *m_cap;
static const struct pqos_cpuinfo *m_cpu;
static const struct pqos_capability *m_cap_l3ca;
+#if PQOS_VERSION <= 103
static unsigned m_sockets[PQOS_MAX_SOCKETS];
+#else
+static unsigned int *m_sockets;
+#endif
static unsigned m_sock_count;
static struct cat_config m_config[PQOS_MAX_CORES];
static unsigned m_config_count;
@@ -271,16 +275,16 @@ parse_l3ca(const char *l3ca)
/* scan the separator '@', ','(next) or '\0'(finish) */
l3ca += strcspn(l3ca, "@,");
- if (*l3ca == '@') {
- /* explicit assign cpu_set */
- offset = parse_set(l3ca + 1, &cpuset);
- if (offset < 0 || CPU_COUNT(&cpuset) == 0)
- goto err;
+ if (*l3ca != '@')
+ goto err;
- end = l3ca + 1 + offset;
- } else
+	/* explicitly assign cpu_set */
+ offset = parse_set(l3ca + 1, &cpuset);
+ if (offset < 0 || CPU_COUNT(&cpuset) == 0)
goto err;
+ end = l3ca + 1 + offset;
+
if (*end != ',' && *end != '\0')
goto err;
@@ -353,9 +357,6 @@ parse_l3ca(const char *l3ca)
idx++;
} while (*end != '\0' && idx < PQOS_MAX_CORES);
- if (m_config_count == 0)
- goto err;
-
return 0;
err:
@@ -408,7 +409,11 @@ check_cpus(void)
goto exit;
}
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to read COS "
"associated to cpu %u.\n",
@@ -512,7 +517,11 @@ check_and_select_classes(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
for (j = 0; j < m_cpu->num_cores; j++) {
cpu_id = m_cpu->cores[j].lcore;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to read COS associated to "
"cpu %u on phy_pkg %u.\n", cpu_id, phy_pkg_id);
@@ -598,10 +607,19 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
l3ca.cdp = m_config[i].cdp;
if (m_config[i].cdp == 1) {
+#if PQOS_VERSION <= 103
l3ca.code_mask = m_config[i].code_mask;
l3ca.data_mask = m_config[i].data_mask;
+#else
+ l3ca.u.s.code_mask = m_config[i].code_mask;
+ l3ca.u.s.data_mask = m_config[i].data_mask;
+#endif
} else
+#if PQOS_VERSION <= 103
l3ca.ways_mask = m_config[i].mask;
+#else
+ l3ca.u.ways_mask = m_config[i].mask;
+#endif
for (j = 0; j < m_sock_count; j++) {
phy_pkg_id = m_sockets[j];
@@ -637,7 +655,11 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
cos_id = cos_id_map[i][phy_pkg_id];
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_set(cpu_id, cos_id);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to associate COS %u to "
"cpu %u\n", cos_id, cpu_id);
@@ -754,24 +776,43 @@ print_cat_config(void)
if (tab[n].cdp == 1) {
printf("PQOS: COS: %u, cMASK: 0x%llx, "
"dMASK: 0x%llx\n", tab[n].class_id,
+#if PQOS_VERSION <= 103
(unsigned long long)tab[n].code_mask,
(unsigned long long)tab[n].data_mask);
+#else
+ (unsigned long long)tab[n].u.s.code_mask,
+ (unsigned long long)tab[n].u.s.data_mask);
+#endif
} else {
printf("PQOS: COS: %u, MASK: 0x%llx\n",
tab[n].class_id,
+#if PQOS_VERSION <= 103
(unsigned long long)tab[n].ways_mask);
+#else
+ (unsigned long long)tab[n].u.ways_mask);
+#endif
}
}
}
for (i = 0; i < m_sock_count; i++) {
+#if PQOS_VERSION <= 103
unsigned lcores[PQOS_MAX_SOCKET_CORES] = {0};
+#else
+ unsigned int *lcores = NULL;
+#endif
unsigned lcount = 0;
unsigned n = 0;
+#if PQOS_VERSION <= 103
ret = pqos_cpu_get_cores(m_cpu, m_sockets[i],
PQOS_MAX_SOCKET_CORES, &lcount, &lcores[0]);
if (ret != PQOS_RETVAL_OK) {
+#else
+ lcores = pqos_cpu_get_cores(m_cpu, m_sockets[i],
+ &lcount);
+ if (lcores == NULL || lcount == 0) {
+#endif
printf("PQOS: Error retrieving core information!\n");
return;
}
@@ -780,13 +821,21 @@ print_cat_config(void)
for (n = 0; n < lcount; n++) {
unsigned class_id = 0;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(lcores[n], &class_id);
+#else
+ ret = pqos_alloc_assoc_get(lcores[n], &class_id);
+#endif
if (ret == PQOS_RETVAL_OK)
printf("PQOS: CPU: %u, COS: %u\n", lcores[n],
class_id);
else
printf("PQOS: CPU: %u, ERROR\n", lcores[n]);
}
+
+#if PQOS_VERSION > 103
+ free(lcores);
+#endif
}
}
@@ -849,7 +898,12 @@ cat_fini(void)
m_cap = NULL;
m_cpu = NULL;
m_cap_l3ca = NULL;
+#if PQOS_VERSION <= 103
memset(m_sockets, 0, sizeof(m_sockets));
+#else
+ if (m_sockets != NULL)
+ free(m_sockets);
+#endif
m_sock_count = 0;
memset(m_config, 0, sizeof(m_config));
m_config_count = 0;
@@ -875,7 +929,11 @@ cat_exit(void)
if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
continue;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_set(cpu_id, 0);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, 0);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to associate COS 0 to "
"cpu %u\n", cpu_id);
@@ -927,7 +985,9 @@ cat_init(int argc, char **argv)
/* PQoS Initialization - Check and initialize CAT capability */
cfg.fd_log = STDOUT_FILENO;
cfg.verbose = 0;
+#if PQOS_VERSION <= 103
cfg.cdp_cfg = PQOS_REQUIRE_CDP_ANY;
+#endif
ret = pqos_init(&cfg);
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Error initializing PQoS library!\n");
@@ -953,9 +1013,14 @@ cat_init(int argc, char **argv)
}
/* Get CPU socket information */
+#if PQOS_VERSION <= 103
ret = pqos_cpu_get_sockets(m_cpu, PQOS_MAX_SOCKETS, &m_sock_count,
m_sockets);
if (ret != PQOS_RETVAL_OK) {
+#else
+ m_sockets = pqos_cpu_get_sockets(m_cpu, &m_sock_count);
+ if (m_sockets == NULL) {
+#endif
printf("PQOS: Error retrieving CPU socket information!\n");
ret = -EFAULT;
goto err;
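
The PQOS_VERSION guards select between the pre-1.04 API (caller-provided fixed-size buffers, pqos_l3ca_assoc_*) and the newer one (library-allocated arrays, pqos_alloc_assoc_*). How the macro gets defined is not shown in this patch; one plausible arrangement (an assumption, not taken from this diff) is a compile-time define:

	# assumption: value tracks the installed libpqos, e.g. 103 for v1.03
	CFLAGS += -DPQOS_VERSION=103
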
diff --git a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c
index c293bd9c..b10ac896 100644
--- a/examples/l2fwd-cat/l2fwd-cat.c
+++ b/examples/l2fwd-cat/l2fwd-cat.c
@@ -59,7 +59,7 @@ static const struct rte_eth_conf port_conf_default = {
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -106,7 +106,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -124,8 +124,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
@@ -181,7 +181,7 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
@@ -217,7 +217,7 @@ main(int argc, char *argv[])
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index f020be32..d4e1682c 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -67,8 +67,6 @@
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
-#include <rte_memzone.h>
-#include <rte_pci.h>
#include <rte_per_lcore.h>
#include <rte_prefetch.h>
#include <rte_random.h>
@@ -86,6 +84,8 @@ enum cdev_type {
#define MAX_STR_LEN 32
#define MAX_KEY_SIZE 128
+#define MAX_IV_SIZE 16
+#define MAX_AAD_SIZE 65535
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define MAX_SESSIONS 32
@@ -112,7 +112,7 @@ static uint64_t l2fwd_enabled_port_mask;
static uint64_t l2fwd_enabled_crypto_mask;
/* list of enabled ports */
-static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
struct pkt_buffer {
@@ -139,7 +139,7 @@ enum l2fwd_crypto_xform_chain {
struct l2fwd_key {
uint8_t *data;
uint32_t length;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
};
struct l2fwd_iv {
@@ -224,7 +224,7 @@ struct l2fwd_crypto_params {
/** lcore configuration */
struct lcore_queue_conf {
unsigned nb_rx_ports;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
@@ -290,7 +290,7 @@ print_stats(void)
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
uint64_t total_packets_enqueued, total_packets_dequeued,
total_packets_errors;
- unsigned portid;
+ uint16_t portid;
uint64_t cdevid;
total_packets_dropped = 0;
@@ -496,7 +496,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
/* For wireless algorithms, offset/length must be in bits */
@@ -534,7 +534,16 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
IV_OFFSET);
/* Copy IV at the end of the crypto operation */
- rte_memcpy(iv_ptr, cparams->aead_iv.data, cparams->aead_iv.length);
+ /*
+		 * If doing AES-CCM, the nonce is copied one byte
+		 * after the start of the IV field
+ */
+ if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
+ cparams->aead_iv.length);
+ else
+ rte_memcpy(iv_ptr, cparams->aead_iv.data,
+ cparams->aead_iv.length);
op->sym->aead.data.offset = ipdata_offset;
op->sym->aead.data.length = data_len;
@@ -548,7 +557,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
if (cparams->aad.length) {
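
The one-byte shift follows the cryptodev convention for AES-CCM: the first IV byte is reserved for the counter-block flags, so the nonce occupies bytes 1..aead_iv.length:

	/* CCM IV buffer as the PMD expects it (sketch):
	 *   iv_ptr[0]                     reserved flags byte
	 *   iv_ptr[1..aead_iv.length]     nonce
	 */
	rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
			cparams->aead_iv.length);
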
@@ -566,7 +575,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
/* Send the burst of packets on an output interface */
static int
l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
- uint8_t port)
+ uint16_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
@@ -587,7 +596,7 @@ l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
/* Enqueue packets for TX and prepare them to be sent */
static int
-l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -610,7 +619,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
}
static void
-l2fwd_mac_updating(struct rte_mbuf *m, unsigned int dest_portid)
+l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
{
struct ether_hdr *eth;
void *tmp;
@@ -626,17 +635,17 @@ l2fwd_mac_updating(struct rte_mbuf *m, unsigned int dest_portid)
}
static void
-l2fwd_simple_forward(struct rte_mbuf *m, unsigned int portid,
+l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct l2fwd_crypto_options *options)
{
- unsigned int dst_port;
+ uint16_t dst_port;
dst_port = l2fwd_dst_ports[portid];
if (options->mac_updating)
l2fwd_mac_updating(m, dst_port);
- l2fwd_send_packet(m, (uint8_t) dst_port);
+ l2fwd_send_packet(m, dst_port);
}
/** Generate random key */
@@ -708,7 +717,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
- unsigned i, j, portid, nb_rx, len;
+ unsigned int i, j, nb_rx, len;
+ uint16_t portid;
struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -796,6 +806,14 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (!options->aad_param)
generate_random_key(port_cparams[i].aad.data,
port_cparams[i].aad.length);
+ /*
+			 * If doing AES-CCM, the first 18 bytes have to be
+			 * reserved, and the actual AAD should start at byte 18
+ */
+ if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ memmove(port_cparams[i].aad.data + 18,
+ port_cparams[i].aad.data,
+ port_cparams[i].aad.length);
} else
port_cparams[i].aad.length = 0;
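
The 18-byte shift mirrors the cryptodev CCM convention of reserving room at the head of the AAD buffer (for the B0 block and the encoded AAD length); restated with an illustrative constant name:

	#define CCM_AAD_RESERVED 18	/* illustrative name, not from the example */
	memmove(port_cparams[i].aad.data + CCM_AAD_RESERVED,
			port_cparams[i].aad.data,
			port_cparams[i].aad.length);
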
@@ -865,7 +883,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
qconf->pkt_buf[portid].len,
- (uint8_t) portid);
+ portid);
qconf->pkt_buf[portid].len = 0;
}
@@ -899,7 +917,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
cparams = &port_cparams[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -1081,15 +1099,16 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
return -1;
}
-/** Parse crypto key command line argument */
+/** Parse bytes from command line argument */
static int
-parse_key(uint8_t *data, char *input_arg)
+parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
{
unsigned byte_count;
char *token;
+ errno = 0;
for (byte_count = 0, token = strtok(input_arg, ":");
- (byte_count < MAX_KEY_SIZE) && (token != NULL);
+ (byte_count < max_size) && (token != NULL);
token = strtok(NULL, ":")) {
int number = (int)strtol(token, NULL, 16);
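
The hunk shows only the head of the generalized parser; a complete sketch consistent with it (the error handling is an assumption where the diff is silent):

	static int
	parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
	{
		unsigned int byte_count;
		char *token;

		errno = 0;
		for (byte_count = 0, token = strtok(input_arg, ":");
				(byte_count < max_size) && (token != NULL);
				token = strtok(NULL, ":")) {
			int number = (int)strtol(token, NULL, 16);

			/* reject non-hex tokens and values over one byte */
			if (errno == EINVAL || errno == ERANGE ||
					number < 0 || number > 0xFF)
				return -1;

			data[byte_count++] = (uint8_t)number;
		}
		return byte_count;
	}
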
@@ -1229,7 +1248,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
options->ckey_param = 1;
options->cipher_xform.cipher.key.length =
- parse_key(options->cipher_xform.cipher.key.data, optarg);
+ parse_bytes(options->cipher_xform.cipher.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->cipher_xform.cipher.key.length > 0)
return 0;
else
@@ -1242,7 +1262,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
options->cipher_iv_param = 1;
options->cipher_iv.length =
- parse_key(options->cipher_iv.data, optarg);
+ parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE);
if (options->cipher_iv.length > 0)
return 0;
else
@@ -1265,7 +1285,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
options->akey_param = 1;
options->auth_xform.auth.key.length =
- parse_key(options->auth_xform.auth.key.data, optarg);
+ parse_bytes(options->auth_xform.auth.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->auth_xform.auth.key.length > 0)
return 0;
else
@@ -1279,7 +1300,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
options->auth_iv_param = 1;
options->auth_iv.length =
- parse_key(options->auth_iv.data, optarg);
+ parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE);
if (options->auth_iv.length > 0)
return 0;
else
@@ -1302,7 +1323,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
options->aead_key_param = 1;
options->aead_xform.aead.key.length =
- parse_key(options->aead_xform.aead.key.data, optarg);
+ parse_bytes(options->aead_xform.aead.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->aead_xform.aead.key.length > 0)
return 0;
else
@@ -1316,7 +1338,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
options->aead_iv_param = 1;
options->aead_iv.length =
- parse_key(options->aead_iv.data, optarg);
+ parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE);
if (options->aead_iv.length > 0)
return 0;
else
@@ -1329,7 +1351,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aad") == 0) {
options->aad_param = 1;
options->aad.length =
- parse_key(options->aad.data, optarg);
+ parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE);
if (options->aad.length > 0)
return 0;
else
@@ -1731,11 +1753,12 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1750,14 +1773,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -2319,7 +2341,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
static int
initialize_ports(struct l2fwd_crypto_options *options)
{
- uint8_t last_portid, portid;
+ uint16_t last_portid, portid;
unsigned enabled_portcount = 0;
unsigned nb_ports = rte_eth_dev_count();
@@ -2340,12 +2362,12 @@ initialize_ports(struct l2fwd_crypto_options *options)
continue;
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (retval < 0) {
printf("Cannot configure device: err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2353,7 +2375,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
&nb_txd);
if (retval < 0) {
printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2364,7 +2386,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
NULL, l2fwd_pktmbuf_pool);
if (retval < 0) {
printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2375,7 +2397,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
NULL);
if (retval < 0) {
printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2384,7 +2406,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
retval = rte_eth_dev_start(portid);
if (retval < 0) {
printf("rte_eth_dev_start:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2393,7 +2415,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
@@ -2461,7 +2483,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
if (options->aad.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
- options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
+ options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
}
int
@@ -2470,7 +2492,8 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct l2fwd_crypto_options options;
- uint8_t nb_ports, nb_cryptodevs, portid, cdev_id;
+ uint8_t nb_cryptodevs, cdev_id;
+ uint16_t nb_ports, portid;
unsigned lcore_id, rx_lcore_id;
int ret, enabled_cdevcount, enabled_portcount;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
@@ -2547,7 +2570,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->nb_rx_ports] = portid;
qconf->nb_rx_ports++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned)portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
/* Enable Crypto devices */
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 98936206..485370de 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -43,7 +43,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -53,7 +52,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -176,7 +174,7 @@ show_lcore_stats(unsigned lcore_id)
uint64_t busy, busy_min, busy_max;
/* Jobs statistics. */
- const uint8_t port_cnt = qconf->n_rx_port;
+ const uint16_t port_cnt = qconf->n_rx_port;
uint64_t jobs_exec_cnt[port_cnt], jobs_period[port_cnt];
uint64_t jobs_exec[port_cnt], jobs_exec_min[port_cnt],
jobs_exec_max[port_cnt];
@@ -414,11 +412,11 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *m;
- const uint8_t port_idx = (uintptr_t) arg;
+ const uint16_t port_idx = (uintptr_t) arg;
const unsigned lcore_id = rte_lcore_id();
struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
struct rte_jobstats *job = &qconf->port_fwd_jobs[port_idx];
- const uint8_t portid = qconf->rx_port_list[port_idx];
+ const uint16_t portid = qconf->rx_port_list[port_idx];
uint8_t j;
uint16_t total_nb_rx;
@@ -428,7 +426,7 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
 	/* Call rx burst 2 times. This allows rte_jobstats logic to see if this
* function must be called more frequently. */
- total_nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
+ total_nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
for (j = 0; j < total_nb_rx; j++) {
@@ -438,7 +436,7 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
}
if (total_nb_rx == MAX_PKT_BURST) {
- const uint16_t nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
+ const uint16_t nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
total_nb_rx += nb_rx;
@@ -464,7 +462,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
uint64_t now;
unsigned lcore_id;
struct lcore_queue_conf *qconf;
- uint8_t portid;
+ uint16_t portid;
unsigned i;
uint32_t sent;
struct rte_eth_dev_tx_buffer *buffer;
@@ -714,11 +712,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -733,14 +732,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -776,9 +774,9 @@ main(int argc, char **argv)
unsigned nb_ports_in_mask = 0;
int ret;
char name[RTE_JOBSTATS_NAMESIZE];
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
uint8_t i;
/* init EAL */
@@ -861,7 +859,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -870,24 +868,24 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
@@ -899,7 +897,7 @@ main(int argc, char **argv)
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -907,8 +905,9 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -916,7 +915,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -924,21 +923,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done:\n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 83bc542c..358ca5ec 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -51,7 +51,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -61,7 +60,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -157,7 +155,7 @@ print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
__attribute__((unused)) void *ptr_data)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
@@ -299,7 +297,7 @@ l2fwd_main_loop(void)
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -479,11 +477,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -498,14 +497,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -560,9 +558,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
struct sigaction signal_handler;
@@ -653,7 +651,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
printf("Lcore %u: RX port %u\n",
- rx_lcore_id, (unsigned) portid);
+ rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -662,26 +660,25 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n",
- (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
@@ -694,7 +691,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -704,7 +701,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -712,7 +709,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -720,21 +717,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: "
"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 14263358..e89e2e1b 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -52,7 +52,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -62,7 +61,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -304,7 +302,7 @@ l2fwd_main_loop(void)
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -480,11 +478,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print the status when done */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -503,14 +502,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -553,9 +551,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
@@ -644,7 +642,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -653,24 +651,24 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
@@ -682,7 +680,7 @@ main(int argc, char **argv)
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -691,7 +689,7 @@ main(int argc, char **argv)
NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -699,7 +697,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -707,21 +705,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done: \n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 8eff4de4..e50b1a1a 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -121,7 +119,7 @@ static int promiscuous_on; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -131,7 +129,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -184,7 +182,7 @@ static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
#endif
static inline void
-send_single_packet(struct rte_mbuf *m, uint8_t port);
+send_single_packet(struct rte_mbuf *m, uint16_t port);
#define MAX_ACL_RULE_NUM 100000
#define DEFAULT_MAX_CATEGORIES 1
@@ -1026,6 +1024,7 @@ add_rules(const char *rule_path,
char buff[LINE_MAX];
FILE *fh = fopen(rule_path, "rb");
unsigned int i = 0;
+ int val;
if (fh == NULL)
rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
@@ -1042,7 +1041,11 @@ add_rules(const char *rule_path,
rte_exit(EXIT_FAILURE, "Not find any route entries in %s!\n",
rule_path);
- fseek(fh, 0, SEEK_SET);
+ val = fseek(fh, 0, SEEK_SET);
+ if (val < 0) {
+ rte_exit(EXIT_FAILURE, "%s: File seek operation failed\n",
+ __func__);
+ }
acl_rules = calloc(acl_num, rule_size);
@@ -1297,7 +1300,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Enqueue a single packet, and send burst if queue is filled */
static inline void
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
struct lcore_conf *qconf;
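The send_single_packet() helper above implements the enqueue-and-flush pattern used throughout these examples: packets are buffered per output port and transmitted as one burst once MAX_PKT_BURST of them have accumulated. A minimal stand-alone sketch of the same pattern follows; it stubs out the actual transmit with a printf, and the names tx_queue, enqueue and flush are invented for this illustration rather than taken from the DPDK API (the real code calls rte_eth_tx_burst()).

#include <stdint.h>
#include <stdio.h>

#define MAX_PKT_BURST 32

/* Hypothetical per-port TX buffer: packets wait here so the
 * per-burst transmit cost is amortized over many packets. */
struct tx_queue {
	uint16_t len;
	void *pkts[MAX_PKT_BURST];
};

/* Stand-in for rte_eth_tx_burst(); here we only report the burst. */
static void
flush(struct tx_queue *q, uint16_t port)
{
	printf("port %u: TX burst of %u packets\n", port, q->len);
	q->len = 0;
}

/* Same shape as send_single_packet(): enqueue one packet and flush
 * the whole buffer once it is full. */
static void
enqueue(struct tx_queue *q, uint16_t port, void *pkt)
{
	q->pkts[q->len++] = pkt;
	if (q->len == MAX_PKT_BURST)
		flush(q, port);
}

int
main(void)
{
	struct tx_queue q = { 0 };
	int i, dummy;

	for (i = 0; i < 70; i++)
		enqueue(&q, 1, &dummy);	/* flushes at 32 and 64 */
	if (q.len > 0)
		flush(&q, 1);		/* drain the 6 leftover packets */
	return 0;
}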
@@ -1358,7 +1361,8 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1381,7 +1385,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -1500,7 +1504,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -1829,11 +1833,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1848,14 +1853,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -1893,7 +1897,8 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint16_t portid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index fd442f5e..0a4ed145 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -50,7 +50,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -174,7 +172,7 @@ enum freq_scale_hint_t
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
@@ -190,7 +188,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -308,8 +306,8 @@ static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#define IPV6_L3FWD_NUM_ROUTES \
(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
@@ -370,7 +368,7 @@ static struct rte_timer power_timers[RTE_MAX_LCORE];
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
- unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
+ unsigned int lcore_id, uint16_t port_id, uint16_t queue_id);
/* exit signal handler */
static void
@@ -451,7 +449,7 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
struct lcore_conf *qconf;
@@ -523,8 +521,8 @@ print_ipv6_key(struct ipv6_5tuple key)
key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
@@ -559,11 +557,11 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
@@ -599,18 +597,18 @@ get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
next_hop : portid);
}
@@ -634,7 +632,7 @@ parse_ptype_one(struct rte_mbuf *m)
}
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -648,7 +646,7 @@ cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
}
static int
-add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+add_cb_parse_ptype(uint16_t portid, uint16_t queueid)
{
printf("Port %d: softly parse packet type info\n", portid);
if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
@@ -659,13 +657,13 @@ add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
}
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *d_addr_bytes;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -752,13 +750,11 @@ power_idle_heuristic(uint32_t zero_rx_packet_count)
*/
else
return SUSPEND_THRESHOLD;
-
- return 0;
}
static inline enum freq_scale_hint_t
power_freq_scaleup_heuristic(unsigned lcore_id,
- uint8_t port_id,
+ uint16_t port_id,
uint16_t queue_id)
{
/**
@@ -805,7 +801,8 @@ sleep_until_rx_interrupt(int num)
{
struct rte_epoll_event event[num];
int n, i;
- uint8_t port_id, queue_id;
+ uint16_t port_id;
+ uint8_t queue_id;
void *data;
RTE_LOG(INFO, L3FWD_POWER,
@@ -832,7 +829,8 @@ static void turn_on_intr(struct lcore_conf *qconf)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t port_id, queue_id;
+ uint8_t queue_id;
+ uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
rx_queue = &(qconf->rx_queue_list[i]);
@@ -848,7 +846,8 @@ static void turn_on_intr(struct lcore_conf *qconf)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
uint32_t data;
int ret;
int i;
@@ -879,7 +878,8 @@ main_loop(__attribute__((unused)) void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -904,7 +904,7 @@ main_loop(__attribute__((unused)) void *dummy)
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
"rxqueueid=%hhu\n", lcore_id, portid, queueid);
}
@@ -1111,7 +1111,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -1541,11 +1541,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1594,7 +1595,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
-static int check_ptype(uint8_t portid)
+static int check_ptype(uint16_t portid)
{
int i, ret;
int ptype_l3_ipv4 = 0;
@@ -1645,13 +1646,14 @@ main(int argc, char **argv)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret;
- unsigned nb_ports;
+ uint16_t nb_ports;
uint16_t queueid;
unsigned lcore_id;
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
+ uint16_t portid;
uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
@@ -1751,7 +1753,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (qconf->tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
}
@@ -1871,7 +1873,7 @@ main(int argc, char **argv)
rte_spinlock_init(&(locks[portid]));
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 34e4a6be..6ef89fc8 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -59,7 +58,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -155,7 +153,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -165,7 +163,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -300,7 +298,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static rte_spinlock_t spinlock_conf[RTE_MAX_ETHPORTS] = {RTE_SPINLOCK_INITIALIZER};
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -324,7 +322,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
@@ -396,8 +394,9 @@ print_key(struct ipv4_5tuple key)
(unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint16_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
struct tcp_hdr *tcp;
@@ -430,29 +429,31 @@ get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd
/* Find destination port */
ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint32_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
- next_hop : portid);
+ return ((rte_lpm_lookup(l3fwd_lookup_struct,
+ rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
+ next_hop : portid);
}
#endif
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *tmp;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -496,7 +497,8 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -516,8 +518,8 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
- portid, queueid);
+ RTE_LOG(INFO, L3FWD, " --lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
}
while (1) {
@@ -624,7 +626,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -676,8 +678,8 @@ print_usage(const char *prgname)
static void
signal_handler(int signum)
{
- uint8_t portid;
- uint8_t nb_ports = rte_eth_dev_count();
+ uint16_t portid;
+ uint16_t nb_ports = rte_eth_dev_count();
/* When we receive a SIGINT signal */
if (signum == SIGINT) {
@@ -749,7 +751,7 @@ parse_config(const char *q_arg)
nb_lcore_params);
return -1;
}
- lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
+ lcore_params_array[nb_lcore_params].port_id = int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -953,11 +955,11 @@ main(int argc, char **argv)
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t nb_lcores;
uint16_t n_tx_queue;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
signal(SIGINT, signal_handler);
/* init EAL */
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index 011ba148..4bb15943 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -83,7 +83,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -117,7 +117,7 @@ extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -139,7 +139,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct lcore_conf *qconf,
- struct rte_mbuf *m, uint8_t port)
+ struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
@@ -212,11 +212,11 @@ int
lpm_check_ptype(int portid);
uint16_t
-em_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+em_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
uint16_t
-lpm_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+lpm_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
int
diff --git a/examples/l3fwd/l3fwd_altivec.h b/examples/l3fwd/l3fwd_altivec.h
new file mode 100644
index 00000000..a1d25eaa
--- /dev/null
+++ b/examples/l3fwd/l3fwd_altivec.h
@@ -0,0 +1,284 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _L3FWD_ALTIVEC_H_
+#define _L3FWD_ALTIVEC_H_
+
+#include "l3fwd.h"
+#include "l3fwd_common.h"
+
+/*
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+{
+ vector unsigned int te[FWDSTEP];
+ vector unsigned int ve[FWDSTEP];
+ vector unsigned int *p[FWDSTEP];
+
+ p[0] = rte_pktmbuf_mtod(pkt[0], vector unsigned int *);
+ p[1] = rte_pktmbuf_mtod(pkt[1], vector unsigned int *);
+ p[2] = rte_pktmbuf_mtod(pkt[2], vector unsigned int *);
+ p[3] = rte_pktmbuf_mtod(pkt[3], vector unsigned int *);
+
+ ve[0] = (vector unsigned int)val_eth[dst_port[0]];
+ te[0] = *p[0];
+
+ ve[1] = (vector unsigned int)val_eth[dst_port[1]];
+ te[1] = *p[1];
+
+ ve[2] = (vector unsigned int)val_eth[dst_port[2]];
+ te[2] = *p[2];
+
+ ve[3] = (vector unsigned int)val_eth[dst_port[3]];
+ te[3] = *p[3];
+
+ /* Update first 12 bytes, keep the rest intact. */
+ te[0] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[0],
+ (vector unsigned short)te[0],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ te[1] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[1],
+ (vector unsigned short)te[1],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ te[2] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[2],
+ (vector unsigned short)te[2],
+ (vector unsigned short) {0, 0, 0, 0, 0,
+ 0, 0xffff, 0xffff});
+
+ te[3] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[3],
+ (vector unsigned short)te[3],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ *p[0] = te[0];
+ *p[1] = te[1];
+ *p[2] = te[2];
+ *p[3] = te[3];
+
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+ &dst_port[0], pkt[0]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+ &dst_port[1], pkt[1]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+ &dst_port[2], pkt[2]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+ &dst_port[3], pkt[3]->packet_type);
+}
+
+/*
+ * Group consecutive packets with the same destination port in bursts of 4.
+ * Suppose we have an array of destination ports:
+ * dst_port[] = {a, b, c, d, e, ... }
+ * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+ * We do 4 comparisons at once and the result is a 4-bit mask.
+ * The mask is used as an index into a prebuilt array of pnum values.
+ * (A scalar sketch of this grouping idea follows this file listing.)
+ */
+static inline uint16_t *
+port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, vector unsigned short dp1,
+ vector unsigned short dp2)
+{
+ union {
+ uint16_t u16[FWDSTEP + 1];
+ uint64_t u64;
+ } *pnum = (void *)pn;
+
+ int32_t v;
+
+ v = vec_any_eq(dp1, dp2);
+
+ /* update last port counter. */
+ lp[0] += gptbl[v].lpv;
+
+ /* if dest port value has changed. */
+ if (v != GRPMSK) {
+ pnum->u64 = gptbl[v].pnum;
+ pnum->u16[FWDSTEP] = 1;
+ lp = pnum->u16 + gptbl[v].idx;
+ }
+
+ return lp;
+}
+
+/**
+ * Process one packet:
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
+{
+ struct ether_hdr *eth_hdr;
+ vector unsigned int te, ve;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ te = *(vector unsigned int *)eth_hdr;
+ ve = (vector unsigned int)val_eth[dst_port[0]];
+
+ rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ pkt->packet_type);
+
+ /* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
+ te = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve,
+ (vector unsigned short)te,
+ (vector unsigned short){0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ *(vector unsigned int *)eth_hdr = te;
+}
+
+/**
+ * Send a burst of packets from pkts_burst to the ports in the dst_port array.
+ */
+static __rte_always_inline void
+send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
+{
+ int32_t k;
+ int j = 0;
+ uint16_t dlp;
+ uint16_t *lp;
+ uint16_t pnum[MAX_PKT_BURST + 1];
+
+ /*
+ * Finish packet processing and group consecutive
+ * packets with the same destination port.
+ */
+ k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ if (k != 0) {
+ vector unsigned short dp1, dp2;
+
+ lp = pnum;
+ lp[0] = 1;
+
+ processx4_step3(pkts_burst, dst_port);
+
+ /* dp1: <d[0], d[1], d[2], d[3], ... > */
+ dp1 = *(vector unsigned short *)dst_port;
+
+ for (j = FWDSTEP; j != k; j += FWDSTEP) {
+ processx4_step3(&pkts_burst[j], &dst_port[j]);
+
+ /*
+ * dp2:
+ * <d[j-3], d[j-2], d[j-1], d[j], ... >
+ */
+ dp2 = *((vector unsigned short *)
+ &dst_port[j - FWDSTEP + 1]);
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * dp1:
+ * <d[j], d[j+1], d[j+2], d[j+3], ... >
+ */
+ dp1 = vec_sro(dp2, (vector unsigned char) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])});
+ }
+
+ /*
+ * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
+ */
+ dp2 = vec_perm(dp1, (vector unsigned short){},
+ (vector unsigned char){0xf9});
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * remove values added by the last repeated
+ * dst port.
+ */
+ lp[0]--;
+ dlp = dst_port[j - 1];
+ } else {
+ /* set dlp and lp to values that are never used. */
+ dlp = BAD_PORT - 1;
+ lp = pnum + MAX_PKT_BURST;
+ }
+
+ /* Process up to the last 3 packets one by one. */
+ switch (nb_rx % FWDSTEP) {
+ case 3:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fall-through */
+ case 2:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fall-through */
+ case 1:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ }
+
+ /*
+ * Send the packets out through their destination ports.
+ * Consecutive packets with the same destination port
+ * are already grouped together.
+ * If a packet's destination port equals BAD_PORT,
+ * free the packet without sending it out.
+ */
+ for (j = 0; j < nb_rx; j += k) {
+
+ int32_t m;
+ uint16_t pn;
+
+ pn = dst_port[j];
+ k = pnum[j];
+
+ if (likely(pn != BAD_PORT))
+ send_packetsx4(qconf, pn, pkts_burst + j, k);
+ else
+ for (m = j; m != j + k; m++)
+ rte_pktmbuf_free(pkts_burst[m]);
+
+ }
+}
+
+#endif /* _L3FWD_ALTIVEC_H_ */
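The grouping comment above (4 comparisons at once, a 4-bit mask, a prebuilt pnum table) is easiest to see in scalar form. The sketch below computes the same (port, count) runs that port_groupx4() encodes, but with a plain loop in place of the vector compare and gptbl lookup; struct run and group_ports() are invented names for this illustration, not DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical run descriptor: one destination port and the number of
 * consecutive packets in the burst that share it. */
struct run {
	uint16_t port;
	uint16_t count;
};

/* Scalar model of what port_groupx4() computes with vec_any_eq() and
 * the prebuilt gptbl: collapse dst_port[] into (port, count) runs. */
static int
group_ports(const uint16_t *dst_port, int n, struct run *runs)
{
	int n_runs = 0, i;

	for (i = 0; i < n; i++) {
		if (n_runs > 0 && runs[n_runs - 1].port == dst_port[i]) {
			runs[n_runs - 1].count++;	/* extend current run */
		} else {
			runs[n_runs].port = dst_port[i];	/* start new run */
			runs[n_runs].count = 1;
			n_runs++;
		}
	}
	return n_runs;
}

int
main(void)
{
	/* dst_port[] = {1, 1, 1, 2, 2, 3, 3, 3} -> runs 1x3, 2x2, 3x3,
	 * so send_packets_multi() would issue three TX bursts. */
	const uint16_t dst_port[] = {1, 1, 1, 2, 2, 3, 3, 3};
	struct run runs[8];
	int i, n = group_ports(dst_port, 8, runs);

	for (i = 0; i < n; i++)
		printf("port %u x %u\n", runs[i].port, runs[i].count);
	return 0;
}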
diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h
index 2867365d..7002a43a 100644
--- a/examples/l3fwd/l3fwd_common.h
+++ b/examples/l3fwd/l3fwd_common.h
@@ -207,7 +207,7 @@ static const struct {
};
static __rte_always_inline void
-send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[],
+send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
uint32_t num)
{
uint32_t len, j, n;
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 53d081bd..2b7c173b 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -274,8 +274,8 @@ em_mask_key(void *key, xmm_t mask)
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
-static inline uint8_t
-em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+static inline uint16_t
+em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
int ret = 0;
union ipv4_5tuple_host key;
@@ -292,11 +292,11 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
+ return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret];
}
-static inline uint8_t
-em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+static inline uint16_t
+em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
int ret = 0;
union ipv6_5tuple_host key;
@@ -325,7 +325,7 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
+ return (ret < 0) ? portid : ipv6_l3fwd_out_if[ret];
}
#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
@@ -628,7 +628,7 @@ em_parse_ptype(struct rte_mbuf *m)
}
uint16_t
-em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -649,7 +649,8 @@ em_main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -671,7 +672,7 @@ em_main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h
index d509a1fc..302291d7 100644
--- a/examples/l3fwd/l3fwd_em.h
+++ b/examples/l3fwd/l3fwd_em.h
@@ -35,12 +35,12 @@
#define __L3FWD_EM_H__
static __rte_always_inline void
-l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
@@ -112,7 +112,7 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
*/
static inline void
l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index 520672d5..9d7afe05 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -52,7 +52,7 @@
static __rte_always_inline void
em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
@@ -68,7 +68,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv4_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
@@ -79,7 +79,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
static __rte_always_inline void
em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
@@ -95,7 +95,7 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv6_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
@@ -106,9 +106,9 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
- uint8_t next_hop;
+ uint16_t next_hop;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
@@ -158,7 +158,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i, j, pos;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h
index cb7c2abb..fa89f0f3 100644
--- a/examples/l3fwd/l3fwd_em_sequential.h
+++ b/examples/l3fwd/l3fwd_em_sequential.h
@@ -51,7 +51,7 @@
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
uint8_t next_hop;
struct ipv4_hdr *ipv4_hdr;
@@ -103,7 +103,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i, j;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index ff1e4035..2d0e1724 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -105,7 +105,7 @@ struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static inline uint16_t
-lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm *ipv4_l3fwd_lookup_struct =
@@ -117,7 +117,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
}
static inline uint16_t
-lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
@@ -130,7 +130,7 @@ lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -162,7 +162,7 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint32_t dst_ipv4, uint8_t portid)
+ uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
@@ -191,6 +191,8 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
#include "l3fwd_lpm_sse.h"
#elif defined RTE_MACHINE_CPUFLAG_NEON
#include "l3fwd_lpm_neon.h"
+#elif defined(RTE_ARCH_PPC_64)
+#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif
@@ -203,7 +205,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -225,7 +228,7 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -263,7 +266,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
if (nb_rx == 0)
continue;
-#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+ || defined RTE_ARCH_PPC_64
l3fwd_lpm_send_packets(nb_rx, pkts_burst,
portid, qconf);
#else
@@ -413,7 +417,7 @@ lpm_parse_ptype(struct rte_mbuf *m)
}
uint16_t
-lpm_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index 55c3e832..53b7fc80 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -35,7 +35,7 @@
#define __L3FWD_LPM_H__
static __rte_always_inline void
-l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
@@ -104,7 +104,7 @@ l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
static inline void
l3fwd_lpm_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
new file mode 100644
index 00000000..36ca983f
--- /dev/null
+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __L3FWD_LPM_ALTIVEC_H__
+#define __L3FWD_LPM_ALTIVEC_H__
+
+#include "l3fwd_altivec.h"
+
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+ vector unsigned int *dip,
+ uint32_t *ipv4_flag)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *eth_hdr;
+ uint32_t x0, x1, x2, x3;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x0 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x1 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[1]->packet_type;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x2 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[2]->packet_type;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x3 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[3]->packet_type;
+
+ rte_compiler_barrier();
+ dip[0] = (vector unsigned int){x0, x1, x2, x3};
+}
+
+/*
+ * Look up the destination port in the LPM table.
+ * If the lookup fails, use the incoming port (portid) as the destination.
+ * (A scalar sketch of this fallback follows this file listing.)
+ */
+static inline void
+processx4_step2(const struct lcore_conf *qconf,
+ vector unsigned int dip,
+ uint32_t ipv4_flag,
+ uint8_t portid,
+ struct rte_mbuf *pkt[FWDSTEP],
+ uint16_t dprt[FWDSTEP])
+{
+ rte_xmm_t dst;
+ const vector unsigned char bswap_mask = (vector unsigned char){
+ 3, 2, 1, 0,
+ 7, 6, 5, 4,
+ 11, 10, 9, 8,
+ 15, 14, 13, 12};
+
+ /* Byte swap 4 IPV4 addresses. */
+ dip = (vector unsigned int)vec_perm(*(vector unsigned char *)&dip,
+ (vector unsigned char){}, bswap_mask);
+
+ /* if all 4 packets are IPV4. */
+ if (likely(ipv4_flag)) {
+ rte_lpm_lookupx4(qconf->ipv4_lookup_struct, (xmm_t)dip,
+ (uint32_t *)&dst, portid);
+ /* get rid of the unused upper 16 bits of each dport. */
+ dst.x = (xmm_t)vec_packs(dst.x, dst.x);
+ *(uint64_t *)dprt = dst.u64[0];
+ } else {
+ dst.x = (xmm_t)dip;
+ dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0],
+ dst.u32[0], portid);
+ dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1],
+ dst.u32[1], portid);
+ dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2],
+ dst.u32[2], portid);
+ dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3],
+ dst.u32[3], portid);
+ }
+}
+
+/*
+ * Buffer-optimized handling of packets, invoked from main_loop.
+ */
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+ uint8_t portid, struct lcore_conf *qconf)
+{
+ int32_t j;
+ uint16_t dst_port[MAX_PKT_BURST];
+ vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
+ uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+ const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+
+ for (j = 0; j != k; j += FWDSTEP)
+ processx4_step1(&pkts_burst[j], &dip[j / FWDSTEP],
+ &ipv4_flag[j / FWDSTEP]);
+
+ for (j = 0; j != k; j += FWDSTEP)
+ processx4_step2(qconf, dip[j / FWDSTEP],
+ ipv4_flag[j / FWDSTEP],
+ portid, &pkts_burst[j], &dst_port[j]);
+
+ /* Classify up to the last 3 packets one by one. */
+ switch (nb_rx % FWDSTEP) {
+ case 3:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ /* fall-through */
+ case 2:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ /* fall-through */
+ case 1:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ /* fall-through */
+ }
+
+ send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+}
+
+#endif /* __L3FWD_LPM_ALTIVEC_H__ */
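The lookup-or-fallback rule described above processx4_step2() (and echoed by the lpm_get_*_dst_port() helpers elsewhere in this diff) fits in a few lines of scalar C. The sketch below stubs the LPM with a single hard-coded prefix; lpm_stub_lookup() and its route are invented for illustration, while the real code calls rte_lpm_lookup()/rte_lpm_lookupx4() on a populated table.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for rte_lpm_lookup(): returns 0 and fills
 * *next_hop on a hit, negative on a miss. Real code queries an
 * rte_lpm table built with rte_lpm_add(). */
static int
lpm_stub_lookup(uint32_t dst_ip, uint32_t *next_hop)
{
	if ((dst_ip >> 24) == 192) {	/* pretend 192.0.0.0/8 -> port 3 */
		*next_hop = 3;
		return 0;
	}
	return -1;			/* no route */
}

/* Same shape as lpm_get_ipv4_dst_port(): on a miss, fall back to the
 * port the packet arrived on, so it is bounced back out. */
static uint16_t
get_dst_port(uint32_t dst_ip, uint16_t portid)
{
	uint32_t next_hop;

	return (lpm_stub_lookup(dst_ip, &next_hop) == 0) ?
		(uint16_t)next_hop : portid;
}

int
main(void)
{
	printf("hit:  %u\n", get_dst_port(0xc0000001, 0)); /* 192.0.0.1 -> 3 */
	printf("miss: %u\n", get_dst_port(0x0a000001, 0)); /* 10.0.0.1 -> 0 */
	return 0;
}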
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
index baedbfe8..85f314d1 100644
--- a/examples/l3fwd/l3fwd_lpm_neon.h
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -82,7 +82,7 @@ static inline void
processx4_step2(const struct lcore_conf *qconf,
int32x4_t dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -115,7 +115,7 @@ processx4_step2(const struct lcore_conf *qconf,
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i = 0, j = 0;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 4e294c84..d474396e 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -79,7 +79,7 @@ static inline void
processx4_step2(const struct lcore_conf *qconf,
__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -112,7 +112,7 @@ processx4_step2(const struct lcore_conf *qconf,
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h
index 42d50d3c..b319b5a9 100644
--- a/examples/l3fwd/l3fwd_neon.h
+++ b/examples/l3fwd/l3fwd_neon.h
@@ -114,6 +114,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1,
/* update last port counter. */
lp[0] += gptbl[v].lpv;
+ rte_compiler_barrier();
/* if dest port value has changed. */
if (v != GRPMSK) {
@@ -192,7 +193,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
* dp1:
* <d[j], d[j+1], d[j+2], d[j+3], ... >
*/
- dp1 = vextq_u16(dp1, dp1, FWDSTEP - 1);
+ dp1 = vextq_u16(dp2, dp1, FWDSTEP - 1);
}
/*
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 81995fdb..6229568f 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -50,7 +50,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -124,7 +122,7 @@ uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -245,7 +243,7 @@ check_lcore_params(void)
static int
check_port_config(const unsigned nb_ports)
{
- unsigned portid;
+ uint16_t portid;
uint16_t i;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -263,7 +261,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -445,7 +443,7 @@ parse_config(const char *q_arg)
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
@@ -750,11 +748,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -773,14 +772,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps -%s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -818,7 +816,7 @@ signal_handler(int signum)
}
static int
-prepare_ptype_parser(uint8_t portid, uint16_t queueid)
+prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
if (parse_ptype) {
printf("Port %d: softly parse packet type info\n", portid);
@@ -847,10 +845,10 @@ main(int argc, char **argv)
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1048,7 +1046,7 @@ main(int argc, char **argv)
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
ret = 0;
/* launch per-lcore init on every lcore */
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index f4e3969a..bc47dcce 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -50,7 +50,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -145,7 +143,7 @@ print_stats(void)
{
struct rte_eth_link link;
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
@@ -165,7 +163,7 @@ print_stats(void)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait((uint8_t)portid, &link);
+ rte_eth_link_get_nowait(portid, &link);
printf("\nStatistics for port %u ------------------------------"
"\nLink status: %25s"
"\nLink speed: %26u"
@@ -470,7 +468,7 @@ lsi_parse_args(int argc, char **argv)
* int.
*/
static int
-lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
struct rte_eth_link link;
@@ -494,11 +492,12 @@ lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -513,14 +512,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -553,8 +551,8 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t portid, portid_last = 0;
+ uint16_t nb_ports;
+ uint16_t portid, portid_last = 0;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index 50325095..755a86e4 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -240,7 +238,7 @@ parse_arg_rx(const char *arg)
if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) {
return -9;
}
- lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port;
+ lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = port;
lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue;
lp->io.rx.n_nic_queues ++;
@@ -318,7 +316,7 @@ parse_arg_tx(const char *arg)
if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
return -9;
}
- lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
+ lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = port;
lp->io.tx.n_nic_ports ++;
n_tuples ++;
@@ -488,7 +486,7 @@ app_check_lpm_table(void)
static int
app_check_every_rx_port_is_tx_enabled(void)
{
- uint8_t port;
+ uint16_t port;
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) {
@@ -762,7 +760,7 @@ app_parse_args(int argc, char **argv)
}
int
-app_get_nic_rx_queues_per_port(uint8_t port)
+app_get_nic_rx_queues_per_port(uint16_t port)
{
uint32_t i, count;
@@ -781,7 +779,7 @@ app_get_nic_rx_queues_per_port(uint8_t port)
}
int
-app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
+app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue, uint32_t *lcore_out)
{
uint32_t lcore;
@@ -808,7 +806,7 @@ app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
}
int
-app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out)
+app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out)
{
uint32_t lcore;
@@ -901,7 +899,7 @@ app_print_params(void)
/* Print NIC RX configuration */
printf("NIC RX ports: ");
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
- uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);
+ uint32_t n_rx_queues = app_get_nic_rx_queues_per_port(port);
if (n_rx_queues == 0) {
continue;
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index 717232e6..3dab7f25 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -308,7 +306,7 @@ app_init_rings_tx(void)
continue;
}
- if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
+ if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
port);
}
@@ -359,11 +357,12 @@ app_init_rings_tx(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
uint32_t n_rx_queues, n_tx_queues;
@@ -383,14 +382,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %uMbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -422,7 +420,8 @@ app_init_nics(void)
{
unsigned socket;
uint32_t lcore;
- uint8_t port, queue;
+ uint16_t port;
+ uint8_t queue;
int ret;
uint32_t n_rx_queues, n_tx_queues;
@@ -440,14 +439,14 @@ app_init_nics(void)
}
/* Init port */
- printf("Initializing NIC port %u ...\n", (unsigned) port);
+ printf("Initializing NIC port %u ...\n", port);
ret = rte_eth_dev_configure(
port,
(uint8_t) n_rx_queues,
(uint8_t) n_tx_queues,
&port_conf);
if (ret < 0) {
- rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
+ rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
}
rte_eth_promiscuous_enable(port);
@@ -457,7 +456,7 @@ app_init_nics(void)
port, &nic_rx_ring_size, &nic_tx_ring_size);
if (ret < 0) {
rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
- (unsigned) port, ret);
+ port, ret);
}
app.nic_rx_ring_size = nic_rx_ring_size;
app.nic_tx_ring_size = nic_tx_ring_size;
@@ -473,8 +472,7 @@ app_init_nics(void)
pool = app.lcore_params[lcore].pool;
printf("Initializing NIC port %u RX queue %u ...\n",
- (unsigned) port,
- (unsigned) queue);
+ port, queue);
ret = rte_eth_rx_queue_setup(
port,
queue,
@@ -484,9 +482,7 @@ app_init_nics(void)
pool);
if (ret < 0) {
rte_panic("Cannot init RX queue %u for port %u (%d)\n",
- (unsigned) queue,
- (unsigned) port,
- ret);
+ queue, port, ret);
}
}
@@ -495,7 +491,7 @@ app_init_nics(void)
app_get_lcore_for_nic_tx(port, &lcore);
socket = rte_lcore_to_socket_id(lcore);
printf("Initializing NIC port %u TX queue 0 ...\n",
- (unsigned) port);
+ port);
ret = rte_eth_tx_queue_setup(
port,
0,
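
The hunks above repeat a pattern that recurs throughout this patch: the link-status poll keeps its 8-bit retry counter while the port index widens to 16 bits. A minimal sketch of that polling loop, reusing CHECK_INTERVAL and MAX_CHECK_TIME from the hunks, with the function name and loop shape assumed for illustration:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>

#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90  /* 9s (90 * 100ms) in total */

static void
wait_for_links(uint16_t port_num, uint32_t port_mask)
{
	uint16_t portid;	/* widened: no longer uint8_t */
	uint8_t count;		/* retry counter stays 8-bit */
	struct rte_eth_link link;
	int all_up;

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			if (link.link_status == ETH_LINK_DOWN)
				all_up = 0;
		}
		if (all_up)
			break;
		rte_delay_ms(CHECK_INTERVAL);
	}
}
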
diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c
index 65ceea4a..7ced84d4 100644
--- a/examples/load_balancer/main.c
+++ b/examples/load_balancer/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -58,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
diff --git a/examples/load_balancer/main.h b/examples/load_balancer/main.h
index dc407555..bef2ba04 100644
--- a/examples/load_balancer/main.h
+++ b/examples/load_balancer/main.h
@@ -248,7 +248,7 @@ struct app_lcore_params_io {
struct {
/* NIC */
struct {
- uint8_t port;
+ uint16_t port;
uint8_t queue;
} nic_queues[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
uint32_t n_nic_queues;
@@ -275,7 +275,7 @@ struct app_lcore_params_io {
struct rte_ring *rings[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
/* NIC */
- uint8_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+ uint16_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
uint32_t n_nic_ports;
/* Internal buffers */
@@ -368,9 +368,10 @@ void app_print_usage(void);
void app_init(void);
int app_lcore_main_loop(void *arg);
-int app_get_nic_rx_queues_per_port(uint8_t port);
-int app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out);
-int app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out);
+int app_get_nic_rx_queues_per_port(uint16_t port);
+int app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue,
+ uint32_t *lcore_out);
+int app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out);
int app_is_socket_used(uint32_t socket);
uint32_t app_get_lcores_io_rx(void);
uint32_t app_get_lcores_worker(void);
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index e54b7851..f65e14f0 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -188,7 +186,7 @@ app_lcore_io_rx(
uint32_t i;
for (i = 0; i < lp->rx.n_nic_queues; i ++) {
- uint8_t port = lp->rx.nic_queues[i].port;
+ uint16_t port = lp->rx.nic_queues[i].port;
uint8_t queue = lp->rx.nic_queues[i].queue;
uint32_t n_mbufs, j;
@@ -213,7 +211,7 @@ app_lcore_io_rx(
printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
(double) stats.imissed / (double) (stats.imissed + stats.ipackets),
((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
lp->rx.nic_queues_iters[i] = 0;
@@ -339,7 +337,7 @@ app_lcore_io_tx(
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i ++) {
- uint8_t port = lp->tx.nic_ports[i];
+ uint16_t port = lp->tx.nic_ports[i];
struct rte_ring *ring = lp->tx.rings[port][worker];
uint32_t n_mbufs, n_pkts;
int ret;
@@ -395,7 +393,7 @@ app_lcore_io_tx(
printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
lp->tx.nic_ports_iters[port] = 0;
lp->tx.nic_ports_count[port] = 0;
@@ -418,7 +416,7 @@ app_lcore_io_tx(
static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
- uint8_t port;
+ uint16_t port;
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i++) {
@@ -569,7 +567,7 @@ app_lcore_worker(
if (lp->rings_out_iters[port] == APP_STATS){
printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
(unsigned) lp->worker_id,
- (unsigned) port,
+ port,
((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
lp->rings_out_iters[port] = 0;
lp->rings_out_count[port] = 0;
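
The RX statistics printed in the hunks above derive a NIC drop ratio from the imissed and ipackets counters of rte_eth_stats. A hedged sketch of that computation in isolation, with the function name assumed and a guard added for the idle-port case:

#include <rte_ethdev.h>

static double
nic_drop_ratio(uint16_t port)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port, &stats) != 0)
		return 0.0;
	if (stats.imissed + stats.ipackets == 0)
		return 0.0; /* avoid dividing by zero on an idle port */
	return (double)stats.imissed /
			(double)(stats.imissed + stats.ipackets);
}
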
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index f8453e57..30ce4b34 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -57,7 +57,6 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
@@ -74,7 +73,7 @@ static uint8_t client_id = 0;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
-static uint8_t output_ports[RTE_MAX_ETHPORTS];
+static uint16_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packets that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -150,7 +149,7 @@ static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
- uint8_t port_id = (uintptr_t)userdata;
+ uint16_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
@@ -161,7 +160,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
}
static void
-configure_tx_buffer(uint8_t port_id, uint16_t size)
+configure_tx_buffer(uint16_t port_id, uint16_t size)
{
int ret;
@@ -171,15 +170,16 @@ configure_tx_buffer(uint8_t port_id, uint16_t size)
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
/*
@@ -195,8 +195,8 @@ configure_output_ports(const struct port_info *ports)
rte_exit(EXIT_FAILURE, "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n",
(unsigned)RTE_MAX_ETHPORTS);
for (i = 0; i < ports->num_ports - 1; i+=2){
- uint8_t p1 = ports->id[i];
- uint8_t p2 = ports->id[i+1];
+ uint16_t p1 = ports->id[i];
+ uint16_t p2 = ports->id[i+1];
output_ports[p1] = p2;
output_ports[p2] = p1;
@@ -215,8 +215,8 @@ static void
handle_packet(struct rte_mbuf *buf)
{
int sent;
- const uint8_t in_port = buf->port;
- const uint8_t out_port = output_ports[in_port];
+ const uint16_t in_port = buf->port;
+ const uint16_t out_port = output_ports[in_port];
struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
sent = rte_eth_tx_buffer(out_port, client_id, buffer, buf);
@@ -275,7 +275,7 @@ main(int argc, char *argv[])
for (;;) {
uint16_t i, rx_pkts;
- uint8_t port;
+ uint16_t port;
rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
PKT_READ_SIZE, NULL);
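
The client code above carries the port id through the tx-buffer error callback's opaque userdata pointer, which is why the uintptr_t casts survive the widening unchanged. A sketch of that setup, with the names and the drop accounting assumed for illustration:

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

static void
count_drops(struct rte_mbuf **unsent, uint16_t count, void *userdata)
{
	uint16_t port_id = (uintptr_t)userdata; /* 16-bit id round-trips fine */
	uint16_t i;

	(void)port_id; /* a real app would credit tx_drop[port_id] += count */
	for (i = 0; i < count; i++)
		rte_pktmbuf_free(unsent[i]);
}

static struct rte_eth_dev_tx_buffer *
make_tx_buffer(uint16_t port_id, uint16_t size)
{
	struct rte_eth_dev_tx_buffer *buf;

	buf = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(size),
			0, rte_eth_dev_socket_id(port_id));
	if (buf == NULL)
		return NULL;
	rte_eth_tx_buffer_init(buf, size);
	if (rte_eth_tx_buffer_set_err_callback(buf, count_drops,
			(void *)(uintptr_t)port_id) < 0) {
		rte_free(buf);
		return NULL;
	}
	return buf;
}
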
diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile
index 5552999b..160c17b6 100644
--- a/examples/multi_process/client_server_mp/mp_server/Makefile
+++ b/examples/multi_process/client_server_mp/mp_server/Makefile
@@ -49,7 +49,7 @@ APP = mp_server
# all source are stored in SRCS-y
SRCS-y := main.c init.c args.c
-INC := $(wildcard *.h)
+INC := $(sort $(wildcard *.h))
CFLAGS += $(WERROR_FLAGS) -O3
CFLAGS += -I$(SRCDIR)/../shared
diff --git a/examples/multi_process/client_server_mp/mp_server/args.c b/examples/multi_process/client_server_mp/mp_server/args.c
index bf8c666c..a65884fd 100644
--- a/examples/multi_process/client_server_mp/mp_server/args.c
+++ b/examples/multi_process/client_server_mp/mp_server/args.c
@@ -74,7 +74,7 @@ parse_portmask(uint8_t max_ports, const char *portmask)
{
char *end = NULL;
unsigned long pm;
- uint8_t count = 0;
+ uint16_t count = 0;
if (portmask == NULL || *portmask == '\0')
return -1;
@@ -128,7 +128,7 @@ parse_num_clients(const char *clients)
* on error.
*/
int
-parse_app_args(uint8_t max_ports, int argc, char *argv[])
+parse_app_args(uint16_t max_ports, int argc, char *argv[])
{
int option_index, opt;
char **argvopt = argv;
diff --git a/examples/multi_process/client_server_mp/mp_server/args.h b/examples/multi_process/client_server_mp/mp_server/args.h
index 23af1bd3..33888b89 100644
--- a/examples/multi_process/client_server_mp/mp_server/args.h
+++ b/examples/multi_process/client_server_mp/mp_server/args.h
@@ -34,6 +34,6 @@
#ifndef _ARGS_H_
#define _ARGS_H_
-int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
+int parse_app_args(uint16_t max_ports, int argc, char *argv[]);
#endif /* ifndef _ARGS_H_ */
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 0bc92921..c8d02113 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -56,7 +56,6 @@
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -114,7 +113,7 @@ init_mbuf_pools(void)
* - start the port and report its status to stdout
*/
static int
-init_port(uint8_t port_num)
+init_port(uint16_t port_num)
{
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
@@ -129,7 +128,7 @@ init_port(uint8_t port_num)
uint16_t q;
int retval;
- printf("Port %u init ... ", (unsigned)port_num);
+ printf("Port %u init ... ", port_num);
fflush(stdout);
/* Standard DPDK port initialisation - config port, then set up
@@ -200,11 +199,12 @@ init_shm_rings(void)
/* Check the link status of all ports in up to 9s, and print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -262,7 +262,7 @@ init(int argc, char *argv[])
{
int retval;
const struct rte_memzone *mz;
- uint8_t i, total_ports;
+ uint16_t i, total_ports;
/* init EAL, parsing EAL args */
retval = rte_eal_init(argc, argv);
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 7055b543..6eda556a 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -44,7 +44,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -59,7 +58,6 @@
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
@@ -88,7 +86,7 @@ struct client_rx_buf {
static struct client_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
diff --git a/examples/multi_process/client_server_mp/shared/common.h b/examples/multi_process/client_server_mp/shared/common.h
index 631c4632..35a3b01d 100644
--- a/examples/multi_process/client_server_mp/shared/common.h
+++ b/examples/multi_process/client_server_mp/shared/common.h
@@ -57,8 +57,8 @@ struct tx_stats{
} __rte_cache_aligned;
struct port_info {
- uint8_t num_ports;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t num_ports;
+ uint16_t id[RTE_MAX_ETHPORTS];
volatile struct rx_stats rx_stats;
volatile struct tx_stats tx_stats[MAX_CLIENTS];
};
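
The client and server above share this port_info layout through hugepage memory, so widening num_ports and id[] changes the shared ABI and both sides must be rebuilt together. A sketch of the lookup side; the memzone name used here is an assumption for illustration, and the returned pointer would be cast to the port_info layout shown above:

#include <rte_memzone.h>

static const void *
lookup_shared_port_info(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup("MProc_port_info"); /* assumed zone name */
	if (mz == NULL)
		return NULL;
	return mz->addr; /* caller casts to (const struct port_info *) */
}
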
diff --git a/examples/multi_process/l2fwd_fork/flib.c b/examples/multi_process/l2fwd_fork/flib.c
index c22e983b..a3f1d275 100644
--- a/examples/multi_process/l2fwd_fork/flib.c
+++ b/examples/multi_process/l2fwd_fork/flib.c
@@ -54,7 +54,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -64,7 +63,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index f8a626ba..deace273 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -51,7 +51,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -62,7 +61,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -140,7 +138,8 @@ struct lcore_resource_struct {
/* ring[1] for slave send ack, master read */
struct rte_ring *ring[2];
int port_num; /* Total port numbers */
- uint8_t port[RTE_MAX_ETHPORTS]; /* Port id for that lcore to receive packets */
+ /* Port id for that lcore to receive packets */
+ uint16_t port[RTE_MAX_ETHPORTS];
}__attribute__((packed)) __rte_cache_aligned;
static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
@@ -871,11 +870,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -890,14 +890,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up- speed %u Mbps- %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -930,9 +929,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned rx_lcore_id;
unsigned nb_ports_in_mask = 0;
unsigned i;
@@ -1204,10 +1203,7 @@ main(int argc, char **argv)
message_pool = rte_mempool_create("ms_msg_pool",
NB_CORE_MSGBUF * RTE_MAX_LCORE,
sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
- 0,
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
if (message_pool == NULL)
rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
diff --git a/examples/multi_process/simple_mp/main.c b/examples/multi_process/simple_mp/main.c
index 2843d94e..62537b02 100644
--- a/examples/multi_process/simple_mp/main.c
+++ b/examples/multi_process/simple_mp/main.c
@@ -54,7 +54,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -67,6 +66,7 @@
#include <rte_mempool.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
#include <cmdline_socket.h>
#include <cmdline.h>
#include "mp_commands.h"
@@ -76,7 +76,6 @@
static const char *_MSG_POOL = "MSG_POOL";
static const char *_SEC_2_PRI = "SEC_2_PRI";
static const char *_PRI_2_SEC = "PRI_2_SEC";
-const unsigned string_size = 64;
struct rte_ring *send_ring, *recv_ring;
struct rte_mempool *message_pool;
@@ -121,7 +120,7 @@ main(int argc, char **argv)
send_ring = rte_ring_create(_PRI_2_SEC, ring_size, rte_socket_id(), flags);
recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, rte_socket_id(), flags);
message_pool = rte_mempool_create(_MSG_POOL, pool_size,
- string_size, pool_cache, priv_data_sz,
+ STR_TOKEN_SIZE, pool_cache, priv_data_sz,
NULL, NULL, NULL, NULL,
rte_socket_id(), flags);
} else {
diff --git a/examples/multi_process/simple_mp/mp_commands.c b/examples/multi_process/simple_mp/mp_commands.c
index 8da244bb..ef6dc58d 100644
--- a/examples/multi_process/simple_mp/mp_commands.c
+++ b/examples/multi_process/simple_mp/mp_commands.c
@@ -42,7 +42,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
@@ -78,7 +77,7 @@ static void cmd_send_parsed(void *parsed_result,
if (rte_mempool_get(message_pool, &msg) < 0)
rte_panic("Failed to get message buffer\n");
- snprintf((char *)msg, string_size, "%s", res->message);
+ snprintf((char *)msg, STR_TOKEN_SIZE, "%s", res->message);
if (rte_ring_enqueue(send_ring, msg) < 0) {
printf("Failed to send message - message discarded\n");
rte_mempool_put(message_pool, msg);
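
The send path above follows the usual mempool-plus-ring message pattern: a buffer that cannot be enqueued must go back to the pool or it leaks. A condensed sketch of that pattern, with the function name assumed:

#include <stdio.h>
#include <rte_ring.h>
#include <rte_mempool.h>

static int
send_text(struct rte_ring *ring, struct rte_mempool *pool,
		const char *text, size_t buf_sz)
{
	void *msg;

	if (rte_mempool_get(pool, &msg) < 0)
		return -1;
	snprintf((char *)msg, buf_sz, "%s", text);
	if (rte_ring_enqueue(ring, msg) < 0) {
		rte_mempool_put(pool, msg); /* return the buffer, don't leak it */
		return -1;
	}
	return 0;
}
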
diff --git a/examples/multi_process/simple_mp/mp_commands.h b/examples/multi_process/simple_mp/mp_commands.h
index 7e9a4ab2..452b3645 100644
--- a/examples/multi_process/simple_mp/mp_commands.h
+++ b/examples/multi_process/simple_mp/mp_commands.h
@@ -34,7 +34,6 @@
#ifndef _SIMPLE_MP_COMMANDS_H_
#define _SIMPLE_MP_COMMANDS_H_
-extern const unsigned string_size;
extern struct rte_ring *send_ring;
extern struct rte_mempool *message_pool;
extern volatile int quit;
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 0f497910..6fb285c7 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -56,7 +56,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -65,7 +64,6 @@
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
@@ -102,7 +100,7 @@ struct port_stats{
static int proc_id = -1;
static unsigned num_procs = 0;
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0;
static struct lcore_ports lcore_ports[RTE_MAX_LCORE];
@@ -202,7 +200,8 @@ smp_parse_args(int argc, char **argv)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
+smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
+ uint16_t num_queues)
{
struct rte_eth_conf port_conf = {
.rxmode = {
@@ -237,7 +236,7 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
if (port >= rte_eth_dev_count())
return -1;
- printf("# Initialising port %u... ", (unsigned)port);
+ printf("# Initialising port %u... ", port);
fflush(stdout);
rte_eth_dev_info_get(port, &info);
@@ -362,11 +361,12 @@ lcore_main(void *arg __rte_unused)
/* Check the link status of all ports in up to 9s, and print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -381,14 +381,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index af2d9f3f..12b3fcbe 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
@@ -129,7 +128,7 @@ static void netmap_unregif(uint32_t idx, uint32_t port);
static int32_t
-ifname_to_portid(const char *ifname, uint8_t *port)
+ifname_to_portid(const char *ifname, uint16_t *port)
{
char *endptr;
uint64_t portid;
@@ -140,7 +139,7 @@ ifname_to_portid(const char *ifname, uint8_t *port)
portid >= RTE_DIM(ports) || errno != 0)
return -EINVAL;
- *port = (uint8_t)portid;
+ *port = portid;
return 0;
}
@@ -222,10 +221,10 @@ fd_release(int32_t fd)
}
static int
-check_nmreq(struct nmreq *req, uint8_t *port)
+check_nmreq(struct nmreq *req, uint16_t *port)
{
int32_t rc;
- uint8_t portid;
+ uint16_t portid;
if (req == NULL)
return -EINVAL;
@@ -242,7 +241,7 @@ check_nmreq(struct nmreq *req, uint8_t *port)
}
if (ports[portid].pool == NULL) {
- RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
+ RTE_LOG(ERR, USER1, "Misconfigured portid %u\n", portid);
return -EINVAL;
}
@@ -262,7 +261,7 @@ check_nmreq(struct nmreq *req, uint8_t *port)
static int
ioctl_niocginfo(__rte_unused int fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
struct nmreq *req;
int32_t rc;
@@ -283,7 +282,7 @@ ioctl_niocginfo(__rte_unused int fd, void * param)
}
static void
-netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
+netmap_ring_setup(struct netmap_ring *ring, uint16_t port, uint32_t ringid,
uint32_t num_slots)
{
uint32_t j;
@@ -305,7 +304,7 @@ netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
}
static int
-netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
+netmap_regif(struct nmreq *req, uint32_t idx, uint16_t port)
{
struct netmap_if *nmif;
struct netmap_ring *ring;
@@ -313,7 +312,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
int32_t rc;
if (ports[port].fd < RTE_DIM(fd_port)) {
- RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
+ RTE_LOG(ERR, USER1, "port %u already in use by fd: %u\n",
port, IDX_TO_FD(ports[port].fd));
return -EBUSY;
}
@@ -399,7 +398,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
static int
ioctl_niocregif(int32_t fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
int32_t rc;
uint32_t idx;
struct nmreq *req;
@@ -422,7 +421,7 @@ netmap_unregif(uint32_t idx, uint32_t port)
{
fd_port[idx].port = FD_PORT_RSRV;
ports[port].fd = UINT32_MAX;
- rte_eth_dev_stop((uint8_t)port);
+ rte_eth_dev_stop(port);
}
/**
@@ -460,7 +459,7 @@ ioctl_niocunregif(int fd)
* packets as it can hold coming from its dpdk port.
*/
static inline int
-rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+rx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
uint16_t max_burst)
{
int32_t i, n_rx;
@@ -513,7 +512,7 @@ rx_sync_if(uint32_t port)
for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
r = NETMAP_RXRING(nifp, i);
- rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
+ rx_sync_ring(r, port, (uint16_t)i, burst);
rc += r->avail;
}
@@ -542,7 +541,7 @@ ioctl_niocrxsync(int fd)
* buffers into rte_mbufs and sending them out on the ring's dpdk port.
*/
static int
-tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+tx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
struct rte_mempool *pool, uint16_t max_burst)
{
uint32_t i, n_tx;
@@ -608,7 +607,7 @@ tx_sync_if(uint32_t port)
for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
r = NETMAP_TXRING(nifp, i);
- tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
+ tx_sync_ring(r, port, (uint16_t)i, mp, burst);
rc += r->avail;
}
@@ -686,7 +685,7 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
int
-rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
+rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
{
int32_t ret;
uint16_t i;
@@ -696,17 +695,17 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
portid >= RTE_DIM(ports) ||
conf->nr_tx_rings > netmap.conf.max_rings ||
conf->nr_rx_rings > netmap.conf.max_rings) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
- rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
- tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
+ rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
+ tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
if (tx_slots > netmap.conf.max_slots ||
rx_slots > netmap.conf.max_slots) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
@@ -715,15 +714,15 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
conf->nr_tx_rings, conf->eth_conf);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
- return ret;
+ RTE_LOG(ERR, USER1, "Couldn't configure port %u\n", portid);
+ return ret;
}
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't ot adjust number of descriptors for port %hhu\n",
+ "Couldn't ot adjust number of descriptors for port %u\n",
portid);
return ret;
}
@@ -734,8 +733,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure TX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure TX queue %u of port %u\n",
i, portid);
return ret;
}
@@ -745,8 +743,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure RX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure RX queue %u of port %u\n",
i, portid);
return ret;
}
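
ifname_to_portid() above parses into a wide integer first and range-checks before narrowing, which is the safe way to fill the new 16-bit port ids. A generic sketch of the same idea, with the names assumed:

#include <errno.h>
#include <stdlib.h>
#include <stdint.h>

static int
parse_portid(const char *s, uint16_t max_ports, uint16_t *port)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);	/* parse wide... */
	if (errno != 0 || end == s || *end != '\0' || v >= max_ports)
		return -EINVAL;		/* ...range-check... */
	*port = (uint16_t)v;		/* ...then narrow */
	return 0;
}
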
diff --git a/examples/netmap_compat/lib/compat_netmap.h b/examples/netmap_compat/lib/compat_netmap.h
index 3dc7a2f4..76b2d2b4 100644
--- a/examples/netmap_compat/lib/compat_netmap.h
+++ b/examples/netmap_compat/lib/compat_netmap.h
@@ -67,7 +67,7 @@ struct rte_netmap_port_conf {
};
int rte_netmap_init(const struct rte_netmap_conf *conf);
-int rte_netmap_init_port(uint8_t portid,
+int rte_netmap_init_port(uint16_t portid,
const struct rte_netmap_port_conf *conf);
int rte_netmap_close(int fd);
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index b26c33df..3add7be4 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -269,21 +269,22 @@ configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
return 0;
}
static inline int
-configure_eth_port(uint8_t port_id)
+configure_eth_port(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 1, txRings = 1;
@@ -326,7 +327,7 @@ configure_eth_port(uint8_t port_id)
rte_eth_macaddr_get(port_id, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port_id,
+ port_id,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -401,7 +402,7 @@ rx_thread(struct rte_ring *ring_out)
uint32_t seqn = 0;
uint16_t i, ret = 0;
uint16_t nb_rx_pkts;
- uint8_t port_id;
+ uint16_t port_id;
struct rte_mbuf *pkts[MAX_PKTS_BURST];
RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
@@ -632,8 +633,8 @@ main(int argc, char **argv)
int ret;
unsigned nb_ports;
unsigned int lcore_id, last_lcore_id, master_lcore_id;
- uint8_t port_id;
- uint8_t nb_ports_available;
+ uint16_t port_id;
+ uint16_t nb_ports_available;
struct worker_thread_args worker_args = {NULL, NULL};
struct send_thread_args send_args = {NULL, NULL};
struct rte_ring *rx_to_workers;
@@ -687,7 +688,7 @@ main(int argc, char **argv)
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) port_id);
+ printf("Initializing port %u... done\n", port_id);
if (configure_eth_port(port_id) != 0)
rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
diff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h
index 5c2c1a5f..0cde5919 100644
--- a/examples/performance-thread/common/lthread.h
+++ b/examples/performance-thread/common/lthread.h
@@ -87,7 +87,7 @@ int _lthread_desched_sleep(struct lthread *lt);
void _lthread_free(struct lthread *lt);
-struct lthread_sched *_lthread_sched_get(int lcore_id);
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id);
struct lthread_stack *_stack_alloc(void);
diff --git a/examples/performance-thread/common/lthread_diag.c b/examples/performance-thread/common/lthread_diag.c
index bce1a0c3..b5007d77 100644
--- a/examples/performance-thread/common/lthread_diag.c
+++ b/examples/performance-thread/common/lthread_diag.c
@@ -296,8 +296,7 @@ _lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
/*
* plug in default diag callback with mask off
*/
-void _lthread_diag_ctor(void)__attribute__((constructor));
-void _lthread_diag_ctor(void)
+RTE_INIT(_lthread_diag_ctor)
{
diag_cb = _lthread_diag_default_cb;
diag_mask = 0;
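
RTE_INIT(), used in the replacement above, packages the two-line constructor declaration into a single macro; in this release it comes in via rte_eal.h and expands to a static function marked with the GCC constructor attribute. A minimal usage sketch:

#include <rte_eal.h>

/* runs once, before main(), exactly like the hand-written
 * __attribute__((constructor)) declarations it replaces */
RTE_INIT(example_ctor)
{
	/* one-time process setup goes here */
}
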
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 98291478..779aeb17 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -117,8 +117,7 @@ uint64_t diag_mask;
/* constructor */
-void lthread_sched_ctor(void) __attribute__ ((constructor));
-void lthread_sched_ctor(void)
+RTE_INIT(lthread_sched_ctor)
{
memset(schedcore, 0, sizeof(schedcore));
rte_atomic16_init(&num_schedulers);
@@ -562,11 +561,14 @@ void lthread_run(void)
* Return the scheduler for this lcore
*
*/
-struct lthread_sched *_lthread_sched_get(int lcore_id)
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id)
{
- if (lcore_id > LTHREAD_MAX_LCORES)
- return NULL;
- return schedcore[lcore_id];
+ struct lthread_sched *res = NULL;
+
+ if (lcore_id < LTHREAD_MAX_LCORES)
+ res = schedcore[lcore_id];
+
+ return res;
}
/*
@@ -578,10 +580,9 @@ int lthread_set_affinity(unsigned lcoreid)
struct lthread *lt = THIS_LTHREAD;
struct lthread_sched *dest_sched;
- if (unlikely(lcoreid > LTHREAD_MAX_LCORES))
+ if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
return POSIX_ERRNO(EINVAL);
-
DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);
dest_sched = schedcore[lcoreid];
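
The scheduler fixes above correct an off-by-one: an index into a table of size MAX is valid only for index < MAX, so the old "> MAX" tests let index == MAX read one slot past the end. A distilled sketch of the corrected pattern, with the table size chosen for illustration:

#include <stdint.h>

#define TABLE_SIZE 128 /* illustrative value */

static void *table[TABLE_SIZE];

static void *
lookup(unsigned int idx)
{
	void *res = NULL;

	/* not "idx > TABLE_SIZE": that would allow idx == TABLE_SIZE */
	if (idx < TABLE_SIZE)
		res = table[idx];
	return res;
}
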
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 47505f2d..2259fad4 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -62,9 +62,7 @@ RTE_DEFINE_PER_LTHREAD(void *, dummy);
static struct lthread_key key_table[LTHREAD_MAX_KEYS];
-void lthread_tls_ctor(void) __attribute__((constructor));
-
-void lthread_tls_ctor(void)
+RTE_INIT(lthread_tls_ctor)
{
key_pool = NULL;
key_pool_init = 0;
@@ -198,11 +196,12 @@ void _lthread_tls_destroy(struct lthread *lt)
void
*lthread_getspecific(unsigned int k)
{
+ void *res = NULL;
- if (k > LTHREAD_MAX_KEYS)
- return NULL;
+ if (k < LTHREAD_MAX_KEYS)
+ res = THIS_LTHREAD->tls->data[k];
- return THIS_LTHREAD->tls->data[k];
+ return res;
}
/*
@@ -212,7 +211,7 @@ void
*/
int lthread_setspecific(unsigned int k, const void *data)
{
- if (k > LTHREAD_MAX_KEYS)
+ if (k >= LTHREAD_MAX_KEYS)
return POSIX_ERRNO(EINVAL);
int n = THIS_LTHREAD->tls->nb_keys_inuse;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 7954b974..fa65234f 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -50,7 +50,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -140,7 +138,7 @@ parse_ptype(struct rte_mbuf *m)
}
static uint16_t
-cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
+cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
@@ -277,7 +275,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -287,7 +285,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct rx_thread_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
uint8_t thread_id;
@@ -648,7 +646,7 @@ struct thread_tx_conf tx_thread[MAX_TX_THREAD];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct thread_tx_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct thread_tx_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -669,7 +667,7 @@ send_burst(struct thread_tx_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
struct thread_tx_conf *qconf;
@@ -696,7 +694,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline void
-send_packetsx4(uint8_t port,
+send_packetsx4(uint16_t port,
struct rte_mbuf *m[], uint32_t num)
{
uint32_t len, j, n;
@@ -832,8 +830,8 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
static __m128i mask0;
static __m128i mask1;
static __m128i mask2;
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
int ret = 0;
@@ -846,11 +844,11 @@ get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
key.xmm = _mm_and_si128(data, mask0);
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
int ret = 0;
@@ -873,36 +871,36 @@ get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
+ return ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
}
#endif
-static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
__attribute__((unused));
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
@@ -919,11 +917,11 @@ static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
#define EXCLUDE_8TH_PKT 0x7f
static inline void
-simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
struct ether_hdr *eth_hdr[8];
struct ipv4_hdr *ipv4_hdr[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
int32_t ret[8];
union ipv4_5tuple_host key[8];
__m128i data[8];
@@ -1042,14 +1040,14 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
&key_array[0], 8, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
@@ -1146,10 +1144,10 @@ static inline void get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
}
static inline void
-simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
int32_t ret[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
struct ether_hdr *eth_hdr[8];
union ipv6_5tuple_host key[8];
@@ -1196,14 +1194,14 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
&key_array[0], 4, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
@@ -1250,24 +1248,24 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
ether_addr_copy(&ports_eth_addr[dst_port[6]], &eth_hdr[6]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[7]], &eth_hdr[7]->s_addr);
- send_single_packet(m[0], (uint8_t)dst_port[0]);
- send_single_packet(m[1], (uint8_t)dst_port[1]);
- send_single_packet(m[2], (uint8_t)dst_port[2]);
- send_single_packet(m[3], (uint8_t)dst_port[3]);
- send_single_packet(m[4], (uint8_t)dst_port[4]);
- send_single_packet(m[5], (uint8_t)dst_port[5]);
- send_single_packet(m[6], (uint8_t)dst_port[6]);
- send_single_packet(m[7], (uint8_t)dst_port[7]);
+ send_single_packet(m[0], dst_port[0]);
+ send_single_packet(m[1], dst_port[1]);
+ send_single_packet(m[2], dst_port[2]);
+ send_single_packet(m[3], dst_port[3]);
+ send_single_packet(m[4], dst_port[4]);
+ send_single_packet(m[5], dst_port[5]);
+ send_single_packet(m[6], dst_port[6]);
+ send_single_packet(m[7], dst_port[7]);
}
#endif /* APP_LOOKUP_METHOD */
static __rte_always_inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -1379,7 +1377,7 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline uint16_t
-get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
+get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
@@ -1406,7 +1404,7 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
}
static inline void
-process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint8_t portid)
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -1473,7 +1471,7 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
static inline void
processx4_step2(__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -1716,7 +1714,8 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
static void
process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
- uint8_t portid) {
+ uint16_t portid)
+{
int j;
@@ -2036,7 +2035,7 @@ static void
lthread_tx_per_ring(void *dummy)
{
int nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct rte_ring *ring;
struct thread_tx_conf *tx_conf;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -2091,7 +2090,7 @@ lthread_tx(void *args)
struct lthread *lt;
unsigned lcore_id;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
tx_conf = (struct thread_tx_conf *)args;
@@ -2138,7 +2137,8 @@ lthread_rx(void *dummy)
int ret;
uint16_t nb_rx;
int i;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
int worker_id;
int len[RTE_MAX_LCORE] = { 0 };
int old_len, new_len;
@@ -2164,7 +2164,8 @@ lthread_rx(void *dummy)
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
rte_lcore_id(), portid, queueid);
}
@@ -2323,7 +2324,7 @@ pthread_tx(void *dummy)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint64_t prev_tsc, diff_tsc, cur_tsc;
int nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -2392,7 +2393,8 @@ pthread_rx(void *dummy)
uint32_t n;
uint32_t nb_rx;
unsigned lcore_id;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct thread_rx_conf *rx_conf;
@@ -2411,7 +2413,8 @@ pthread_rx(void *dummy)
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -2539,7 +2542,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -2769,7 +2772,7 @@ parse_rx_config(const char *q_arg)
return -1;
}
rx_thread_params_array[nb_rx_thread_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ int_fld[FLD_PORT];
rx_thread_params_array[nb_rx_thread_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
rx_thread_params_array[nb_rx_thread_params].lcore_id =
@@ -2853,7 +2856,7 @@ parse_stat_lcore(const char *stat_lcore)
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
@@ -3436,11 +3439,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -3455,14 +3459,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -3497,10 +3500,10 @@ main(int argc, char **argv)
int ret;
int i;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
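
The LPM variants of get_ipv4_dst_port() above return the 32-bit next hop on a route hit and fall back to the (now 16-bit) input port on a miss. A sketch of that lookup in isolation, with the function name assumed:

#include <rte_lpm.h>
#include <rte_byteorder.h>

static inline uint16_t
route_ipv4(struct rte_lpm *lpm, uint32_t dst_be, uint16_t portid)
{
	uint32_t next_hop;

	if (rte_lpm_lookup(lpm, rte_be_to_cpu_32(dst_be), &next_hop) == 0)
		return (uint16_t)next_hop;
	return portid; /* no route: reflect back out the input port */
}
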
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index 850b009d..febae39b 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -161,6 +161,7 @@ static void initial_lthread(void *args __attribute__((unused)))
pthread_override_set(1);
uint64_t i;
+ int ret;
/* initialize mutex for shared counter */
print_count = 0;
@@ -187,7 +188,10 @@ static void initial_lthread(void *args __attribute__((unused)))
pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);
/* create the thread */
- pthread_create(&tid[i], &attr, helloworld_pthread, (void *) i);
+ ret = pthread_create(&tid[i], &attr,
+ helloworld_pthread, (void *) i);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create helloworld thread\n");
}
/* wait for 1s to allow threads
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index 113bafa0..bc7cf2b0 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -202,10 +202,7 @@ static void *__libc_dl_handle = RTLD_NEXT;
* The constructor function initialises the
* function pointers for pthread library functions
*/
-void
-pthread_intercept_ctor(void)__attribute__((constructor));
-void
-pthread_intercept_ctor(void)
+RTE_INIT(pthread_intercept_ctor)
{
override = 0;
/*
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index ddfcdb83..c53dad68 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -158,12 +158,12 @@ struct ptpv2_data_slave_ordinary {
struct clock_id master_clock_id;
struct timeval new_adj;
int64_t delta;
- uint8_t portid;
+ uint16_t portid;
uint16_t seqID_SYNC;
uint16_t seqID_FOLLOWUP;
uint8_t ptpset;
uint8_t kernel_time_set;
- uint8_t current_ptp_port;
+ uint16_t current_ptp_port;
};
static struct ptpv2_data_slave_ordinary ptp_data;
@@ -202,7 +202,7 @@ ns_to_timeval(int64_t nsec)
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = port_conf_default;
@@ -555,7 +555,7 @@ parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data)
* functionality.
*/
static void
-parse_ptp_frames(uint8_t portid, struct rte_mbuf *m) {
+parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
struct ptp_header *ptp_hdr;
struct ether_hdr *eth_hdr;
uint16_t eth_type;
@@ -593,7 +593,7 @@ parse_ptp_frames(uint8_t portid, struct rte_mbuf *m) {
static __attribute__((noreturn)) void
lcore_main(void)
{
- uint8_t portid;
+ uint16_t portid;
unsigned nb_rx;
struct rte_mbuf *m;
@@ -728,7 +728,7 @@ main(int argc, char *argv[])
{
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index b0909f6a..67b4a75b 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -116,8 +116,8 @@ static struct rte_eth_conf port_conf = {
#define PKT_TX_BURST_MAX 32
#define TIME_TX_DRAIN 200000ULL
-static uint8_t port_rx;
-static uint8_t port_tx;
+static uint16_t port_rx;
+static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 2350d64f..203a3470 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -252,8 +252,8 @@ app_parse_flow_conf(const char *conf_str)
pconf = &qos_conf[nb_pfc];
- pconf->rx_port = (uint8_t)vals[0];
- pconf->tx_port = (uint8_t)vals[1];
+ pconf->rx_port = vals[0];
+ pconf->tx_port = vals[1];
pconf->rx_core = (uint8_t)vals[2];
pconf->wt_core = (uint8_t)vals[3];
if (ret == 5)
@@ -267,19 +267,19 @@ app_parse_flow_conf(const char *conf_str)
}
if (pconf->rx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu16" index\n",
nb_pfc, pconf->rx_port);
return -1;
}
if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu16" index\n",
nb_pfc, pconf->tx_port);
return -1;
}
mask = 1lu << pconf->rx_port;
if (app_used_rx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu16" is used already\n",
nb_pfc, pconf->rx_port);
return -1;
}
@@ -288,7 +288,7 @@ app_parse_flow_conf(const char *conf_str)
mask = 1lu << pconf->tx_port;
if (app_used_tx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: port %"PRIu16" is used already\n",
nb_pfc, pconf->tx_port);
return -1;
}
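
The log fixes above switch the format macros from PRIu8 to PRIu16 to match the widened fields; with <inttypes.h> the macro always agrees with the variable's declared width. A trivial sketch:

#include <inttypes.h>
#include <stdio.h>

static void
log_ports(uint16_t rx_port, uint16_t tx_port)
{
	/* PRIu16 matches uint16_t; the old PRIu8 matched uint8_t */
	printf("rx port %" PRIu16 ", tx port %" PRIu16 "\n",
			rx_port, tx_port);
}
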
diff --git a/examples/qos_sched/cmdline.c b/examples/qos_sched/cmdline.c
index f79d5246..b62d165b 100644
--- a/examples/qos_sched/cmdline.c
+++ b/examples/qos_sched/cmdline.c
@@ -191,7 +191,7 @@ cmdline_parse_inst_t cmd_appstats = {
struct cmd_subportstats_result {
cmdline_fixed_string_t stats_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
};
@@ -220,7 +220,7 @@ cmdline_parse_token_num_t cmd_subportstats_subport_number =
UINT32);
cmdline_parse_token_num_t cmd_subportstats_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_subportstats_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_inst_t cmd_subportstats = {
.f = cmd_subportstats_parsed,
@@ -240,7 +240,7 @@ cmdline_parse_inst_t cmd_subportstats = {
struct cmd_pipestats_result {
cmdline_fixed_string_t stats_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -265,7 +265,7 @@ cmdline_parse_token_string_t cmd_pipestats_port_string =
"port");
cmdline_parse_token_num_t cmd_pipestats_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_pipestats_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_pipestats_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, subport_string,
"subport");
@@ -299,7 +299,7 @@ cmdline_parse_inst_t cmd_pipestats = {
struct cmd_avg_q_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -327,8 +327,8 @@ cmdline_parse_token_string_t cmd_avg_q_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, port_string,
"port");
cmdline_parse_token_num_t cmd_avg_q_port_number =
- TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, port_number,
- UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, port_number,
+ UINT16);
cmdline_parse_token_string_t cmd_avg_q_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, subport_string,
"subport");
@@ -378,7 +378,7 @@ cmdline_parse_inst_t cmd_avg_q = {
struct cmd_avg_tcpipe_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -405,7 +405,7 @@ cmdline_parse_token_string_t cmd_avg_tcpipe_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_tcpipe_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_tcpipe_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, subport_string,
"subport");
@@ -447,7 +447,7 @@ cmdline_parse_inst_t cmd_avg_tcpipe = {
struct cmd_avg_pipe_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -472,7 +472,7 @@ cmdline_parse_token_string_t cmd_avg_pipe_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_pipe_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_pipe_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_pipe_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, subport_string,
"subport");
@@ -506,7 +506,7 @@ cmdline_parse_inst_t cmd_avg_pipe = {
struct cmd_avg_tcsubport_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t tc_string;
@@ -531,7 +531,7 @@ cmdline_parse_token_string_t cmd_avg_tcsubport_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_tcsubport_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_tcsubport_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_tcsubport_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, subport_string,
"subport");
@@ -565,7 +565,7 @@ cmdline_parse_inst_t cmd_avg_tcsubport = {
struct cmd_avg_subport_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
};
@@ -588,7 +588,7 @@ cmdline_parse_token_string_t cmd_avg_subport_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_subport_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_subport_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_subport_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_subport_result, subport_string,
"subport");
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index a82cbd7d..038f0427 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -100,7 +100,7 @@ static const struct rte_eth_conf port_conf = {
};
static int
-app_init_port(uint8_t portid, struct rte_mempool *mp)
+app_init_port(uint16_t portid, struct rte_mempool *mp)
{
int ret;
struct rte_eth_link link;
@@ -118,6 +118,7 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
rx_conf.rx_free_thresh = 32;
rx_conf.rx_drop_en = 0;
+ rx_conf.rx_deferred_start = 0;
tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
@@ -125,21 +126,24 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
tx_conf.tx_free_thresh = 0;
tx_conf.tx_rs_thresh = 0;
tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
+ tx_conf.tx_deferred_start = 0;
/* init port */
- RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
+ RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
rx_size = ring_conf.rx_size;
tx_size = ring_conf.tx_size;
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
+ ret, portid);
ring_conf.rx_size = rx_size;
ring_conf.tx_size = tx_size;
@@ -148,22 +152,25 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
rte_eth_dev_socket_id(portid), &rx_conf, mp);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+ ret, portid);
/* init one TX queue */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0,
(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
- "port=%"PRIu8" queue=%d\n", ret, portid, 0);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
+ ret, portid, 0);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_pmd_port_start: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_pmd_port_start: err=%d, port=%u\n",
+ ret, portid);
printf("done: ");
@@ -256,7 +263,7 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
uint32_t pipe, subport;
int err;
- rte_eth_link_get((uint8_t)portid, &link);
+ rte_eth_link_get(portid, &link);
port_params.socket = socketid;
port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
index e10cfd44..1e2fb189 100644
--- a/examples/qos_sched/main.c
+++ b/examples/qos_sched/main.c
@@ -125,8 +125,7 @@ app_main_loop(__attribute__((unused))void *dummy)
/* initialize mbuf memory */
if (mode == APP_RX_MODE) {
for (i = 0; i < rx_idx; i++) {
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "reading port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u reading port%u\n",
i, lcore_id, rx_confs[i]->rx_port);
}
@@ -140,8 +139,8 @@ app_main_loop(__attribute__((unused))void *dummy)
if (wt_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u sched+write "
- "port %"PRIu8"\n",
+ RTE_LOG(INFO, APP,
+ "flow %u lcoreid %u sched+write port %u\n",
i, lcore_id, wt_confs[i]->tx_port);
}
@@ -155,8 +154,7 @@ app_main_loop(__attribute__((unused))void *dummy)
if (tx_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "writing port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u write port%u\n",
i, lcore_id, tx_confs[i]->tx_port);
}
@@ -186,7 +184,7 @@ app_stat(void)
struct flow_conf *flow = &qos_conf[i];
rte_eth_stats_get(flow->rx_port, &stats);
- printf("\nRX port %"PRIu8": rx: %"PRIu64 " err: %"PRIu64
+ printf("\nRX port %"PRIu16": rx: %"PRIu64 " err: %"PRIu64
" no_mbuf: %"PRIu64 "\n",
flow->rx_port,
stats.ipackets - rx_stats[i].ipackets,
@@ -195,7 +193,7 @@ app_stat(void)
memcpy(&rx_stats[i], &stats, sizeof(stats));
rte_eth_stats_get(flow->tx_port, &stats);
- printf("TX port %"PRIu8": tx: %" PRIu64 " err: %" PRIu64 "\n",
+ printf("TX port %"PRIu16": tx: %" PRIu64 " err: %" PRIu64 "\n",
flow->tx_port,
stats.opackets - tx_stats[i].opackets,
stats.oerrors - tx_stats[i].oerrors);
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 8d02e1ad..77b6e3ee 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -106,8 +106,8 @@ struct thread_conf
uint32_t n_mbufs;
struct rte_mbuf **m_table;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
@@ -125,8 +125,8 @@ struct flow_conf
uint32_t rx_core;
uint32_t wt_core;
uint32_t tx_core;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
@@ -188,13 +188,15 @@ void app_worker_thread(struct thread_conf **qconf);
void app_mixed_thread(struct thread_conf **qconf);
void app_stat(void);
-int subport_stat(uint8_t port_id, uint32_t subport_id);
-int pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q);
-int qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc);
-int qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc);
-int qavg_subport(uint8_t port_id, uint32_t subport_id);
+int subport_stat(uint16_t port_id, uint32_t subport_id);
+int pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc, uint8_t q);
+int qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc);
+int qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc);
+int qavg_subport(uint16_t port_id, uint32_t subport_id);
#ifdef __cplusplus
}
diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c
index 5c894455..b5545e10 100644
--- a/examples/qos_sched/stats.c
+++ b/examples/qos_sched/stats.c
@@ -37,7 +37,8 @@
#include "main.h"
int
-qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q)
+qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
+ uint8_t q)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -74,7 +75,8 @@ qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8
}
int
-qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
+qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -114,7 +116,7 @@ qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
}
int
-qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -153,7 +155,7 @@ qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
}
int
-qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
+qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -195,7 +197,7 @@ qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
}
int
-qavg_subport(uint8_t port_id, uint32_t subport_id)
+qavg_subport(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -237,7 +239,7 @@ qavg_subport(uint8_t port_id, uint32_t subport_id)
}
int
-subport_stat(uint8_t port_id, uint32_t subport_id)
+subport_stat(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_subport_stats stats;
struct rte_sched_port *port;
@@ -273,7 +275,7 @@ subport_stat(uint8_t port_id, uint32_t subport_id)
}
int
-pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 083a37a9..37b03626 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -73,7 +73,7 @@ static struct rte_eth_fc_conf fc_conf = {
};
-void configure_eth_port(uint8_t port_id)
+void configure_eth_port(uint16_t port_id)
{
int ret;
uint16_t nb_rxd = RX_DESC_PER_QUEUE;
@@ -135,7 +135,7 @@ init_dpdk(void)
rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
-void init_ring(int lcore_id, uint8_t port_id)
+void init_ring(int lcore_id, uint16_t port_id)
{
struct rte_ring *ring;
char ring_name[RTE_RING_NAMESIZE];
@@ -156,12 +156,12 @@ void init_ring(int lcore_id, uint8_t port_id)
void
pair_ports(void)
{
- uint8_t i, j;
+ uint16_t i, j;
/* Pair ports with their "closest neighbour" in the portmask */
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
if (is_bit_set(i, portmask))
- for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
+ for (j = i + 1; j < RTE_MAX_ETHPORTS; j++)
if (is_bit_set(j, portmask)) {
port_pairs[i] = j;
port_pairs[j] = i;
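
Widening the loop counters here is not cosmetic: a uint8_t counter compared against RTE_MAX_ETHPORTS can never exceed 255, so the loop would wrap and spin forever if the build-time limit were ever raised past that. A sketch of the hazard (MAX_PORTS and count_ports are hypothetical stand-ins):

#include <stdint.h>

#define MAX_PORTS 512	/* hypothetical limit above 255 */

static uint16_t count_ports(void)
{
	/* A uint8_t counter would wrap 255 -> 0 before reaching
	 * MAX_PORTS and never terminate; uint16_t is safe here. */
	uint16_t i, n = 0;

	for (i = 0; i < MAX_PORTS; i++)
		n++;
	return n;
}
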
diff --git a/examples/quota_watermark/qw/init.h b/examples/quota_watermark/qw/init.h
index 6d0af3ab..2bfec2b1 100644
--- a/examples/quota_watermark/qw/init.h
+++ b/examples/quota_watermark/qw/init.h
@@ -34,9 +34,9 @@
#ifndef _INIT_H_
#define _INIT_H_
-void configure_eth_port(uint8_t port_id);
+void configure_eth_port(uint16_t port_id);
void init_dpdk(void);
-void init_ring(int lcore_id, uint8_t port_id);
+void init_ring(int lcore_id, uint16_t port_id);
void pair_ports(void);
void setup_shared_variables(void);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index d4fcfde4..fe174526 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -69,13 +69,13 @@ int *quota;
unsigned int *low_watermark;
unsigned int *high_watermark;
-uint8_t port_pairs[RTE_MAX_ETHPORTS];
+uint16_t port_pairs[RTE_MAX_ETHPORTS];
struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
struct rte_mempool *mbuf_pool;
-static void send_pause_frame(uint8_t port_id, uint16_t duration)
+static void send_pause_frame(uint16_t port_id, uint16_t duration)
{
struct rte_mbuf *mbuf;
struct ether_fc_frame *pause_frame;
@@ -155,7 +155,7 @@ receive_stage(__attribute__((unused)) void *args)
{
int i, ret;
- uint8_t port_id;
+ uint16_t port_id;
uint16_t nb_rx_pkts;
unsigned int lcore_id;
@@ -216,7 +216,7 @@ pipeline_stage(__attribute__((unused)) void *args)
int i, ret;
int nb_dq_pkts;
- uint8_t port_id;
+ uint16_t port_id;
unsigned int lcore_id, previous_lcore_id;
unsigned int free;
@@ -279,8 +279,8 @@ send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
- uint8_t port_id;
- uint8_t dest_port_id;
+ uint16_t port_id;
+ uint16_t dest_port_id;
unsigned int lcore_id, previous_lcore_id;
@@ -324,7 +324,7 @@ main(int argc, char **argv)
int ret;
unsigned int lcore_id, master_lcore_id, last_lcore_id;
- uint8_t port_id;
+ uint16_t port_id;
rte_log_set_global_level(RTE_LOG_INFO);
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 8c8e3116..ebed7b2f 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -45,7 +45,7 @@ extern int *quota;
extern unsigned int *low_watermark;
extern unsigned int *high_watermark;
-extern uint8_t port_pairs[RTE_MAX_ETHPORTS];
+extern uint16_t port_pairs[RTE_MAX_ETHPORTS];
extern struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
extern struct rte_mempool *mbuf_pool;
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 66992405..ca135d21 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -59,7 +59,7 @@ static struct {
static uint16_t
-add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
@@ -72,7 +72,7 @@ add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
}
static uint16_t
-calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
uint64_t cycles = 0;
@@ -97,7 +97,7 @@ calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -159,7 +159,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- uint8_t port;
+ uint16_t port;
for (port = 0; port < nb_ports; port++)
if (rte_eth_dev_socket_id(port) > 0 &&
@@ -195,7 +195,7 @@ int
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
int ret = rte_eal_init(argc, argv);
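
The callback prototypes above have to match rte_rx_callback_fn exactly, since they are installed directly on the RX data path. A sketch of registering a callback with the widened signature (count_pkts and attach_counter are hypothetical, not part of this example):

#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
count_pkts(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
		struct rte_mbuf **pkts __rte_unused, uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused, void *counter)
{
	*(uint64_t *)counter += nb_pkts;	/* tally packets on this queue */
	return nb_pkts;				/* pass the burst through unchanged */
}

static int
attach_counter(uint16_t port, uint64_t *counter)
{
	/* queue 0 only, for illustration */
	if (rte_eth_add_rx_callback(port, 0, count_pkts, counter) == NULL)
		return -1;
	return 0;
}
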
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 86e57c89..5aa1258e 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -57,7 +57,6 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
@@ -77,7 +76,7 @@ static uint8_t node_id;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
-static uint8_t output_ports[RTE_MAX_ETHPORTS];
+static uint16_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packets that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -154,7 +153,7 @@ static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
- uint8_t port_id = (uintptr_t)userdata;
+ uint16_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
@@ -165,7 +164,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
}
static void
-configure_tx_buffer(uint8_t port_id, uint16_t size)
+configure_tx_buffer(uint16_t port_id, uint16_t size)
{
int ret;
@@ -174,16 +173,17 @@ configure_tx_buffer(uint8_t port_id, uint16_t size)
RTE_ETH_TX_BUFFER_SIZE(size), 0,
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx "
- "on port %u\n", (unsigned int) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot allocate buffer for tx on port %u\n", port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned int) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
/*
@@ -282,8 +282,8 @@ static inline void
transmit_packet(struct rte_mbuf *buf)
{
int sent;
- const uint8_t in_port = buf->port;
- const uint8_t out_port = output_ports[in_port];
+ const uint16_t in_port = buf->port;
+ const uint16_t out_port = output_ports[in_port];
struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
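
For context, the tx-buffer pattern used above batches packets in software and only hands full bursts to the PMD. A condensed sketch of the setup side (make_tx_buffer is a hypothetical helper; error handling trimmed):

#include <rte_ethdev.h>
#include <rte_malloc.h>

static struct rte_eth_dev_tx_buffer *
make_tx_buffer(uint16_t port_id, uint16_t size)
{
	struct rte_eth_dev_tx_buffer *b;

	b = rte_zmalloc_socket("tx_buf", RTE_ETH_TX_BUFFER_SIZE(size), 0,
			rte_eth_dev_socket_id(port_id));
	if (b != NULL)
		rte_eth_tx_buffer_init(b, size);
	/* callers should drain partial bursts periodically with
	 * rte_eth_tx_buffer_flush(port_id, queue_id, b) */
	return b;
}
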
@@ -381,7 +381,7 @@ main(int argc, char *argv[])
for (;;) {
uint16_t rx_pkts = PKT_READ_SIZE;
- uint8_t port;
+ uint16_t port;
/*
* Try dequeuing max possible packets first, if that fails,
diff --git a/examples/server_node_efd/server/Makefile b/examples/server_node_efd/server/Makefile
index a2f2f361..9f1fe289 100644
--- a/examples/server_node_efd/server/Makefile
+++ b/examples/server_node_efd/server/Makefile
@@ -49,7 +49,7 @@ APP = server
# all source are stored in SRCS-y
SRCS-y := main.c init.c args.c
-INC := $(wildcard *.h)
+INC := $(sort $(wildcard *.h))
CFLAGS += $(WERROR_FLAGS) -O3
CFLAGS += -I$(SRCDIR)/../shared
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index d114e5bf..0bcab8cc 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -56,7 +56,6 @@
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -121,7 +120,7 @@ init_mbuf_pools(void)
* - start the port and report its status to stdout
*/
static int
-init_port(uint8_t port_num)
+init_port(uint16_t port_num)
{
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
@@ -136,7 +135,7 @@ init_port(uint8_t port_num)
uint16_t q;
int retval;
- printf("Port %u init ... ", (unsigned int)port_num);
+ printf("Port %u init ... ", port_num);
fflush(stdout);
/*
@@ -255,11 +254,12 @@ populate_efd_table(void)
/* Check the link status of all ports for up to 9s, then print the final status */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -274,14 +274,15 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", info->id[portid],
- (unsigned int)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ info->id[portid],
+ link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
printf("Port %d Link Down\n",
- (uint8_t)info->id[portid]);
+ info->id[portid]);
continue;
}
/* clear all_ports_up flag if any link down */
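
The polling loop above is the standard link-status idiom. Reduced to a single port it looks roughly like this (wait_link_up is hypothetical; the 90 x 100ms bound mirrors the constants above):

#include <string.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

static void wait_link_up(uint16_t portid)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < 90; i++) {	/* up to 9s in 100ms steps */
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(portid, &link);
		if (link.link_status)
			return;		/* link came up */
		rte_delay_ms(100);
	}
}
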
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index dcdc0a48..aa1c6f57 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -44,7 +44,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -59,7 +58,6 @@
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
@@ -95,7 +93,7 @@ struct efd_stats {
static struct node_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
diff --git a/examples/server_node_efd/shared/common.h b/examples/server_node_efd/shared/common.h
index 8a134799..b1e0abe5 100644
--- a/examples/server_node_efd/shared/common.h
+++ b/examples/server_node_efd/shared/common.h
@@ -65,9 +65,9 @@ struct filter_stats {
struct shared_info {
uint8_t num_nodes;
- uint8_t num_ports;
+ uint16_t num_ports;
uint32_t num_flows;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t id[RTE_MAX_ETHPORTS];
struct rx_stats rx_stats;
struct tx_stats tx_stats[MAX_NODES];
struct filter_stats filter_stats[MAX_NODES];
diff --git a/examples/vhost_xen/Makefile b/examples/service_cores/Makefile
index ad2466aa..bd4a345d 100644
--- a/examples/vhost_xen/Makefile
+++ b/examples/service_cores/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -39,14 +38,17 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
-APP = vhost-switch
+APP = service_cores
# all source are stored in SRCS-y
-SRCS-y := main.c vhost_monitor.c xenstore_parse.c
+SRCS-y := main.c
-CFLAGS += -O2 -I/usr/local/include -D_FILE_OFFSET_BITS=64 -Wno-unused-parameter
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D_GNU_SOURCE
-LDFLAGS += -lxenstore
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/service_cores/main.c b/examples/service_cores/main.c
new file mode 100644
index 00000000..b617a789
--- /dev/null
+++ b/examples/service_cores/main.c
@@ -0,0 +1,246 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+
+/* allow application scheduling of the services */
+#include <rte_service.h>
+
+/* Allow application registration of its own services. An application does not
+ * have to register services, but it can be useful if it wishes to run a
+ * function on a core that is otherwise in use as a service core. In this
+ * example, all services are dummy services registered by the sample app itself.
+ */
+#include <rte_service_component.h>
+
+#define PROFILE_CORES_MAX 8
+#define PROFILE_SERVICE_PER_CORE 5
+
+/* dummy function to do "work" */
+static int32_t service_func(void *args)
+{
+ RTE_SET_USED(args);
+ rte_delay_us(2000);
+ return 0;
+}
+
+static struct rte_service_spec services[] = {
+ {"service_1", service_func, NULL, 0, 0},
+ {"service_2", service_func, NULL, 0, 0},
+ {"service_3", service_func, NULL, 0, 0},
+ {"service_4", service_func, NULL, 0, 0},
+ {"service_5", service_func, NULL, 0, 0},
+};
+#define NUM_SERVICES RTE_DIM(services)
+
+/* this struct holds the mapping of a particular core to all services */
+struct profile_for_core {
+ uint32_t mapped_services[PROFILE_SERVICE_PER_CORE];
+};
+
+/* struct that can be applied as the service core mapping. Items in this
+ * struct will be passed to the ordinary rte_service_* APIs to configure the
+ * service cores at runtime, based on the requirements.
+ *
+ * These profiles can be considered a "configuration" for the service cores,
+ * where switching profile just changes the number of cores and the mappings
+ * for each of them. As a result, the core requirements and performance of the
+ * application scales.
+ */
+struct profile {
+ char name[64];
+ uint32_t num_cores;
+ struct profile_for_core cores[PROFILE_CORES_MAX];
+};
+
+static struct profile profiles[] = {
+ /* profile 0: high performance */
+ {
+ .name = "High Performance",
+ .num_cores = 5,
+ .cores[0] = {.mapped_services = {1, 0, 0, 0, 0} },
+ .cores[1] = {.mapped_services = {0, 1, 0, 0, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 1, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 1, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 1} },
+ },
+ /* profile 1: mid-high performance, two services per core */
+ {
+ .name = "Mid-High Performance",
+ .num_cores = 3,
+ .cores[0] = {.mapped_services = {1, 1, 0, 0, 0} },
+ .cores[1] = {.mapped_services = {0, 0, 1, 1, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 1} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+ /* profile 2: mid-low performance, five services across two cores */
+ {
+ .name = "Mid-Low Performance",
+ .num_cores = 2,
+ .cores[0] = {.mapped_services = {1, 1, 1, 0, 0} },
+ .cores[1] = {.mapped_services = {1, 1, 0, 1, 1} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+ /* profile 3: scale down performance on single core */
+ {
+ .name = "Scale down performance",
+ .num_cores = 1,
+ .cores[0] = {.mapped_services = {1, 1, 1, 1, 1} },
+ .cores[1] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+};
+#define NUM_PROFILES RTE_DIM(profiles)
+
+static void
+apply_profile(int profile_id)
+{
+ uint32_t i;
+ uint32_t s;
+ int ret;
+ struct profile *p = &profiles[profile_id];
+ const uint8_t core_off = 1;
+
+ for (i = 0; i < p->num_cores; i++) {
+ uint32_t core = i + core_off;
+ ret = rte_service_lcore_add(core);
+ if (ret && ret != -EALREADY)
+ printf("core %d added ret %d\n", core, ret);
+
+ ret = rte_service_lcore_start(core);
+ if (ret && ret != -EALREADY)
+ printf("core %d start ret %d\n", core, ret);
+
+ for (s = 0; s < NUM_SERVICES; s++) {
+ if (rte_service_map_lcore_set(s, core,
+ p->cores[i].mapped_services[s]))
+ printf("failed to map lcore %d\n", core);
+ }
+ }
+
+ for ( ; i < PROFILE_CORES_MAX; i++) {
+ uint32_t core = i + core_off;
+ for (s = 0; s < NUM_SERVICES; s++) {
+ ret = rte_service_map_lcore_set(s, core, 0);
+ if (ret && ret != -EINVAL) {
+ printf("%s %d: map lcore set = %d\n", __func__,
+ __LINE__, ret);
+ }
+ }
+ ret = rte_service_lcore_stop(core);
+ if (ret && ret != -EALREADY) {
+ printf("%s %d: lcore stop = %d\n", __func__,
+ __LINE__, ret);
+ }
+ ret = rte_service_lcore_del(core);
+ if (ret && ret != -EINVAL) {
+ printf("%s %d: lcore del = %d\n", __func__,
+ __LINE__, ret);
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Cannot init EAL\n");
+
+ uint32_t i;
+ for (i = 0; i < NUM_SERVICES; i++) {
+ services[i].callback_userdata = 0;
+ uint32_t id;
+ ret = rte_service_component_register(&services[i], &id);
+ if (ret)
+ rte_exit(-1, "service register() failed");
+
+ /* set the service itself to be ready to run. In the case of
+ * ethdev, eventdev and other PMDs, this will be set when the
+ * appropriate configure or setup function is called.
+ */
+ rte_service_component_runstate_set(id, 1);
+
+ /* Collect statistics for the service */
+ rte_service_set_stats_enable(id, 1);
+
+ /* the application sets the service to be active. Note that the
+ * previous component_runstate_set() is the PMD indicating
+ * ready, while this function is the application setting the
+ * service to run. Applications can choose to not run a service
+ * by setting runstate to 0 at any time.
+ */
+ ret = rte_service_runstate_set(id, 1);
+ if (ret)
+ return -ENOEXEC;
+ }
+
+ i = 0;
+ while (1) {
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+ printf("%s%s", clr, topLeft);
+
+ apply_profile(i);
+ printf("\n==> Profile: %s\n\n", profiles[i].name);
+
+ sleep(1);
+ rte_service_dump(stdout, UINT32_MAX);
+
+ sleep(5);
+ rte_service_dump(stdout, UINT32_MAX);
+
+ i++;
+ if (i >= NUM_PROFILES)
+ i = 0;
+ }
+
+ return 0;
+}
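
Stripped of the profile machinery, the control flow this new example exercises is small. A condensed sketch using the same rte_service_* calls (run_one_service is hypothetical; error handling elided):

#include <rte_service.h>
#include <rte_service_component.h>

static int
run_one_service(struct rte_service_spec *spec, uint32_t lcore)
{
	uint32_t id;

	if (rte_service_component_register(spec, &id))
		return -1;
	rte_service_component_runstate_set(id, 1);	/* component says ready */
	rte_service_runstate_set(id, 1);		/* application says run */

	rte_service_lcore_add(lcore);			/* make lcore a service core */
	rte_service_lcore_start(lcore);
	return rte_service_map_lcore_set(id, lcore, 1);	/* map service to it */
}
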
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index b4d50de8..e623754c 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -57,7 +57,7 @@ static const struct rte_eth_conf port_conf_default = {
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -104,7 +104,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -122,8 +122,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
@@ -179,7 +179,7 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
@@ -204,7 +204,7 @@ main(int argc, char *argv[])
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
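
The NUMA warning in lcore_main() above (comparing rte_eth_dev_socket_id() with the polling core's socket) generalizes to a one-line test. A sketch (port_is_local is hypothetical):

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int port_is_local(uint16_t port)
{
	int sock = rte_eth_dev_socket_id(port);

	/* a negative value means the device reports no NUMA affinity */
	return sock < 0 || (unsigned int)sock == rte_socket_id();
}
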
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index aee36c6e..a14d2e37 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -98,7 +98,7 @@
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
+ sizeof(struct rte_mbuf)))
-#define INVALID_PORT_ID 0xFF
+#define INVALID_PORT_ID 0xFFFF
/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072
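
The sentinel moves with the id width: 0xFF is a legal 16-bit port id, so the "invalid" marker becomes 0xFFFF (UINT16_MAX), a value no real port id will reach. A trivial sketch of the check this enables (port_is_valid is hypothetical):

#include <stdint.h>

#define INVALID_PORT_ID 0xFFFF	/* as defined by the hunk above */

static inline int port_is_valid(uint16_t port_id)
{
	return port_id != INVALID_PORT_ID;
}
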
@@ -184,7 +184,7 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
static unsigned lcore_ids[RTE_MAX_LCORE];
-uint8_t ports[RTE_MAX_ETHPORTS];
+uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned nb_ports; /**< The number of ports specified in command line */
@@ -1161,7 +1161,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_nb_ports;
int ret;
- uint8_t portid;
+ uint16_t portid;
uint16_t queue_id;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 050bb32d..1ad4ca3c 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -129,7 +129,7 @@ const uint16_t tenant_id_conf[] = {
* coming from the mbuf_pool passed as parameter
*/
int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t q;
@@ -202,7 +202,7 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &ports_eth_addr[port]);
RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
ports_eth_addr[port].addr_bytes[0],
ports_eth_addr[port].addr_bytes[1],
ports_eth_addr[port].addr_bytes[2],
@@ -414,7 +414,7 @@ vxlan_unlink(struct vhost_dev *vdev)
/* Transmit packets after encapsulating */
int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts) {
int ret = 0;
uint16_t i;
diff --git a/examples/tep_termination/vxlan_setup.h b/examples/tep_termination/vxlan_setup.h
index 8d264619..2e3550d3 100644
--- a/examples/tep_termination/vxlan_setup.h
+++ b/examples/tep_termination/vxlan_setup.h
@@ -37,14 +37,14 @@
extern uint16_t nb_devices;
extern uint16_t udp_port;
extern uint8_t filter_idx;
-extern uint8_t ports[RTE_MAX_ETHPORTS];
+extern uint16_t ports[RTE_MAX_ETHPORTS];
extern struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
extern uint32_t enable_stats;
extern struct device_statistics dev_statistics[MAX_DEVICES];
extern uint8_t rx_decap;
extern uint8_t tx_encap;
-typedef int (*ol_port_configure_t)(uint8_t port,
+typedef int (*ol_port_configure_t)(uint16_t port,
struct rte_mempool *mbuf_pool);
typedef int (*ol_tunnel_setup_t)(struct vhost_dev *vdev,
@@ -52,7 +52,7 @@ typedef int (*ol_tunnel_setup_t)(struct vhost_dev *vdev,
typedef void (*ol_tunnel_destroy_t)(struct vhost_dev *vdev);
-typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
+typedef int (*ol_tx_handle_t)(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
@@ -70,7 +70,7 @@ struct ol_switch_ops {
};
int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool);
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool);
int
vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m);
@@ -79,7 +79,7 @@ void
vxlan_unlink(struct vhost_dev *vdev);
int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
int
vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
diff --git a/examples/timer/main.c b/examples/timer/main.c
index 37ad559e..444e9cc1 100644
--- a/examples/timer/main.c
+++ b/examples/timer/main.c
@@ -39,7 +39,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 4d1589d0..89a61f0e 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -177,7 +177,7 @@ static struct rte_eth_conf vmdq_conf_default = {
};
static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
@@ -265,7 +265,7 @@ validate_num_devices(uint32_t max_nb_devices)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port)
+port_init(uint16_t port)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf;
@@ -392,7 +392,7 @@ port_init(uint8_t port)
RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
vmdq_ports_eth_addr[port].addr_bytes[0],
vmdq_ports_eth_addr[port].addr_bytes[1],
vmdq_ports_eth_addr[port].addr_bytes[2],
@@ -667,7 +667,7 @@ us_vhost_parse_args(int argc, char **argv)
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (enabled_port_mask & (1 << i))
- ports[num_ports++] = (uint8_t)i;
+ ports[num_ports++] = i;
}
if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
@@ -1443,7 +1443,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret, i;
- uint8_t portid;
+ uint16_t portid;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
uint64_t flags = 0;
diff --git a/examples/vhost_scsi/scsi.c b/examples/vhost_scsi/scsi.c
index 54d3104e..fd430ec2 100644
--- a/examples/vhost_scsi/scsi.c
+++ b/examples/vhost_scsi/scsi.c
@@ -307,7 +307,9 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
/* PRODUCT IDENTIFICATION */
- strncpy((char *)inqdata->product_id, bdev->product_name, 16);
+ snprintf((char *)inqdata->product_id,
+ RTE_DIM(inqdata->product_id), "%s",
+ bdev->product_name);
/* PRODUCT REVISION LEVEL */
strncpy((char *)inqdata->product_rev, "0001", 4);
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
deleted file mode 100644
index eba4d35a..00000000
--- a/examples/vhost_xen/main.c
+++ /dev/null
@@ -1,1522 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arpa/inet.h>
-#include <getopt.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_ring.h>
-#include <signal.h>
-#include <stdint.h>
-#include <sys/eventfd.h>
-#include <sys/param.h>
-#include <unistd.h>
-
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include <rte_pause.h>
-
-#include "main.h"
-#include "virtio-net.h"
-#include "xen_vhost.h"
-
-#define MAX_QUEUES 128
-
-/* the maximum number of external ports supported */
-#define MAX_SUP_PORTS 1
-
-/*
- * Calculate the number of buffers needed per port
- */
-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
- (num_switching_cores*MAX_PKT_BURST) + \
- (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
- (num_switching_cores*MBUF_CACHE_SIZE))
-
-#define MBUF_CACHE_SIZE 64
-
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
-
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
-#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
-/* State of virtio device. */
-#define DEVICE_NOT_READY 0
-#define DEVICE_READY 1
-#define DEVICE_SAFE_REMOVE 2
-
-/* Config_core_flag status definitions. */
-#define REQUEST_DEV_REMOVAL 1
-#define ACK_DEV_REMOVAL 0
-
-/* Configurable number of RX/TX ring descriptors */
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
-
-#define INVALID_PORT_ID 0xFF
-
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
-
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
-
-/* Maximum long option length for option parsing. */
-#define MAX_LONG_OPT_SZ 64
-
-/* Used to compare MAC addresses. */
-#define MAC_ADDR_CMP 0xFFFFFFFFFFFF
-
-/* mask of enabled ports */
-static uint32_t enabled_port_mask = 0;
-
-/*Number of switching cores enabled*/
-static uint32_t num_switching_cores = 0;
-
-/* number of devices/queues to support*/
-static uint32_t num_queues = 0;
-uint32_t num_devices = 0;
-
-/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
-static uint32_t enable_vm2vm = 1;
-/* Enable stats. */
-static uint32_t enable_stats = 0;
-
-/* empty vmdq configuration structure. Filled in programmatically */
-static const struct rte_eth_conf vmdq_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- /*
- * It is necessary for 1G NIC such as I350,
- * this fixes a bug where IPv4 forwarding in the guest cannot
- * forward packets from one virtio dev to another virtio dev.
- */
- .hw_vlan_strip = 1, /**< VLAN strip enabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
- },
-
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- .rx_adv_conf = {
- /*
- * should be overridden separately in code with
- * appropriate values
- */
- .vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
- .enable_default_pool = 0,
- .default_pool = 0,
- .nb_pool_maps = 0,
- .pool_map = {{0, 0},},
- },
- },
-};
-
-static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
-static unsigned num_ports = 0; /**< The number of ports specified in command line */
-
-const uint16_t vlan_tags[] = {
- 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
- 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
- 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
- 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
- 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
- 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
- 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
- 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
-};
-
-/* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
-
-/* heads for the main used and free linked lists for the data path. */
-static struct virtio_net_data_ll *ll_root_used = NULL;
-static struct virtio_net_data_ll *ll_root_free = NULL;
-
-/* Array of data core structures containing information on individual core linked lists. */
-static struct lcore_info lcore_info[RTE_MAX_LCORE];
-
-/* Used for queueing bursts of TX packets. */
-struct mbuf_table {
- unsigned len;
- unsigned txq_id;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
-/* TX queue for each data core. */
-struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
-
-/* Vlan header struct used to insert vlan tags on TX. */
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-
-/* Header lengths. */
-#define VLAN_HLEN 4
-#define VLAN_ETH_HLEN 18
-
-/* Per-device statistics struct */
-struct device_statistics {
- uint64_t tx_total;
- rte_atomic64_t rx_total;
- uint64_t tx;
- rte_atomic64_t rx;
-} __rte_cache_aligned;
-struct device_statistics dev_statistics[MAX_DEVICES];
-
-/*
- * Builds up the correct configuration for VMDQ VLAN pool map
- * according to the pool & queue limits.
- */
-static inline int
-get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
-{
- struct rte_eth_vmdq_rx_conf conf;
- unsigned i;
-
- memset(&conf, 0, sizeof(conf));
- conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
- conf.nb_pool_maps = num_devices;
-
- for (i = 0; i < conf.nb_pool_maps; i++) {
- conf.pool_map[i].vlan_id = vlan_tags[ i ];
- conf.pool_map[i].pools = (1UL << i);
- }
-
- (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
- (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
- sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
- return 0;
-}
-
-/*
- * Validate the device number against the max pool number obtained from dev_info.
- * If the device number is invalid, give the error message and return -1.
- * Each device must have its own pool.
- */
-static inline int
-validate_num_devices(uint32_t max_nb_devices)
-{
- if (num_devices > max_nb_devices) {
- RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * Initialises a given port using global settings and with the rx buffers
- * coming from the mbuf_pool passed as parameter
- */
-static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_rxconf *rxconf;
- struct rte_eth_conf port_conf;
- uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
- int retval;
- uint16_t q;
-
- /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
- rte_eth_dev_info_get (port, &dev_info);
-
- /*configure the number of supported virtio devices based on VMDQ limits */
- num_devices = dev_info.max_vmdq_pools;
- num_queues = dev_info.max_rx_queues;
-
- retval = validate_num_devices(MAX_DEVICES);
- if (retval < 0)
- return retval;
-
- /* Get port configuration. */
- retval = get_eth_conf(&port_conf, num_devices);
- if (retval < 0)
- return retval;
-
- if (port >= rte_eth_dev_count()) return -1;
-
- rx_rings = (uint16_t)num_queues,
- /* Configure ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
- &tx_ring_size);
- if (retval != 0)
- return retval;
- if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT ||
- tx_ring_size > RTE_TEST_TX_DESC_DEFAULT) {
- RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size for "
- "port %u.\n", port);
- return -1;
- }
-
- rte_eth_dev_info_get(port, &dev_info);
- rxconf = &dev_info.default_rxconf;
- rxconf->rx_drop_en = 1;
- /* Setup the queues. */
- for (q = 0; q < rx_rings; q ++) {
- retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
- rte_eth_dev_socket_id(port), rxconf,
- mbuf_pool);
- if (retval < 0)
- return retval;
- }
- for (q = 0; q < tx_rings; q ++) {
- retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
- rte_eth_dev_socket_id(port),
- NULL);
- if (retval < 0)
- return retval;
- }
-
- /* Start the device. */
- retval = rte_eth_dev_start(port);
- if (retval < 0)
- return retval;
-
- rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
- RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
- RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
- " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
- vmdq_ports_eth_addr[port].addr_bytes[0],
- vmdq_ports_eth_addr[port].addr_bytes[1],
- vmdq_ports_eth_addr[port].addr_bytes[2],
- vmdq_ports_eth_addr[port].addr_bytes[3],
- vmdq_ports_eth_addr[port].addr_bytes[4],
- vmdq_ports_eth_addr[port].addr_bytes[5]);
-
- return 0;
-}
-
-/*
- * Parse the portmask provided at run time.
- */
-static int
-parse_portmask(const char *portmask)
-{
- char *end = NULL;
- unsigned long pm;
-
- errno = 0;
-
- /* parse hexadecimal string */
- pm = strtoul(portmask, &end, 16);
- if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
- return -1;
-
- if (pm == 0)
- return -1;
-
- return pm;
-
-}
-
-/*
- * Parse num options at run time.
- */
-static int
-parse_num_opt(const char *q_arg, uint32_t max_valid_value)
-{
- char *end = NULL;
- unsigned long num;
-
- errno = 0;
-
- /* parse unsigned int string */
- num = strtoul(q_arg, &end, 10);
- if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
- return -1;
-
- if (num > max_valid_value)
- return -1;
-
- return num;
-
-}
-
-/*
- * Display usage
- */
-static void
-us_vhost_usage(const char *prgname)
-{
- RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
- " -p PORTMASK: Set mask for ports to be used by application\n"
- " --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
- " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
- prgname);
-}
-
-/*
- * Parse the arguments given in the command line of the application.
- */
-static int
-us_vhost_parse_args(int argc, char **argv)
-{
- int opt, ret;
- int option_index;
- unsigned i;
- const char *prgname = argv[0];
- static struct option long_option[] = {
- {"vm2vm", required_argument, NULL, 0},
- {"stats", required_argument, NULL, 0},
- {NULL, 0, 0, 0}
- };
-
- /* Parse command line */
- while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
- switch (opt) {
- /* Portmask */
- case 'p':
- enabled_port_mask = parse_portmask(optarg);
- if (enabled_port_mask == 0) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
- us_vhost_usage(prgname);
- return -1;
- }
- break;
-
- case 0:
- /* Enable/disable vm2vm comms. */
- if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_vm2vm = ret;
- }
- }
-
- /* Enable/disable stats. */
- if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_stats = ret;
- }
- }
- break;
-
- /* Invalid option - print options. */
- default:
- us_vhost_usage(prgname);
- return -1;
- }
- }
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (enabled_port_mask & (1 << i))
- ports[num_ports++] = (uint8_t)i;
- }
-
- if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
- "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Update the global var NUM_PORTS and array PORTS according to system ports number
- * and return valid ports number
- */
-static unsigned check_ports_num(unsigned nb_ports)
-{
- unsigned valid_num_ports = num_ports;
- unsigned portid;
-
- if (num_ports > nb_ports) {
- RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
- num_ports, nb_ports);
- num_ports = nb_ports;
- }
-
- for (portid = 0; portid < num_ports; portid ++) {
- if (ports[portid] >= nb_ports) {
- RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
- ports[portid], (nb_ports - 1));
- ports[portid] = INVALID_PORT_ID;
- valid_num_ports--;
- }
- }
- return valid_num_ports;
-}
-
-/*
- * Function to convert guest physical addresses to vhost virtual addresses. This
- * is used to convert virtio buffer addresses.
- */
-static __rte_always_inline uint64_t
-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
-{
- struct virtio_memory_regions *region;
- uint32_t regionidx;
- uint64_t vhost_va = 0;
-
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_va = region->address_offset + guest_pa;
- break;
- }
- }
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
- dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
-
- return vhost_va;
-}
-
-/*
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were successfully
- * added to the RX queue.
- */
-static __rte_always_inline uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
-{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- struct rte_mbuf *buff;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
- uint64_t buff_addr = 0;
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST], packet_len = 0;
- uint32_t head_idx, packet_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
- uint16_t free_entries;
- uint8_t success = 0;
- void *userdata;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
- vq = dev->virtqueue_rx;
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
- /* As many data cores may want access to available buffers, they need to be reserved. */
- do {
-
- res_base_idx = vq->last_used_idx_res;
-
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = (avail_idx - res_base_idx);
-
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (count == 0)
- return 0;
-
- res_end_idx = res_base_idx + count;
- /* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
- res_end_idx);
- } while (unlikely(success == 0));
- res_cur_idx = res_base_idx;
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_end_idx);
-
- /* Prefetch available ring to retrieve indexes. */
- rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
-
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (res_cur_idx != res_end_idx) {
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
- /* Prefetch descriptor address. */
- rte_prefetch0(desc);
-
- buff = pkts[packet_success];
-
- /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
-
- {
- /* Copy virtio_hdr to packet and increment buffer address */
- buff_hdr_addr = buff_addr;
- packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
-
- /*
- * If the descriptors are chained the header and data are placed in
- * separate buffers.
- */
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc->len = vq->vhost_hlen;
- desc = &vq->desc[desc->next];
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- desc->len = rte_pktmbuf_data_len(buff);
- } else {
- buff_addr += vq->vhost_hlen;
- desc->len = packet_len;
- }
- }
-
- /* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
- vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
-
- /* Copy mbuf data to buffer */
- userdata = rte_pktmbuf_mtod(buff, void *);
- rte_memcpy((void *)(uintptr_t)buff_addr, userdata, rte_pktmbuf_data_len(buff));
-
- res_cur_idx++;
- packet_success++;
-
-		/* Mergeable buffers are disabled, so a header is required per buffer. */
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
- if (res_cur_idx < res_end_idx) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- }
- }
-
- rte_compiler_barrier();
-
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
- *(volatile uint16_t *)&vq->used->idx += count;
-
- vq->last_used_idx = res_end_idx;
-
- return count;
-}
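The reservation loop at the top of virtio_dev_rx() is a multi-producer ticket
scheme: each core snapshots the shared reservation index, clamps its request
to the free entries, and claims the range with a compare-and-set, retrying on
contention. A standalone sketch of the same pattern, using C11 atomics in
place of rte_atomic16_cmpset() (hypothetical names, not part of this file):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t reserved_idx;	/* plays vq->last_used_idx_res */

/* Claim up to want ring slots below avail_idx. Returns the base of the
 * claimed range; *granted receives the count actually claimed. */
static uint16_t claim_slots(uint16_t avail_idx, uint16_t want, uint16_t *granted)
{
	uint16_t base, free_entries;

	do {
		base = atomic_load(&reserved_idx);
		free_entries = (uint16_t)(avail_idx - base);
		if (want > free_entries)
			want = free_entries;
		if (want == 0)
			break;
	} while (!atomic_compare_exchange_weak(&reserved_idx, &base,
					       (uint16_t)(base + want)));

	*granted = want;
	return base;
}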
-
-/*
- * Compares a packet destination MAC address to a device MAC address.
- */
-static __rte_always_inline int
-ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
-{
- return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
-}
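ether_addr_cmp() compares two 6-byte MAC addresses in a single operation by
reading them as 64-bit words, XORing, and masking off the two bytes beyond the
address. The MAC_ADDR_CMP constant is defined elsewhere in this example; the
sketch below assumes a little-endian host where the low six bytes hold the
address, and uses memcpy to sidestep the unaligned reads the cast performs:

#include <stdint.h>
#include <string.h>

/* Hypothetical mask keeping the low 6 bytes on a little-endian host. */
#define MAC_MASK 0x0000FFFFFFFFFFFFULL

static int mac_equal(const uint8_t a[6], const uint8_t b[6])
{
	uint64_t wa = 0, wb = 0;

	memcpy(&wa, a, 6);
	memcpy(&wb, b, 6);
	return ((wa ^ wb) & MAC_MASK) == 0;
}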
-
-/*
- * This function registers a MAC address along with a
- * VLAN tag with a VMDq queue.
- */
-static int
-link_vmdq(struct virtio_net *dev)
-{
- int ret;
- struct virtio_net_data_ll *dev_ll;
-
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
- return -1;
- }
- dev_ll = dev_ll->next;
- }
-
- /* vlan_tag currently uses the device_id. */
- dev->vlan_tag = vlan_tags[dev->device_fh];
- dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
-
- /* Print out VMDQ registration info. */
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
- dev->device_fh,
- dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
- dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
- dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],
- dev->vlan_tag);
-
- /* Register the MAC address. */
- ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
- if (ret) {
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
- dev->device_fh);
- return -1;
- }
-
- /* Enable stripping of the vlan tag as we handle routing. */
- rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);
-
- rte_compiler_barrier();
- /* Set device as ready for RX. */
- dev->ready = DEVICE_READY;
-
- return 0;
-}
-
-/*
- * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
- * queue before disabling RX on the device.
- */
-static inline void
-unlink_vmdq(struct virtio_net *dev)
-{
- unsigned i = 0;
- unsigned rx_count;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
-
- if (dev->ready == DEVICE_READY) {
- /*clear MAC and VLAN settings*/
- rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
- for (i = 0; i < 6; i++)
- dev->mac_address.addr_bytes[i] = 0;
-
- dev->vlan_tag = 0;
-
- /*Clear out the receive buffers*/
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- while (rx_count) {
- for (i = 0; i < rx_count; i++)
- rte_pktmbuf_free(pkts_burst[i]);
-
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
- }
-
- dev->ready = DEVICE_NOT_READY;
- }
-}
-
-/*
- * Check if the packet destination MAC address is for a local device. If so then put
- * the packet on that device's RX queue. If not then return.
- */
-static __rte_always_inline unsigned
-virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
-{
- struct virtio_net_data_ll *dev_ll;
- struct ether_hdr *pkt_hdr;
- uint64_t ret = 0;
-
- pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-
- /*get the used devices list*/
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->dev->mac_address)) {
-
-			/* Drop the packet if it is destined for the device that sent it. */
- if (dev_ll->dev->device_fh == dev->device_fh) {
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "Source and destination MAC addresses are the same. "
- "Dropping packet.\n",
- dev_ll->dev->device_fh);
- return 0;
- }
-
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "MAC address is local\n", dev_ll->dev->device_fh);
-
- if (dev_ll->dev->remove) {
- /*drop the packet if the device is marked for removal*/
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
- "Device is marked for removal\n",
- dev_ll->dev->device_fh);
- } else {
- /*send the packet to the local virtio device*/
- ret = virtio_dev_rx(dev_ll->dev, &m, 1);
- if (enable_stats) {
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx += ret;
- }
- }
-
- return 0;
- }
- dev_ll = dev_ll->next;
- }
-
- return -1;
-}
-
-/*
- * This function routes the TX packet to the correct interface. This may be a local device
- * or the physical port.
- */
-static __rte_always_inline void
-virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
-{
- struct mbuf_table *tx_q;
- struct vlan_ethhdr *vlan_hdr;
- struct rte_mbuf **m_table;
- struct rte_mbuf *mbuf;
- unsigned len, ret;
- const uint16_t lcore_id = rte_lcore_id();
-
- /*check if destination is local VM*/
- if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {
- return;
- }
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "MAC address is external\n", dev->device_fh);
-
- /*Add packet to the port tx queue*/
- tx_q = &lcore_tx_queue[lcore_id];
- len = tx_q->len;
-
- /* Allocate an mbuf and populate the structure. */
- mbuf = rte_pktmbuf_alloc(mbuf_pool);
- if(!mbuf)
- return;
-
- mbuf->data_len = m->data_len + VLAN_HLEN;
- mbuf->pkt_len = mbuf->data_len;
-
- /* Copy ethernet header to mbuf. */
- rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
- rte_pktmbuf_mtod(m, const void*), ETH_HLEN);
-
-
-	/* Set up the VLAN header. Multi-byte fields are converted to network byte order with htons(). */
- vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
- vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_TCI = htons(vlan_tag);
-
- /* Copy the remaining packet contents to the mbuf. */
- rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN),
- rte_pktmbuf_mtod_offset(m, const void *, ETH_HLEN),
- (m->data_len - ETH_HLEN));
- tx_q->m_table[len] = mbuf;
- len++;
- if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
-
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
- /* Free any buffers not handled by TX and update the port stats. */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
- }
-
- len = 0;
- }
-
- tx_q->len = len;
- return;
-}
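The copy sequence in virtio_tx_route() splices a 4-byte 802.1Q tag into the
frame: the 12 bytes of destination and source MAC are kept, TPID 0x8100 plus
the TCI take the EtherType's place, and the original EtherType slides back to
become h_vlan_encapsulated_proto. The same layout on a raw buffer, as a
sketch with fixed sizes instead of the ETH_HLEN/VLAN_HLEN macros
(hypothetical helper):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Build a tagged copy of an untagged frame (len >= 14); out must have
 * room for len + 4 bytes. Returns the tagged length. */
static size_t add_vlan_tag(uint8_t *out, const uint8_t *in, size_t len,
			   uint16_t tci)
{
	uint16_t tpid = htons(0x8100);
	uint16_t tci_be = htons(tci);

	memcpy(out, in, 12);			/* MAC addresses         */
	memcpy(out + 12, &tpid, 2);		/* 802.1Q TPID           */
	memcpy(out + 14, &tci_be, 2);		/* priority/CFI/VLAN id  */
	memcpy(out + 16, in + 12, len - 12);	/* EtherType + payload   */
	return len + 4;
}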
-
-static __rte_always_inline void
-virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
-{
- struct rte_mbuf m;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- uint64_t buff_addr = 0;
- uint32_t head[MAX_PKT_BURST];
- uint32_t used_idx;
- uint32_t i;
- uint16_t free_entries, packet_success = 0;
- uint16_t avail_idx;
-
- vq = dev->virtqueue_tx;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- /* If there are no available buffers then return. */
- if (vq->last_used_idx == avail_idx)
- return;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
- dev->device_fh);
-
- /* Prefetch available ring to retrieve head indexes. */
- rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
-
- /*get the number of free entries in the ring*/
- free_entries = avail_idx - vq->last_used_idx;
- free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
- dev->device_fh, free_entries);
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < free_entries; i++)
- head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (packet_success < free_entries) {
- desc = &vq->desc[head[packet_success]];
- /* Prefetch descriptor address. */
- rte_prefetch0(desc);
-
- if (packet_success < (free_entries - 1)) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success+1]]);
- }
-
- /* Update used index buffer information. */
- used_idx = vq->last_used_idx & (vq->size - 1);
- vq->used->ring[used_idx].id = head[packet_success];
- vq->used->ring[used_idx].len = 0;
-
- /* Discard first buffer as it is the virtio header */
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
-
-		/* Set up a dummy mbuf. Its data is copied to a real mbuf if transmitted out of the physical port. */
- m.data_len = desc->len;
- m.data_off = 0;
- m.nb_segs = 1;
-
- virtio_tx_route(dev, &m, mbuf_pool, 0);
-
- vq->last_used_idx++;
- packet_success++;
- }
-
- rte_compiler_barrier();
- vq->used->idx += packet_success;
- /* Kick guest if required. */
-}
-
-/*
- * This function is called by each data core. It handles all RX/TX registered with the
- * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
- * with all devices in the main linked list.
- */
-static int
-switch_worker(__attribute__((unused)) void *arg)
-{
- struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret, i;
- const uint16_t lcore_id = rte_lcore_id();
- const uint16_t num_cores = (uint16_t)rte_lcore_count();
- uint16_t rx_count = 0;
-
-	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- tx_q = &lcore_tx_queue[lcore_id];
- for (i = 0; i < num_cores; i ++) {
- if (lcore_ids[i] == lcore_id) {
- tx_q->txq_id = i;
- break;
- }
- }
-
- while(1) {
- cur_tsc = rte_rdtsc();
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
-
- if (tx_q->len) {
- RTE_LOG_DP(DEBUG, VHOST_DATA,
- "TX queue drained after timeout with burst size %u\n",
- tx_q->len);
-
- /*Tx any packets in the queue*/
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
-
- tx_q->len = 0;
- }
-
- prev_tsc = cur_tsc;
-
- }
-
-		/*
-		 * If requested, inform the configuration core that we have exited
-		 * the linked list and that no devices are in use.
-		 */
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /*
- * Process devices
- */
- dev_ll = lcore_ll->ll_root_used;
-
- while (dev_ll != NULL) {
- /*get virtio device ID*/
- dev = dev_ll->dev;
-
- if (unlikely(dev->remove)) {
- dev_ll = dev_ll->next;
- unlink_vmdq(dev);
- dev->ready = DEVICE_SAFE_REMOVE;
- continue;
- }
- if (likely(dev->ready == DEVICE_READY)) {
- /*Handle guest RX*/
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- if (rx_count) {
- ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
- if (enable_stats) {
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);
- }
- while (likely(rx_count)) {
- rx_count--;
- rte_pktmbuf_free_seg(pkts_burst[rx_count]);
- }
-
- }
- }
-
- if (likely(!dev->remove))
- /*Handle guest TX*/
- virtio_dev_tx(dev, mbuf_pool);
-
- /*move to the next device in the list*/
- dev_ll = dev_ll->next;
- }
- }
-
- return 0;
-}
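The drain_tsc constant at the top of switch_worker() converts
BURST_TX_DRAIN_US into TSC cycles, rounding the cycles-per-microsecond figure
up so the timeout never collapses to zero on a slow clock. The arithmetic in
isolation (hypothetical helper):

#include <stdint.h>

/* Microseconds to TSC cycles, rounding cycles-per-us up. */
static uint64_t us_to_tsc(uint64_t tsc_hz, uint64_t us)
{
	const uint64_t us_per_s = 1000000;

	return (tsc_hz + us_per_s - 1) / us_per_s * us;
}
/* e.g. us_to_tsc(2000000000, 100) == 200000 cycles at 2 GHz */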
-
-/*
- * Add an entry to a used linked list. A free entry must first be found in the free linked list
- * using get_data_ll_free_entry().
- */
-static void
-add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- /* Set next as NULL and use a compiler barrier to avoid reordering. */
- ll_dev->next = NULL;
- rte_compiler_barrier();
-
- /* If ll == NULL then this is the first device. */
- if (ll) {
- /* Increment to the tail of the linked list. */
-		while (ll->next != NULL)
- ll = ll->next;
-
- ll->next = ll_dev;
- } else {
- *ll_root_addr = ll_dev;
- }
-}
-
-/*
- * Remove an entry from a used linked list. The entry must then be added to the free linked list
- * using put_data_ll_free_entry().
- */
-static void
-rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- if (ll_dev == ll)
- *ll_root_addr = ll_dev->next;
- else
- ll_dev_last->next = ll_dev->next;
-}
-
-/*
- * Find and return an entry from the free linked list.
- */
-static struct virtio_net_data_ll *
-get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
- struct virtio_net_data_ll *ll_dev;
-
- if (ll_free == NULL)
- return NULL;
-
- ll_dev = ll_free;
- *ll_root_addr = ll_free->next;
-
- return ll_dev;
-}
-
-/*
- * Place an entry back on to the free linked list.
- */
-static void
-put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
-
- ll_dev->next = ll_free;
- *ll_root_addr = ll_dev;
-}
-
-/*
- * Creates a linked list of a given size.
- */
-static struct virtio_net_data_ll *
-alloc_data_ll(uint32_t size)
-{
- struct virtio_net_data_ll *ll_new;
- uint32_t i;
-
- /* Malloc and then chain the linked list. */
- ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
- if (ll_new == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
- return NULL;
- }
-
- for (i = 0; i < size - 1; i++) {
- ll_new[i].dev = NULL;
- ll_new[i].next = &ll_new[i+1];
- }
- ll_new[i].next = NULL;
-
- return ll_new;
-}
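alloc_data_ll() together with the get/put pair above is a classic intrusive
free list: one allocation up front, entries chained through their next
pointers, O(1) take from the head and O(1) release back to it, so the data
path never touches the heap. A generic standalone sketch of the pattern
(hypothetical names):

#include <stddef.h>
#include <stdlib.h>

struct node { struct node *next; /* payload fields would go here */ };

static struct node *free_head;

static int pool_init(size_t n)
{
	struct node *arr;
	size_t i;

	if (n == 0 || (arr = malloc(n * sizeof(*arr))) == NULL)
		return -1;
	for (i = 0; i + 1 < n; i++)
		arr[i].next = &arr[i + 1];
	arr[n - 1].next = NULL;
	free_head = arr;
	return 0;
}

static struct node *pool_get(void)
{
	struct node *n = free_head;

	if (n != NULL)
		free_head = n->next;
	return n;
}

static void pool_put(struct node *n)
{
	n->next = free_head;
	free_head = n;
}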
-
-/*
- * Create the main linked list along with each individual core's linked list. A used and a free list
- * are created to manage entries.
- */
-static int
-init_data_ll (void)
-{
- int lcore;
-
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
- if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
- return -1;
- }
-
- lcore_info[lcore].lcore_ll->device_num = 0;
- lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
- lcore_info[lcore].lcore_ll->ll_root_used = NULL;
- if (num_devices % num_switching_cores)
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
- else
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
- }
-
- /* Allocate devices up to a maximum of MAX_DEVICES. */
- ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
-
- return 0;
-}
-/*
- * Remove a device from the specific data core linked list and from the main linked list.
- * The RX/TX thread must set the flag to indicate that it is safe to remove the device.
- */
-static void
-destroy_device (volatile struct virtio_net *dev)
-{
- struct virtio_net_data_ll *ll_lcore_dev_cur;
- struct virtio_net_data_ll *ll_main_dev_cur;
- struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
- struct virtio_net_data_ll *ll_main_dev_last = NULL;
- int lcore;
-
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- /*set the remove flag. */
- dev->remove = 1;
-
- while(dev->ready != DEVICE_SAFE_REMOVE) {
- rte_pause();
- }
-
- /* Search for entry to be removed from lcore ll */
- ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
- while (ll_lcore_dev_cur != NULL) {
- if (ll_lcore_dev_cur->dev == dev) {
- break;
- } else {
- ll_lcore_dev_last = ll_lcore_dev_cur;
- ll_lcore_dev_cur = ll_lcore_dev_cur->next;
- }
- }
-
- /* Search for entry to be removed from main ll */
- ll_main_dev_cur = ll_root_used;
- ll_main_dev_last = NULL;
- while (ll_main_dev_cur != NULL) {
- if (ll_main_dev_cur->dev == dev) {
- break;
- } else {
- ll_main_dev_last = ll_main_dev_cur;
- ll_main_dev_cur = ll_main_dev_cur->next;
- }
- }
-
- if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
-		RTE_LOG(ERR, XENHOST, "%s: could not find device in per_cpu list or main list\n", __func__);
- return;
- }
-
- /* Remove entries from the lcore and main ll. */
- rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
- rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
-
- /* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
- }
-
-	/*
-	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
-	 * it can no longer access the device removed from the linked lists and that the device
-	 * is no longer in use.
-	 */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
- rte_pause();
- }
- }
-
- /* Add the entries back to the lcore and main free ll.*/
- put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
- put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
-
- /* Decrement number of device on the lcore. */
- lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
-
- RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
-}
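The REQUEST_DEV_REMOVAL/ACK_DEV_REMOVAL exchange in destroy_device() is a
simple quiescence handshake: the configuration core raises a flag for every
worker, and each worker acknowledges only at the top of its loop, a point
where it holds no pointer into the device list. Once all workers have
acknowledged, the entry can be recycled safely. A stripped-down sketch with
C11 atomics (hypothetical names; the example itself uses volatile fields and
rte_pause()):

#include <stdatomic.h>

enum { ACK_DEV_REMOVAL, REQUEST_DEV_REMOVAL };

#define NWORKERS 8			/* assumed fixed worker count */
static _Atomic int removal_flag[NWORKERS];

/* Worker side: acknowledge only between iterations, never mid-scan. */
static void worker_checkpoint(int id)
{
	if (atomic_load(&removal_flag[id]) == REQUEST_DEV_REMOVAL)
		atomic_store(&removal_flag[id], ACK_DEV_REMOVAL);
}

/* Control side: request on every worker, then wait for all the acks. */
static void wait_quiescent(void)
{
	int i;

	for (i = 0; i < NWORKERS; i++)
		atomic_store(&removal_flag[i], REQUEST_DEV_REMOVAL);
	for (i = 0; i < NWORKERS; i++)
		while (atomic_load(&removal_flag[i]) != ACK_DEV_REMOVAL)
			; /* spin; the example calls rte_pause() here */
}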
-
-/*
- * A new device is added to a data core. First the device is added to the main linked list
- * and then allocated to a specific data core.
- */
-static int
-new_device (struct virtio_net *dev)
-{
- struct virtio_net_data_ll *ll_dev;
- int lcore, core_add = 0;
- uint32_t device_num_min = num_devices;
-
- /* Add device to main ll */
- ll_dev = get_data_ll_free_entry(&ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
- "of %d devices per core has been reached\n",
- dev->device_fh, num_devices);
- return -1;
- }
- ll_dev->dev = dev;
- add_data_ll_entry(&ll_root_used, ll_dev);
-
- /*reset ready flag*/
- dev->ready = DEVICE_NOT_READY;
- dev->remove = 0;
-
- /* Find a suitable lcore to add the device. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
- device_num_min = lcore_info[lcore].lcore_ll->device_num;
- core_add = lcore;
- }
- }
- /* Add device to lcore ll */
- ll_dev->dev->coreid = core_add;
- ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
- destroy_device(dev);
- return -1;
- }
- ll_dev->dev = dev;
- add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);
-
- /* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
-
- lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
-
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
-
- link_vmdq(dev);
-
- return 0;
-}
-
-/*
- * These callbacks allow devices to be added to the data core when configuration
- * is fully complete.
- */
-static const struct virtio_net_device_ops virtio_net_device_ops =
-{
- .new_device = new_device,
- .destroy_device = destroy_device,
-};
-
-/*
- * This thread wakes up periodically to print stats if the user has
- * enabled them.
- */
-static void
-print_stats(void)
-{
- struct virtio_net_data_ll *dev_ll;
- uint64_t tx_dropped, rx_dropped;
- uint64_t tx, tx_total, rx, rx_total;
- uint32_t device_fh;
- const char clr[] = { 27, '[', '2', 'J', '\0' };
- const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
-
- while(1) {
- sleep(enable_stats);
-
- /* Clear screen and move to top left */
- printf("%s%s", clr, top_left);
-
- printf("\nDevice statistics ====================================");
-
- dev_ll = ll_root_used;
- while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
- tx_dropped = tx_total - tx;
- rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
- rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
- rx_dropped = rx_total - rx;
-
- printf("\nStatistics for device %"PRIu32" ------------------------------"
- "\nTX total: %"PRIu64""
- "\nTX dropped: %"PRIu64""
- "\nTX successful: %"PRIu64""
- "\nRX total: %"PRIu64""
- "\nRX dropped: %"PRIu64""
- "\nRX successful: %"PRIu64"",
- device_fh,
- tx_total,
- tx_dropped,
- tx,
- rx_total,
- rx_dropped,
- rx);
-
- dev_ll = dev_ll->next;
- }
- printf("\n======================================================\n");
- }
-}
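The clr and top_left arrays in print_stats() are plain ANSI escape sequences
spelled out byte by byte (27 is ESC). As string literals they would read:

/* Equivalent string-literal forms of the byte arrays above. */
static const char ansi_clear[] = "\x1b[2J";	/* erase the entire screen   */
static const char ansi_home[]  = "\x1b[1;1H";	/* cursor to row 1, column 1 */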
-
-
-int init_virtio_net(struct virtio_net_device_ops const * const ops);
-
-/*
- * Main function, does initialisation and calls the per-lcore functions.
- */
-int
-main(int argc, char *argv[])
-{
- struct rte_mempool *mbuf_pool;
- unsigned lcore_id, core_id = 0;
- unsigned nb_ports, valid_num_ports;
- int ret;
- uint8_t portid;
- static pthread_t tid;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- argc -= ret;
- argv += ret;
-
- /* parse app arguments */
- ret = us_vhost_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid argument\n");
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
- if (rte_lcore_is_enabled(lcore_id))
- lcore_ids[core_id ++] = lcore_id;
-
- if (rte_lcore_count() > RTE_MAX_LCORE)
- rte_exit(EXIT_FAILURE,"Not enough cores\n");
-
-	/* Set the number of switching cores available. */
- num_switching_cores = rte_lcore_count()-1;
-
- /* Get the number of physical ports. */
- nb_ports = rte_eth_dev_count();
-
-	/*
-	 * Update the global variable num_ports and the global array ports,
-	 * and get the number of valid ports according to the system port count.
-	 */
- valid_num_ports = check_ports_num(nb_ports);
-
- if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
-		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
-			"but only %u ports can be enabled\n", num_ports, MAX_SUP_PORTS);
- return -1;
- }
-
- /* Create the mbuf pool. */
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
- NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- /* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0) {
- RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
- continue;
- }
- if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
- }
-
- /* Initialise all linked lists. */
- if (init_data_ll() == -1)
- rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
-
- /* Initialize device stats */
- memset(&dev_statistics, 0, sizeof(dev_statistics));
-
- /* Enable stats if the user option is set. */
- if (enable_stats) {
- ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
- if (ret != 0)
- rte_exit(EXIT_FAILURE,
- "Cannot create print-stats thread\n");
-
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
- ret = rte_thread_setname(tid, thread_name);
- if (ret != 0)
- RTE_LOG(DEBUG, VHOST_CONFIG,
- "Cannot set print-stats name\n");
- }
-
- /* Launch all data cores. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);
- }
-
- init_virtio_xen(&virtio_net_device_ops);
-
- virtio_monitor_loop();
- return 0;
-}
diff --git a/examples/vhost_xen/vhost_monitor.c b/examples/vhost_xen/vhost_monitor.c
deleted file mode 100644
index fb9606bf..00000000
--- a/examples/vhost_xen/vhost_monitor.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <dirent.h>
-#include <unistd.h>
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <linux/virtio_ring.h>
-#include <linux/virtio_pci.h>
-#include <linux/virtio_net.h>
-
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_string_fns.h>
-
-#include "virtio-net.h"
-#include "xen_vhost.h"
-
-struct virtio_watch {
- struct xs_handle *xs;
- int watch_fd;
-};
-
-
-/* device ops to add/remove device to/from data core. */
-static struct virtio_net_device_ops const *notify_ops;
-
-/* root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll *ll_root = NULL;
-
-/* root address of VM. */
-static struct xen_guestlist guest_root;
-
-static struct virtio_watch watch;
-
-static void
-vq_vring_init(struct vhost_virtqueue *vq, unsigned int num, uint8_t *p,
- unsigned long align)
-{
- vq->size = num;
- vq->desc = (struct vring_desc *) p;
- vq->avail = (struct vring_avail *) (p +
- num * sizeof(struct vring_desc));
- vq->used = (void *)
- RTE_ALIGN_CEIL( (uintptr_t)(&vq->avail->ring[num]), align);
-
-}
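vq_vring_init() carves one contiguous buffer into the three legacy-virtio
rings: the descriptor table, the avail ring immediately after it, and the
used ring aligned up to the requested boundary. The total size that layout
needs can be computed as below; this is a sketch following the legacy vring
layout, and vring_size() from the virtio headers is the authoritative version:

#include <stddef.h>

/* Bytes needed for a legacy vring of num descriptors: descriptor table,
 * avail ring, padding up to align, then the used ring. */
static size_t vring_bytes(unsigned int num, unsigned long align)
{
	size_t sz = (size_t)num * 16		 /* vring_desc entries */
		  + 2 + 2 + (size_t)num * 2 + 2; /* avail: flags, idx, ring[], event */

	sz = (sz + align - 1) & ~((size_t)align - 1);
	sz += 2 + 2 + 2 + (size_t)num * 8;	 /* used: flags, idx, event, ring[] */
	return sz;
}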
-
-static int
-init_watch(void)
-{
- struct xs_handle *xs;
- int ret;
- int fd;
-
- /* get a connection to the daemon */
- xs = xs_daemon_open();
- if (xs == NULL) {
- RTE_LOG(ERR, XENHOST, "xs_daemon_open failed\n");
- return -1;
- }
-
- ret = xs_watch(xs, "/local/domain", "mytoken");
- if (ret == 0) {
- RTE_LOG(ERR, XENHOST, "%s: xs_watch failed\n", __func__);
- xs_daemon_close(xs);
- return -1;
- }
-
- /* We are notified of read availability on the watch via the file descriptor. */
- fd = xs_fileno(xs);
- watch.xs = xs;
- watch.watch_fd = fd;
-
- TAILQ_INIT(&guest_root);
- return 0;
-}
-
-static struct xen_guest *
-get_xen_guest(int dom_id)
-{
- struct xen_guest *guest = NULL;
-
- TAILQ_FOREACH(guest, &guest_root, next) {
- if(guest->dom_id == dom_id)
- return guest;
- }
-
- return NULL;
-}
-
-
-static struct xen_guest *
-add_xen_guest(int32_t dom_id)
-{
- struct xen_guest *guest = NULL;
-
- if ((guest = get_xen_guest(dom_id)) != NULL)
- return guest;
-
- guest = calloc(1, sizeof(struct xen_guest));
- if (guest) {
-		RTE_LOG(INFO, XENHOST, " %s: returning newly created guest with %d rings\n", __func__, guest->vring_num);
- TAILQ_INSERT_TAIL(&guest_root, guest, next);
- guest->dom_id = dom_id;
- }
-
- return guest;
-}
-
-static void
-cleanup_device(struct virtio_net_config_ll *ll_dev)
-{
- if (ll_dev == NULL)
- return;
- if (ll_dev->dev.virtqueue_rx) {
- rte_free(ll_dev->dev.virtqueue_rx);
- ll_dev->dev.virtqueue_rx = NULL;
- }
- if (ll_dev->dev.virtqueue_tx) {
- rte_free(ll_dev->dev.virtqueue_tx);
- ll_dev->dev.virtqueue_tx = NULL;
- }
- free(ll_dev);
-}
-
-/*
- * Add entry containing a device to the device configuration linked list.
- */
-static void
-add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
-	/* If ll_dev == NULL then this is the first device, so it is handled in the else branch. */
- if (ll_dev) {
- /* If the 1st device_id != 0 then we insert our device here. */
- if (ll_dev->dev.device_fh != 0) {
- new_ll_dev->dev.device_fh = 0;
- new_ll_dev->next = ll_dev;
- ll_root = new_ll_dev;
- } else {
-			/* Increment through the ll until we find an unused device_id,
-			 * then insert the device at that entry.
-			 */
- while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
- ll_dev = ll_dev->next;
-
- new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
- new_ll_dev->next = ll_dev->next;
- ll_dev->next = new_ll_dev;
- }
- } else {
- ll_root = new_ll_dev;
- ll_root->dev.device_fh = 0;
- }
-}
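add_config_ll_entry() keeps the configuration list sorted by device_fh and
hands out the smallest unused handle: insert at the front if 0 is free,
otherwise walk until the ids stop being consecutive and take the gap. The id
selection in isolation (hypothetical sketch over a plain list):

#include <stddef.h>

struct cfg { unsigned long id; struct cfg *next; };

/* Smallest id not used by the sorted list; same result as the walk above. */
static unsigned long smallest_free_id(const struct cfg *head)
{
	const struct cfg *c;
	unsigned long id = 0;

	for (c = head; c != NULL; c = c->next) {
		if (c->id != id)
			break;	/* gap found: reuse this id */
		id++;
	}
	return id;
}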
-
-
-/*
- * Remove an entry from the device configuration linked list.
- */
-static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
-{
- /* First remove the device and then clean it up. */
- if (ll_dev == ll_root) {
- ll_root = ll_dev->next;
- cleanup_device(ll_dev);
- return ll_root;
- } else {
- ll_dev_last->next = ll_dev->next;
- cleanup_device(ll_dev);
- return ll_dev_last->next;
- }
-}
-
-/*
- * Retrieves an entry from the device configuration linked list.
- */
-static struct virtio_net_config_ll *
-get_config_ll_entry(unsigned int virtio_idx, unsigned int dom_id)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
- /* Loop through linked list until the dom_id is found. */
- while (ll_dev != NULL) {
- if (ll_dev->dev.dom_id == dom_id && ll_dev->dev.virtio_idx == virtio_idx)
- return ll_dev;
- ll_dev = ll_dev->next;
- }
-
- return NULL;
-}
-
-/*
- * Initialise all variables in device structure.
- */
-static void
-init_dev(struct virtio_net *dev)
-{
- RTE_SET_USED(dev);
-}
-
-
-static struct
-virtio_net_config_ll *new_device(unsigned int virtio_idx, struct xen_guest *guest)
-{
- struct virtio_net_config_ll *new_ll_dev;
- struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
- size_t size, vq_ring_size, vq_size = VQ_DESC_NUM;
- void *vq_ring_virt_mem;
- uint64_t gpa;
- uint32_t i;
-
- /* Setup device and virtqueues. */
- new_ll_dev = calloc(1, sizeof(struct virtio_net_config_ll));
- virtqueue_rx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
- virtqueue_tx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
- if (new_ll_dev == NULL || virtqueue_rx == NULL || virtqueue_tx == NULL)
- goto err;
-
- new_ll_dev->dev.virtqueue_rx = virtqueue_rx;
- new_ll_dev->dev.virtqueue_tx = virtqueue_tx;
- new_ll_dev->dev.dom_id = guest->dom_id;
- new_ll_dev->dev.virtio_idx = virtio_idx;
- /* Initialise device and virtqueues. */
- init_dev(&new_ll_dev->dev);
-
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
- (void)vq_ring_size;
-
- vq_ring_virt_mem = guest->vring[virtio_idx].rxvring_addr;
- vq_vring_init(virtqueue_rx, vq_size, vq_ring_virt_mem, VIRTIO_PCI_VRING_ALIGN);
- virtqueue_rx->size = vq_size;
- virtqueue_rx->vhost_hlen = sizeof(struct virtio_net_hdr);
-
- vq_ring_virt_mem = guest->vring[virtio_idx].txvring_addr;
- vq_vring_init(virtqueue_tx, vq_size, vq_ring_virt_mem, VIRTIO_PCI_VRING_ALIGN);
- virtqueue_tx->size = vq_size;
- memcpy(&new_ll_dev->dev.mac_address, &guest->vring[virtio_idx].addr, sizeof(struct ether_addr));
-
-	/* There must be one virtio_memory instance per domain id. */
- new_ll_dev->dev.mem = malloc(sizeof(struct virtio_memory) + sizeof(struct virtio_memory_regions) * MAX_XENVIRT_MEMPOOL);
- new_ll_dev->dev.mem->nregions = guest->pool_num;
- for (i = 0; i < guest->pool_num; i++) {
- gpa = new_ll_dev->dev.mem->regions[i].guest_phys_address =
- (uint64_t)((uintptr_t)guest->mempool[i].gva);
- new_ll_dev->dev.mem->regions[i].guest_phys_address_end =
- gpa + guest->mempool[i].mempfn_num * getpagesize();
- new_ll_dev->dev.mem->regions[i].address_offset =
- (uint64_t)((uintptr_t)guest->mempool[i].hva -
- (uintptr_t)gpa);
- }
-
- new_ll_dev->next = NULL;
-
- /* Add entry to device configuration linked list. */
- add_config_ll_entry(new_ll_dev);
- return new_ll_dev;
-err:
- free(new_ll_dev);
- rte_free(virtqueue_rx);
- rte_free(virtqueue_tx);
-
- return NULL;
-}
-
-static void
-destroy_guest(struct xen_guest *guest)
-{
- uint32_t i;
-
- for (i = 0; i < guest->vring_num; i++)
- cleanup_vring(&guest->vring[i]);
- /* clean mempool */
- for (i = 0; i < guest->pool_num; i++)
- cleanup_mempool(&guest->mempool[i]);
- free(guest);
-
- return;
-}
-
-/*
- * This function will clean up the device and remove it from the device configuration linked list.
- */
-static void
-destroy_device(unsigned int virtio_idx, unsigned int dom_id)
-{
- struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
- struct virtio_net_config_ll *ll_dev_cur = ll_root;
-
- /* clean virtio device */
- struct xen_guest *guest = NULL;
- guest = get_xen_guest(dom_id);
- if (guest == NULL)
- return;
-
- /* Find the linked list entry for the device to be removed. */
- ll_dev_cur_ctx = get_config_ll_entry(virtio_idx, dom_id);
- while (ll_dev_cur != NULL) {
-		/* When the entry for the device is found, remove it. */
- if (ll_dev_cur == ll_dev_cur_ctx) {
- if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
- notify_ops->destroy_device(&(ll_dev_cur->dev));
- ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
- } else {
- ll_dev_last = ll_dev_cur;
- ll_dev_cur = ll_dev_cur->next;
- }
- }
- RTE_LOG(INFO, XENHOST, " %s guest:%p vring:%p rxvring:%p txvring:%p flag:%p\n",
- __func__, guest, &guest->vring[virtio_idx], guest->vring[virtio_idx].rxvring_addr, guest->vring[virtio_idx].txvring_addr, guest->vring[virtio_idx].flag);
- cleanup_vring(&guest->vring[virtio_idx]);
- guest->vring[virtio_idx].removed = 1;
- guest->vring_num -= 1;
-}
-
-
-
-
-static void
-watch_unmap_event(void)
-{
- int i;
- struct xen_guest *guest = NULL;
- bool remove_request;
-
- TAILQ_FOREACH(guest, &guest_root, next) {
- for (i = 0; i < MAX_VIRTIO; i++) {
- if (guest->vring[i].dom_id && guest->vring[i].removed == 0 && *guest->vring[i].flag == 0) {
- RTE_LOG(INFO, XENHOST, "\n\n");
- RTE_LOG(INFO, XENHOST, " #####%s: (%d, %d) to be removed\n",
- __func__,
- guest->vring[i].dom_id,
- i);
- destroy_device(i, guest->dom_id);
- RTE_LOG(INFO, XENHOST, " %s: DOM %u, vring num: %d\n",
- __func__,
- guest->dom_id,
- guest->vring_num);
- }
- }
- }
-
-_find_next_remove:
- guest = NULL;
- remove_request = false;
- TAILQ_FOREACH(guest, &guest_root, next) {
- if (guest->vring_num == 0) {
- remove_request = true;
- break;
- }
- }
- if (remove_request == true) {
- TAILQ_REMOVE(&guest_root, guest, next);
- RTE_LOG(INFO, XENHOST, " #####%s: destroy guest (%d)\n", __func__, guest->dom_id);
- destroy_guest(guest);
- goto _find_next_remove;
- }
- return;
-}
-
-/*
- * If the guest starts first, that is fine; if the host starts first, that is
- * also fine. But if the guest starts, runs for some time, and the host then
- * stops and restarts, is last_used_idx reset to 0? How to handle that case
- * is an open question.
- */
-
-static void virtio_init(void)
-{
- uint32_t len, e_num;
- uint32_t i,j;
- char **dom;
- char *status;
- int dom_id;
- char path[PATH_MAX];
- char node[PATH_MAX];
- xs_transaction_t th;
- struct xen_guest *guest;
- struct virtio_net_config_ll *net_config;
- char *end;
- int val;
-
-	/* Initialise the environment for watching the node. */
- if (init_watch() < 0)
- return;
-
- dom = xs_directory(watch.xs, XBT_NULL, "/local/domain", &e_num);
-
- for (i = 0; i < e_num; i++) {
- errno = 0;
- dom_id = strtol(dom[i], &end, 0);
- if (errno != 0 || end == NULL || dom_id == 0)
- continue;
-
- for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
- snprintf(node, PATH_MAX, "%s%d", VIRTIO_START, j);
- snprintf(path, PATH_MAX, XEN_VM_NODE_FMT,
- dom_id, node);
-
- th = xs_transaction_start(watch.xs);
- status = xs_read(watch.xs, th, path, &len);
- xs_transaction_end(watch.xs, th, false);
-
- if (status == NULL)
- break;
-
- /* if there's any valid virtio device */
- errno = 0;
- val = strtol(status, &end, 0);
- if (errno != 0 || end == NULL || dom_id == 0)
- val = 0;
- if (val == 1) {
- guest = add_xen_guest(dom_id);
- if (guest == NULL)
- continue;
-				RTE_LOG(INFO, XENHOST, " a new virtio device exists; creating a new virtio device\n\n");
-
- RTE_LOG(INFO, XENHOST, " parse_vringnode dom_id %d virtioidx %d\n",dom_id,j);
- if (parse_vringnode(guest, j)) {
- RTE_LOG(ERR, XENHOST, " there is invalid information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
-
- continue;
- }
-
- /*if pool_num > 0, then mempool has already been parsed*/
- if (guest->pool_num == 0 && parse_mempoolnode(guest)) {
-					RTE_LOG(ERR, XENHOST, " there is erroneous information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
-
- net_config = new_device(j, guest);
-				/* Everything is ready now; add it to a data core. */
- notify_ops->new_device(&net_config->dev);
- }
- }
- }
-
- free(dom);
- return;
-}
-
-void
-virtio_monitor_loop(void)
-{
- char **vec;
- xs_transaction_t th;
- char *buf;
- unsigned int len;
- unsigned int dom_id;
- uint32_t virtio_idx;
- struct xen_guest *guest;
- struct virtio_net_config_ll *net_config;
- enum fieldnames {
- FLD_NULL = 0,
- FLD_LOCAL,
- FLD_DOMAIN,
- FLD_ID,
- FLD_CONTROL,
- FLD_DPDK,
- FLD_NODE,
- _NUM_FLD
- };
- char *str_fld[_NUM_FLD];
- char *str;
- char *end;
-
- virtio_init();
- while (1) {
- watch_unmap_event();
-
- usleep(50);
- vec = xs_check_watch(watch.xs);
-
- if (vec == NULL)
- continue;
-
- th = xs_transaction_start(watch.xs);
-
- buf = xs_read(watch.xs, th, vec[XS_WATCH_PATH],&len);
- xs_transaction_end(watch.xs, th, false);
-
- if (buf) {
-			/* There is a vhost node present. */
- if (rte_strsplit(vec[XS_WATCH_PATH], strnlen(vec[XS_WATCH_PATH], PATH_MAX),
- str_fld, _NUM_FLD, '/') == _NUM_FLD) {
- if (strstr(str_fld[FLD_NODE], VIRTIO_START)) {
- errno = 0;
- str = str_fld[FLD_ID];
- dom_id = strtoul(str, &end, 0);
- if (errno != 0 || end == NULL || end == str ) {
- RTE_LOG(INFO, XENHOST, "invalid domain id\n");
- continue;
- }
-
- errno = 0;
- str = str_fld[FLD_NODE] + sizeof(VIRTIO_START) - 1;
- virtio_idx = strtoul(str, &end, 0);
- if (errno != 0 || end == NULL || end == str
- || virtio_idx > MAX_VIRTIO) {
- RTE_LOG(INFO, XENHOST, "invalid virtio idx\n");
- continue;
- }
- RTE_LOG(INFO, XENHOST, " #####virtio dev (%d, %d) is started\n", dom_id, virtio_idx);
-
- guest = add_xen_guest(dom_id);
- if (guest == NULL)
- continue;
- guest->dom_id = dom_id;
- if (parse_vringnode(guest, virtio_idx)) {
- RTE_LOG(ERR, XENHOST, " there is invalid information in xenstore\n");
-						/* Was the guest newly created, or did it already exist? */
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
- /*if pool_num > 0, then mempool has already been parsed*/
- if (guest->pool_num == 0 && parse_mempoolnode(guest)) {
-						RTE_LOG(ERR, XENHOST, " there is erroneous information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
-
-
- net_config = new_device(virtio_idx, guest);
- RTE_LOG(INFO, XENHOST, " Add to dataplane core\n");
- notify_ops->new_device(&net_config->dev);
-
- }
- }
- }
-
- free(vec);
- }
- return;
-}
-
-/*
- * Register ops so that we can add/remove devices to/from a data core.
- */
-int
-init_virtio_xen(struct virtio_net_device_ops const *const ops)
-{
- notify_ops = ops;
- if (xenhost_init())
- return -1;
- return 0;
-}
diff --git a/examples/vhost_xen/virtio-net.h b/examples/vhost_xen/virtio-net.h
deleted file mode 100644
index ab697260..00000000
--- a/examples/vhost_xen/virtio-net.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTIO_NET_H_
-#define _VIRTIO_NET_H_
-
-#include <stdint.h>
-
-#define VQ_DESC_NUM 256
-/* Used to indicate that the device is running on a data core */
-#define VIRTIO_DEV_RUNNING 1
-
-/*
- * Structure contains variables relevant to TX/RX virtqueues.
- */
-struct vhost_virtqueue
-{
- struct vring_desc *desc; /* Virtqueue descriptor ring. */
- struct vring_avail *avail; /* Virtqueue available ring. */
- struct vring_used *used; /* Virtqueue used ring. */
- uint32_t size; /* Size of descriptor ring. */
-	uint32_t vhost_hlen; /* Vhost header length (varies depending on RX mergeable buffers). */
- volatile uint16_t last_used_idx; /* Last index used on the available ring */
- volatile uint16_t last_used_idx_res; /* Used for multiple devices reserving buffers. */
-} __rte_cache_aligned;
-
-/*
- * Device structure contains all configuration information relating to the device.
- */
-struct virtio_net
-{
- struct vhost_virtqueue *virtqueue_tx; /* Contains all TX virtqueue information. */
- struct vhost_virtqueue *virtqueue_rx; /* Contains all RX virtqueue information. */
- struct virtio_memory *mem; /* QEMU memory and memory region information. */
- struct ether_addr mac_address; /* Device MAC address (Obtained on first TX packet). */
- uint32_t flags; /* Device flags. Only used to check if device is running on data core. */
- uint32_t vlan_tag; /* Vlan tag for device. Currently set to device_id (0-63). */
- uint32_t vmdq_rx_q;
- uint64_t device_fh; /* device identifier. */
- uint16_t coreid;
- volatile uint8_t ready; /* A device is set as ready if the MAC address has been set. */
- volatile uint8_t remove; /* Device is marked for removal from the data core. */
- uint32_t virtio_idx; /* Index of virtio device */
- uint32_t dom_id; /* Domain id of xen guest */
-} __rte_cache_aligned;
-
-/*
- * Device linked list structure for configuration.
- */
-struct virtio_net_config_ll
-{
- struct virtio_net dev; /* Virtio device. */
- struct virtio_net_config_ll *next; /* Next entry on linked list. */
-};
-
-/*
- * Information relating to memory regions including offsets to addresses in QEMUs memory file.
- */
-struct virtio_memory_regions {
- uint64_t guest_phys_address; /* Base guest physical address of region. */
- uint64_t guest_phys_address_end; /* End guest physical address of region. */
- uint64_t memory_size; /* Size of region. */
- uint64_t userspace_address; /* Base userspace address of region. */
- uint64_t address_offset; /* Offset of region for address translation. */
-};
-
-/*
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
- uint32_t nregions; /* Number of memory regions. */
- struct virtio_memory_regions regions[0]; /* Memory region information. */
-};
-
-/*
- * Device operations to add/remove device.
- */
-struct virtio_net_device_ops {
- int (* new_device)(struct virtio_net *); /* Add device. */
- void (* destroy_device) (volatile struct virtio_net *); /* Remove device. */
-};
-
-#endif
diff --git a/examples/vhost_xen/xen_vhost.h b/examples/vhost_xen/xen_vhost.h
deleted file mode 100644
index 2fc304c7..00000000
--- a/examples/vhost_xen/xen_vhost.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _XEN_VHOST_H_
-#define _XEN_VHOST_H_
-
-#include <stdint.h>
-
-#include <rte_ether.h>
-
-#include "virtio-net.h"
-
-#define RTE_LOGTYPE_XENHOST RTE_LOGTYPE_USER1
-
-#define XEN_VM_ROOTNODE_FMT "/local/domain/%d/control/dpdk"
-#define XEN_VM_NODE_FMT "/local/domain/%d/control/dpdk/%s"
-#define XEN_MEMPOOL_SUFFIX "mempool_gref"
-#define XEN_RXVRING_SUFFIX "rx_vring_gref"
-#define XEN_TXVRING_SUFFIX "tx_vring_gref"
-#define XEN_GVA_SUFFIX "mempool_va"
-#define XEN_VRINGFLAG_SUFFIX "vring_flag"
-#define XEN_ADDR_SUFFIX "ether_addr"
-#define VIRTIO_START "event_type_start_"
-
-#define XEN_GREF_SPLITTOKEN ','
-
-#define MAX_XENVIRT_MEMPOOL 16
-#define MAX_VIRTIO 32
-#define MAX_GREF_PER_NODE 64 /* 128 MB memory */
-
-#define PAGE_SIZE 4096
-#define PAGE_PFNNUM (PAGE_SIZE / sizeof(uint32_t))
-
-#define XEN_GNTDEV_FNAME "/dev/xen/gntdev"
-
-/* xen grant reference info in one grant node */
-struct xen_gnt {
- uint32_t gref; /* grant reference for this node */
- union {
- int gref; /* grant reference */
- uint32_t pfn_num; /* guest pfn number of grant reference */
- } gref_pfn[PAGE_PFNNUM];
-}__attribute__((__packed__));
-
-
-/* structure for mempool or vring node list */
-struct xen_gntnode {
- uint32_t gnt_num; /* grant reference number */
- struct xen_gnt *gnt_info; /* grant reference info */
-};
-
-
-struct xen_vring {
- uint32_t dom_id;
- uint32_t virtio_idx; /* index of virtio device */
- void *rxvring_addr; /* mapped virtual address of rxvring */
- void *txvring_addr; /* mapped virtual address of txvring */
- uint32_t rxpfn_num; /* number of gpfn for rxvring */
- uint32_t txpfn_num; /* number of gpfn for txvring */
- uint32_t *rxpfn_tbl; /* array of rxvring gpfn */
- uint32_t *txpfn_tbl; /* array of txvring gpfn */
- uint64_t *rx_pindex; /* index used to release rx grefs */
- uint64_t *tx_pindex; /* index used to release tx grefs */
- uint64_t flag_index;
- uint8_t *flag; /* cleared to zero on guest unmap */
- struct ether_addr addr; /* ethernet address of virtio device */
- uint8_t removed;
-
-};
-
-struct xen_mempool {
- uint32_t dom_id; /* guest domain id */
- uint32_t pool_idx; /* index of memory pool */
- void *gva; /* guest virtual address of mbuf pool */
- void *hva; /* host virtual address of mbuf pool */
- uint32_t mempfn_num; /* number of gpfn for mbuf pool */
- uint32_t *mempfn_tbl; /* array of mbuf pool gpfn */
- uint64_t *pindex; /* index used to release grefs */
-};
-
-struct xen_guest {
- TAILQ_ENTRY(xen_guest) next;
- int32_t dom_id; /* guest domain id */
- uint32_t pool_num; /* number of mbuf pool of the guest */
- uint32_t vring_num; /* number of virtio ports of the guest */
- /* array contain the guest mbuf pool info */
- struct xen_mempool mempool[MAX_XENVIRT_MEMPOOL];
- /* array contain the guest rx/tx vring info */
- struct xen_vring vring[MAX_VIRTIO];
-};
-
-TAILQ_HEAD(xen_guestlist, xen_guest);
-
-int
-parse_mempoolnode(struct xen_guest *guest);
-
-int
-xenhost_init(void);
-
-int
-parse_vringnode(struct xen_guest *guest, uint32_t virtio_idx);
-
-void
-cleanup_mempool(struct xen_mempool *mempool);
-
-void
-cleanup_vring(struct xen_vring *vring);
-
-void
-virtio_monitor_loop(void);
-
-int
-init_virtio_xen(struct virtio_net_device_ops const * const);
-
-#endif
diff --git a/examples/vhost_xen/xenstore_parse.c b/examples/vhost_xen/xenstore_parse.c
deleted file mode 100644
index ab089f1b..00000000
--- a/examples/vhost_xen/xenstore_parse.c
+++ /dev/null
@@ -1,775 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <xen/sys/gntalloc.h>
-#include <xen/sys/gntdev.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-
-#include <rte_common.h>
-#include <rte_memory.h>
-#include <rte_eal.h>
-#include <rte_malloc.h>
-#include <rte_string_fns.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-
-#include "xen_vhost.h"
-
-/* xenstore handle */
-static struct xs_handle *xs = NULL;
-
-/* gntdev file descriptor to map grant pages */
-static int d_fd = -1;
-
-/*
- * The grant node format in xenstore for vring/mpool is like:
- * idx#_rx_vring_gref = "gref1#, gref2#, gref3#"
- * idx#_mempool_gref = "gref1#, gref2#, gref3#"
- * each gref# is the grant reference for a shared page.
- * In each shared page, we store the grant_node_item items.
- */
-struct grant_node_item {
- uint32_t gref;
- uint32_t pfn;
-} __attribute__((packed));
-
-int cmdline_parse_etheraddr(void *tk, const char *srcbuf,
- void *res, unsigned ressize);
-
-/* Map grant ref refid at addr_ori. */
-static void *
-xen_grant_mmap(void *addr_ori, int domid, int refid, uint64_t *pindex)
-{
- struct ioctl_gntdev_map_grant_ref arg;
- void *addr = NULL;
- int pg_sz = getpagesize();
-
- arg.count = 1;
- arg.refs[0].domid = domid;
- arg.refs[0].ref = refid;
-
- int rv = ioctl(d_fd, IOCTL_GNTDEV_MAP_GRANT_REF, &arg);
- if (rv) {
- RTE_LOG(ERR, XENHOST, " %s: (%d,%d) %s (ioctl failed)\n", __func__,
- domid, refid, strerror(errno));
- return NULL;
- }
-
- if (addr_ori == NULL)
- addr = mmap(addr_ori, pg_sz, PROT_READ|PROT_WRITE, MAP_SHARED,
- d_fd, arg.index);
- else
- addr = mmap(addr_ori, pg_sz, PROT_READ|PROT_WRITE, MAP_SHARED | MAP_FIXED,
- d_fd, arg.index);
-
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, XENHOST, " %s: (%d, %d) %s (map failed)\n", __func__,
- domid, refid, strerror(errno));
- return NULL;
- }
-
- if (pindex)
- *pindex = arg.index;
-
- return addr;
-}
-
-/* Unmap one grant ref, and munmap must be called before this */
-static int
-xen_unmap_grant_ref(uint64_t index)
-{
- struct ioctl_gntdev_unmap_grant_ref arg;
- int rv;
-
- arg.count = 1;
- arg.index = index;
- rv = ioctl(d_fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &arg);
- if (rv) {
-		RTE_LOG(ERR, XENHOST, " %s: index 0x%" PRIx64 " unmap failed\n", __func__, index);
- return -1;
- }
- return 0;
-}
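
For orientation, a minimal sketch of the intended lifecycle of the two helpers above, as seen from a hypothetical caller (the domid and gref values are placeholders, not taken from this file):

    uint64_t index;
    int domid = 1, gref = 42;                /* illustrative values */
    void *page = xen_grant_mmap(NULL, domid, gref, &index);
    if (page != NULL) {
        /* ... read or write the shared page ... */
        munmap(page, getpagesize());         /* unmap the VA range first */
        xen_unmap_grant_ref(index);          /* then release the grant handle */
    }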
-
-/*
- * Reserve a virtual address space.
- * On success, returns the pointer. On failure, returns NULL.
- */
-static void *
-get_xen_virtual(size_t size, size_t page_sz)
-{
- void *addr;
- uintptr_t aligned_addr;
-
- addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, XENHOST, "failed get a virtual area\n");
- return NULL;
- }
-
- aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
- munmap(addr, aligned_addr - (uintptr_t)addr);
- munmap((void *)(aligned_addr + size), page_sz + (uintptr_t)addr - aligned_addr);
- addr = (void *)(aligned_addr);
-
- return addr;
-}
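
The over-reserve-and-trim arithmetic above is easier to follow with concrete numbers (illustrative, assuming 4 KiB pages):

    /* mmap(NULL, size + pg_sz, ...) returns addr = 0x7f0000000800, size = 0x2000
     * aligned_addr = RTE_ALIGN_CEIL(addr, 0x1000) = 0x7f0000001000
     * head trim: munmap(addr, 0x800)                 drops the unaligned head
     * tail trim: munmap(aligned_addr + size, 0x800)  drops the surplus tail
     * result: exactly [0x7f0000001000, 0x7f0000003000) remains reserved
     */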
-
-static void
-free_xen_virtual(void *addr, size_t size, size_t page_sz __rte_unused)
-{
- if (addr)
- munmap(addr, size);
-}
-
-/*
- * Returns the value string stored at a xenstore path.
- * @param path
- *   Full path string for the key
- * @return
- *   Pointer to the value string, NULL on failure
- */
-static char *
-xen_read_node(char *path, uint32_t *len)
-{
- char *buf;
-
- buf = xs_read(xs, XBT_NULL, path, len);
- return buf;
-}
-
-static int
-cal_pagenum(struct xen_gnt *gnt)
-{
- unsigned int i;
- /*
- * The items in the page are laid out as
- * gref#,pfn#,...,gref#,pfn#
- * FIXME: gref 0 is reserved by the system, so it is used as the terminator.
- */
- for (i = 0; i < (PAGE_PFNNUM) / 2; i++) {
- if (gnt->gref_pfn[i * 2].gref <= 0)
- break;
- }
-
- return i;
-}
-
-/* Frees memory allocated to a grant node */
-static void
-xen_free_gntnode(struct xen_gntnode *gntnode)
-{
- if (gntnode == NULL)
- return;
- free(gntnode->gnt_info);
- free(gntnode);
-}
-
-/*
- * Parse a grant node.
- * @param domid
- * Guest domain id.
- * @param path
- * Full path string for a grant node, e.g. for the (key, val) pair
- * idx#_mempool_gref = "gref#, gref#, gref#"
- * path = 'local/domain/domid/control/dpdk/idx#_mempool_gref'
- * each gref# names a shared page containing packed (gref, pfn) entries
- * @return
- * Returns the pointer to xen_gntnode
- */
-static struct xen_gntnode *
-parse_gntnode(int dom_id, char *path)
-{
- char **gref_list = NULL;
- uint32_t i, len, gref_num;
- void *addr = NULL;
- char *buf = NULL;
- struct xen_gntnode *gntnode = NULL;
- struct xen_gnt *gnt = NULL;
- int pg_sz = getpagesize();
- char *end;
- uint64_t index;
-
- if ((buf = xen_read_node(path, &len)) == NULL)
- goto err;
-
- gref_list = malloc(MAX_GREF_PER_NODE * sizeof(char *));
- if (gref_list == NULL)
- goto err;
-
- gref_num = rte_strsplit(buf, len, gref_list, MAX_GREF_PER_NODE,
- XEN_GREF_SPLITTOKEN);
- if (gref_num == 0) {
- RTE_LOG(ERR, XENHOST, " %s: invalid grant node format\n", __func__);
- goto err;
- }
-
- gntnode = calloc(1, sizeof(struct xen_gntnode));
- gnt = calloc(gref_num, sizeof(struct xen_gnt));
- if (gnt == NULL || gntnode == NULL)
- goto err;
-
- for (i = 0; i < gref_num; i++) {
- errno = 0;
- gnt[i].gref = strtol(gref_list[i], &end, 0);
- if (errno != 0 || end == NULL || end == gref_list[i] ||
- (*end != '\0' && *end != XEN_GREF_SPLITTOKEN)) {
- RTE_LOG(ERR, XENHOST, " %s: parse grant node item failed\n", __func__);
- goto err;
- }
- addr = xen_grant_mmap(NULL, dom_id, gnt[i].gref, &index);
- if (addr == NULL) {
- RTE_LOG(ERR, XENHOST, " %s: map gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
- RTE_LOG(INFO, XENHOST, " %s: map gref %u to %p\n", __func__, gnt[i].gref, addr);
- memcpy(gnt[i].gref_pfn, addr, pg_sz);
- if (munmap(addr, pg_sz)) {
- RTE_LOG(INFO, XENHOST, " %s: unmap gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
- if (xen_unmap_grant_ref(index)) {
- RTE_LOG(INFO, XENHOST, " %s: release gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
-
- }
-
- gntnode->gnt_num = gref_num;
- gntnode->gnt_info = gnt;
-
- free(buf);
- free(gref_list);
- return gntnode;
-
-err:
- free(gnt);
- free(gntnode);
- free(gref_list);
- free(buf);
- return NULL;
-}
-
-/*
- * This function maps the grant node of a vring or mbuf pool to a contiguous virtual address space,
- * and returns the mapped address, pfn array and index array
- * @param gntnode
- * Pointer to grant node
- * @param domid
- * Guest domain id
- * @param ppfn
- * Pointer to pfn array, caller should free this array
- * @param pgs
- * Pointer to number of pages
- * @param ppindex
- * Pointer to index array, used to release the grefs when this node is freed
- * @return
- * Pointer to mapped virtual address, NULL on failure
- */
-static void *
-map_gntnode(struct xen_gntnode *gntnode, int domid, uint32_t **ppfn, uint32_t *pgs, uint64_t **ppindex)
-{
- struct xen_gnt *gnt;
- uint32_t i, j;
- size_t total_pages = 0;
- void *addr;
- uint32_t *pfn;
- uint64_t *pindex;
- uint32_t pfn_num = 0;
- int pg_sz;
-
- if (gntnode == NULL)
- return NULL;
-
- pg_sz = getpagesize();
- for (i = 0; i < gntnode->gnt_num; i++) {
- gnt = gntnode->gnt_info + i;
- total_pages += cal_pagenum(gnt);
- }
- if ((addr = get_xen_virtual(total_pages * pg_sz, pg_sz)) == NULL) {
- RTE_LOG(ERR, XENHOST, " %s: failed get_xen_virtual\n", __func__);
- return NULL;
- }
- pfn = calloc(total_pages, (size_t)sizeof(uint32_t));
- pindex = calloc(total_pages, (size_t)sizeof(uint64_t));
- if (pfn == NULL || pindex == NULL) {
- free_xen_virtual(addr, total_pages * pg_sz, pg_sz);
- free(pfn);
- free(pindex);
- return NULL;
- }
-
- RTE_LOG(INFO, XENHOST, " %s: total pages:%zu, map to [%p, %p]\n", __func__, total_pages, addr, RTE_PTR_ADD(addr, total_pages * pg_sz - 1));
- for (i = 0; i < gntnode->gnt_num; i++) {
- gnt = gntnode->gnt_info + i;
- for (j = 0; j < (PAGE_PFNNUM) / 2; j++) {
- if ((gnt->gref_pfn[j * 2].gref) <= 0)
- goto _end;
- /* alternative: batch map, or go through libxc */
- if (xen_grant_mmap(RTE_PTR_ADD(addr, pfn_num * pg_sz),
- domid,
- gnt->gref_pfn[j * 2].gref,
- &pindex[pfn_num]) == NULL) {
- goto mmap_failed;
- }
- pfn[pfn_num] = gnt->gref_pfn[j * 2 + 1].pfn_num;
- pfn_num++;
- }
- }
-
-mmap_failed:
- if (pfn_num)
- munmap(addr, pfn_num * pg_sz);
- for (i = 0; i < pfn_num; i++) {
- xen_unmap_grant_ref(pindex[i]);
- }
- free(pindex);
- free(pfn);
- return NULL;
-
-_end:
- if (ppindex)
- *ppindex = pindex;
- else
- free(pindex);
- if (ppfn)
- *ppfn = pfn;
- else
- free(pfn);
- if (pgs)
- *pgs = total_pages;
-
- return addr;
-}
-
-static int
-parse_mpool_va(struct xen_mempool *mempool)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len;
- char *end;
- int ret = -1;
-
- errno = 0;
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_GVA_SUFFIX,
- mempool->dom_id, mempool->pool_idx);
-
- if((buf = xen_read_node(path, &len)) == NULL)
- goto out;
- mempool->gva = (void *)strtoul(buf, &end, 16);
- if (errno != 0 || end == NULL || end == buf || *end != '\0') {
- mempool->gva = NULL;
- goto out;
- }
- ret = 0;
-out:
- free(buf);
- return ret;
-}
-
-/*
- * map mbuf pool
- */
-static int
-map_mempoolnode(struct xen_gntnode *gntnode,
- struct xen_mempool *mempool)
-{
- if (gntnode == NULL || mempool == NULL)
- return -1;
-
- mempool->hva =
- map_gntnode(gntnode, mempool->dom_id, &mempool->mempfn_tbl, &mempool->mempfn_num, &mempool->pindex);
-
- RTE_LOG(INFO, XENHOST, " %s: map mempool at %p\n", __func__, (void *)mempool->hva);
- if (mempool->hva)
- return 0;
- else {
- return -1;
- }
-}
-
-void
-cleanup_mempool(struct xen_mempool *mempool)
-{
- int pg_sz = getpagesize();
- uint32_t i;
-
- if (mempool->hva)
- munmap(mempool->hva, mempool->mempfn_num * pg_sz);
- mempool->hva = NULL;
-
- if (mempool->pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap dom %02u mempool%02u %u grefs\n",
- __func__,
- mempool->dom_id,
- mempool->pool_idx,
- mempool->mempfn_num);
- for (i = 0; i < mempool->mempfn_num; i ++) {
- xen_unmap_grant_ref(mempool->pindex[i]);
- }
- }
- mempool->pindex = NULL;
-
- free(mempool->mempfn_tbl);
- mempool->mempfn_tbl = NULL;
-}
-
-/*
- * process mempool node idx#_mempool_gref, idx = 0, 1, 2...
- * until we encounter a node that doesn't exist.
- */
-int
-parse_mempoolnode(struct xen_guest *guest)
-{
- uint32_t i, len;
- char path[PATH_MAX] = {0};
- struct xen_gntnode *gntnode = NULL;
- struct xen_mempool *mempool = NULL;
- char *buf;
-
- bzero(&guest->mempool, MAX_XENVIRT_MEMPOOL * sizeof(guest->mempool[0]));
- guest->pool_num = 0;
-
- while (1) {
- /* check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_MEMPOOL_SUFFIX,
- guest->dom_id,
- guest->pool_num);
-
- if ((buf = xen_read_node(path, &len)) != NULL) {
- /* this node exists */
- free(buf);
- } else {
- if (guest->pool_num == 0) {
- RTE_LOG(ERR, PMD, "no mempool found\n");
- return -1;
- }
- break;
- }
-
- mempool = &guest->mempool[guest->pool_num];
- mempool->dom_id = guest->dom_id;
- mempool->pool_idx = guest->pool_num;
-
- RTE_LOG(INFO, XENHOST, " %s: mempool %u parse gntnode %s\n", __func__, guest->pool_num, path);
- gntnode = parse_gntnode(guest->dom_id, path);
- if (gntnode == NULL)
- goto err;
-
- if (parse_mpool_va(mempool))
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: mempool %u map gntnode %s\n", __func__, guest->pool_num, path);
- if (map_mempoolnode(gntnode, mempool))
- goto err;
-
- xen_free_gntnode(gntnode);
- guest->pool_num++;
- }
-
- return 0;
-err:
- if (gntnode)
- xen_free_gntnode(gntnode);
- for (i = 0; i < MAX_XENVIRT_MEMPOOL ; i++) {
- cleanup_mempool(&guest->mempool[i]);
- }
- /* reinitialise mempool */
- bzero(&guest->mempool, MAX_XENVIRT_MEMPOOL * sizeof(guest->mempool[0]));
- return -1;
-}
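
Taken together, a host-side consumer would drive these entry points roughly as follows; this is a hedged sketch, and the domid value and the struct xen_guest initialisation are assumptions, not code from this file:

    struct xen_guest guest;
    memset(&guest, 0, sizeof(guest));
    guest.dom_id = 5;                  /* illustrative guest domain id */
    if (xenhost_init() == 0 && parse_mempoolnode(&guest) == 0) {
        /* guest.mempool[0 .. guest.pool_num) now mapped into host VA space */
    }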
-
-static int
-xen_map_vringflag(struct xen_vring *vring)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len,gref;
- int pg_sz = getpagesize();
- char *end;
-
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_VRINGFLAG_SUFFIX,
- vring->dom_id, vring->virtio_idx);
-
- if((buf = xen_read_node(path, &len)) == NULL)
- goto err;
-
- errno = 0;
- gref = strtol(buf, &end, 0);
- if (errno != 0 || end == NULL || end == buf) {
- goto err;
- }
- vring->flag = xen_grant_mmap(0, vring->dom_id, gref, &vring->flag_index);
- if (vring->flag == NULL || *vring->flag == 0)
- goto err;
-
- free(buf);
- return 0;
-err:
- free(buf);
- if (vring->flag) {
- munmap(vring->flag, pg_sz);
- vring->flag = NULL;
- xen_unmap_grant_ref(vring->flag_index);
- }
- return -1;
-}
-
-
-static int
-xen_map_rxvringnode(struct xen_gntnode *gntnode,
- struct xen_vring *vring)
-{
- vring->rxvring_addr =
- map_gntnode(gntnode, vring->dom_id, &vring->rxpfn_tbl, &vring->rxpfn_num, &vring->rx_pindex);
- RTE_LOG(INFO, XENHOST, " %s: map rx vring at %p\n", __func__, (void *)vring->rxvring_addr);
- if (vring->rxvring_addr)
- return 0;
- else
- return -1;
-}
-
-static int
-xen_map_txvringnode(struct xen_gntnode *gntnode,
- struct xen_vring *vring)
-{
- vring->txvring_addr =
- map_gntnode(gntnode, vring->dom_id, &vring->txpfn_tbl, &vring->txpfn_num, &vring->tx_pindex);
- RTE_LOG(INFO, XENHOST, " %s: map tx vring at %p\n", __func__, (void *)vring->txvring_addr);
- if (vring->txvring_addr)
- return 0;
- else
- return -1;
-}
-
-void
-cleanup_vring(struct xen_vring *vring)
-{
- int pg_sz = getpagesize();
- uint32_t i;
-
- RTE_LOG(INFO, XENHOST, " %s: cleanup dom %u vring %u\n", __func__, vring->dom_id, vring->virtio_idx);
- if (vring->rxvring_addr) {
- munmap(vring->rxvring_addr, vring->rxpfn_num * pg_sz);
- RTE_LOG(INFO, XENHOST, " %s: unmap rx vring [%p, %p]\n",
- __func__,
- vring->rxvring_addr,
- RTE_PTR_ADD(vring->rxvring_addr,
- vring->rxpfn_num * pg_sz - 1));
- }
- vring->rxvring_addr = NULL;
-
-
- if (vring->rx_pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap rx vring %u grefs\n", __func__, vring->rxpfn_num);
- for (i = 0; i < vring->rxpfn_num; i++) {
- xen_unmap_grant_ref(vring->rx_pindex[i]);
- }
- }
- vring->rx_pindex = NULL;
-
- free(vring->rxpfn_tbl);
- vring->rxpfn_tbl = NULL;
-
- if (vring->txvring_addr) {
- munmap(vring->txvring_addr, vring->txpfn_num * pg_sz);
- RTE_LOG(INFO, XENHOST, " %s: unmap tx vring [%p, %p]\n",
- __func__,
- vring->txvring_addr,
- RTE_PTR_ADD(vring->txvring_addr,
- vring->txpfn_num * pg_sz - 1));
- }
- vring->txvring_addr = NULL;
-
- if (vring->tx_pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap tx vring %u grefs\n", __func__, vring->txpfn_num);
- for (i = 0; i < vring->txpfn_num; i++) {
- xen_unmap_grant_ref(vring->tx_pindex[i]);
- }
- }
- vring->tx_pindex = NULL;
-
- free(vring->txpfn_tbl);
- vring->txpfn_tbl = NULL;
-
- if (vring->flag) {
- if (!munmap((void *)vring->flag, pg_sz))
- RTE_LOG(INFO, XENHOST, " %s: unmap flag page at %p\n", __func__, vring->flag);
- if (!xen_unmap_grant_ref(vring->flag_index))
- RTE_LOG(INFO, XENHOST, " %s: release flag ref index 0x%" PRIx64 "\n", __func__, vring->flag_index);
- }
- vring->flag = NULL;
- return;
-}
-
-
-
-static int
-xen_parse_etheraddr(struct xen_vring *vring)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len;
- int ret = -1;
-
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_ADDR_SUFFIX,
- vring->dom_id, vring->virtio_idx);
-
- if ((buf = xen_read_node(path, &len)) == NULL)
- goto out;
-
- if (cmdline_parse_etheraddr(NULL, buf, &vring->addr,
- sizeof(vring->addr)) < 0)
- goto out;
- ret = 0;
-out:
- free(buf);
- return ret;
-}
-
-
-int
-parse_vringnode(struct xen_guest *guest, uint32_t virtio_idx)
-{
- char path[PATH_MAX] = {0};
- struct xen_gntnode *rx_gntnode = NULL;
- struct xen_gntnode *tx_gntnode = NULL;
- struct xen_vring *vring = NULL;
-
- /*check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_RXVRING_SUFFIX,
- guest->dom_id,
- virtio_idx);
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u parse rx gntnode %s\n", __func__, virtio_idx, path);
- rx_gntnode = parse_gntnode(guest->dom_id, path);
- if (rx_gntnode == NULL)
- goto err;
-
- /*check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_TXVRING_SUFFIX,
- guest->dom_id,
- virtio_idx);
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u parse tx gntnode %s\n", __func__, virtio_idx, path);
- tx_gntnode = parse_gntnode(guest->dom_id, path);
- if (tx_gntnode == NULL)
- goto err;
-
- vring = &guest->vring[virtio_idx];
- bzero(vring, sizeof(*vring));
- vring->dom_id = guest->dom_id;
- vring->virtio_idx = virtio_idx;
-
- if (xen_parse_etheraddr(vring) != 0)
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u map rx gntnode %s\n", __func__, virtio_idx, path);
- if (xen_map_rxvringnode(rx_gntnode, vring) != 0)
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u map tx gntnode %s\n", __func__, virtio_idx, path);
- if (xen_map_txvringnode(tx_gntnode, vring) != 0)
- goto err;
-
- if (xen_map_vringflag(vring) != 0)
- goto err;
-
- guest->vring_num++;
-
- xen_free_gntnode(rx_gntnode);
- xen_free_gntnode(tx_gntnode);
-
- return 0;
-
-err:
- if (rx_gntnode)
- xen_free_gntnode(rx_gntnode);
- if (tx_gntnode)
- xen_free_gntnode(tx_gntnode);
- if (vring) {
- cleanup_vring(vring);
- bzero(vring, sizeof(*vring));
- }
- return -1;
-}
-
-/*
- * Open xen grant dev driver
- * @return
- * 0 on success, -1 on failure.
- */
-static int
-xen_grant_init(void)
-{
- d_fd = open(XEN_GNTDEV_FNAME, O_RDWR);
-
- return d_fd == -1? (-1): (0);
-}
-
-/*
- * Initialise xenstore handle and open grant dev driver.
- * @return
- * 0 on success, -1 on failure.
- */
-int
-xenhost_init(void)
-{
- xs = xs_daemon_open();
- if (xs == NULL) {
- rte_panic("failed initialize xen daemon handler");
- return -1;
- }
- if (xen_grant_init())
- return -1;
- return 0;
-}
diff --git a/examples/vm_power_manager/Makefile b/examples/vm_power_manager/Makefile
index 59a96417..9cf20a28 100644
--- a/examples/vm_power_manager/Makefile
+++ b/examples/vm_power_manager/Makefile
@@ -54,6 +54,22 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lvirt
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
+LDLIBS += -lrte_pmd_i40e
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y)
+LDLIBS += -lrte_pmd_bnxt
+endif
+
+endif
+
# workaround for a gcc bug with noreturn attribute
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index e068ae28..ab856bdb 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -574,6 +574,73 @@ set_channel_status(const char *vm_name, unsigned *channel_list,
return num_channels_changed;
}
+void
+get_all_vm(int *num_vm, int *num_vcpu)
+{
+
+ virNodeInfo node_info;
+ virDomainPtr *domptr;
+ uint64_t mask;
+ int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
+ unsigned int jj;
+ const char *vm_name;
+ unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
+ VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
+ unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
+
+
+ memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+ if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+ return;
+ }
+
+ /* Returns number of pcpus */
+ global_n_host_cpus = (unsigned int)node_info.cpus;
+
+ /* Returns number of active domains */
+ *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
+ domain_flags);
+ if (*num_vm <= 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
+ return;
+ }
+
+ for (i = 0; i < *num_vm; i++) {
+
+ /* Get Domain Names */
+ vm_name = virDomainGetName(domptr[i]);
+ lvm_info[i].vm_name = vm_name;
+
+ /* Get Number of Vcpus */
+ numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
+
+ /* Get Number of VCpus & VcpuPinInfo */
+ n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
+ numVcpus[i], global_cpumaps,
+ global_maplen, domain_flag);
+
+ if ((int)n_vcpus > 0) {
+ *num_vcpu = n_vcpus;
+ lvm_info[i].num_cpus = n_vcpus;
+ }
+
+ /* Save pcpu in use by libvirt VMs */
+ for (ii = 0; ii < n_vcpus; ii++) {
+ mask = 0;
+ for (jj = 0; jj < global_n_host_cpus; jj++) {
+ if (VIR_CPU_USABLE(global_cpumaps,
+ global_maplen, ii, jj) > 0) {
+ mask |= 1ULL << jj;
+ }
+ }
+ ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
+ lvm_info[i].pcpus[ii] = cpu;
+ }
+ }
+ }
+}
+
int
get_info_vm(const char *vm_name, struct vm_info *info)
{
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 47c3b9cd..358fb8f2 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -66,6 +66,17 @@ struct sockaddr_un _sockaddr_un;
#define UNIX_PATH_MAX sizeof(_sockaddr_un.sun_path)
#endif
+#define MAX_VMS 4
+#define MAX_VCPUS 20
+
+
+struct libvirt_vm_info {
+ const char *vm_name;
+ unsigned int pcpus[MAX_VCPUS];
+ uint8_t num_cpus;
+};
+
+struct libvirt_vm_info lvm_info[MAX_VMS];
/* Communication Channel Status */
enum channel_status { CHANNEL_MGR_CHANNEL_DISCONNECTED = 0,
CHANNEL_MGR_CHANNEL_CONNECTED,
@@ -319,6 +330,20 @@ int set_channel_status(const char *vm_name, unsigned *channel_list,
*/
int get_info_vm(const char *vm_name, struct vm_info *info);
+/**
+ * Populates a table with all running domains and their physical CPUs.
+ * All information is gathered through the libvirt API.
+ *
+ * @param num_vm
+ * modified to store number of active VMs
+ *
+ * @param num_vcpu
+ *  modified to store number of active vcpus
+ *
+ * @return
+ * void
+ */
+void get_all_vm(int *num_vm, int *num_vcpu);
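
A hedged usage sketch for this API (the lvm_info table it populates is declared above; loop bounds come from the out-parameters):

    int i, num_vm = 0, num_vcpu = 0;
    get_all_vm(&num_vm, &num_vcpu);
    for (i = 0; i < num_vm; i++)
        printf("VM %s uses %u vcpus\n",
               lvm_info[i].vm_name, lvm_info[i].num_cpus);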
#ifdef __cplusplus
}
#endif
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index e7f5cc4a..37e71ed9 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -41,13 +41,17 @@
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
+#include <sys/time.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_pmd_i40e.h>
-
+#include <libvirt/libvirt.h>
#include "channel_monitor.h"
#include "channel_commands.h"
#include "channel_manager.h"
@@ -57,10 +61,15 @@
#define MAX_EVENTS 256
+uint64_t vsi_pkt_count_prev[384];
+uint64_t rdtsc_prev[384];
+double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
+static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
+static struct policy policies[MAX_VMS];
void channel_monitor_exit(void)
{
@@ -68,6 +77,287 @@ void channel_monitor_exit(void)
rte_free(global_events_list);
}
+static void
+core_share(int pNo, int z, int x, int t)
+{
+ if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
+ if (strcmp(policies[pNo].pkt.vm_name,
+ lvm_info[x].vm_name) != 0) {
+ policies[pNo].core_share[z].status = 1;
+ power_manager_scale_core_max(
+ policies[pNo].core_share[z].pcpu);
+ }
+ }
+}
+
+static void
+core_share_status(int pNo)
+{
+
+ int noVms, noVcpus, z, x, t;
+
+ get_all_vm(&noVms, &noVcpus);
+
+ /* Reset Core Share Status. */
+ for (z = 0; z < noVcpus; z++)
+ policies[pNo].core_share[z].status = 0;
+
+ /* Foreach vcpu in a policy. */
+ for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
+ /* Foreach VM on the platform. */
+ for (x = 0; x < noVms; x++) {
+ /* Foreach vcpu of VMs on platform. */
+ for (t = 0; t < lvm_info[x].num_cpus; t++)
+ core_share(pNo, z, x, t);
+ }
+ }
+}
+
+static void
+get_pcpu_to_control(struct policy *pol)
+{
+
+ /* Convert vcpu to pcpu. */
+ struct vm_info info;
+ int pcpu, count;
+ uint64_t mask_u64b;
+
+ RTE_LOG(INFO, CHANNEL_MONITOR, "Looking for pcpu for %s\n",
+ pol->pkt.vm_name);
+ get_info_vm(pol->pkt.vm_name, &info);
+
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ mask_u64b = info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
+ for (pcpu = 0; mask_u64b; mask_u64b &= ~(1ULL << pcpu++)) {
+ if ((mask_u64b >> pcpu) & 1)
+ pol->core_share[count].pcpu = pcpu;
+ }
+ }
+}
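
A worked example of the bit-walk above (the mask value is illustrative):

    /* mask_u64b = 0x0c  ->  pcpus 2 and 3 set
     * bit 0 clear, bit 1 clear, bit 2 set -> core_share[count].pcpu = 2,
     * bit 3 set -> core_share[count].pcpu = 3; the last set bit wins.
     * With the usual 1:1 vcpu pinning the mask holds a single bit,
     * so the result is unambiguous.
     */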
+
+static int
+get_pfid(struct policy *pol)
+{
+
+ int i, x, ret = 0, nb_ports;
+
+ nb_ports = rte_eth_dev_count();
+ for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
+
+ for (x = 0; x < nb_ports; x++) {
+ ret = rte_pmd_i40e_query_vfid_by_mac(x,
+ (struct ether_addr *)&(pol->pkt.vfid[i]));
+ if (ret != -EINVAL) {
+ pol->port[i] = x;
+ break;
+ }
+ }
+ if (ret == -EINVAL || ret == -ENOTSUP || ret == -ENODEV) {
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Error with policy: MAC not found on "
+ "attached ports\n");
+ pol->enabled = 0;
+ return ret;
+ }
+ pol->pfid[i] = ret;
+ }
+ return 1;
+}
+
+static int
+update_policy(struct channel_packet *pkt)
+{
+
+ unsigned int updated = 0;
+ int i;
+
+ for (i = 0; i < MAX_VMS; i++) {
+ if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ if (get_pfid(&policies[i]) == -1) {
+ updated = 1;
+ break;
+ }
+ core_share_status(i);
+ policies[i].enabled = 1;
+ updated = 1;
+ }
+ }
+ if (!updated) {
+ for (i = 0; i < MAX_VMS; i++) {
+ if (policies[i].enabled == 0) {
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ if (get_pfid(&policies[i]) == -1)
+ break;
+ core_share_status(i);
+ policies[i].enabled = 1;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static uint64_t
+get_pkt_diff(struct policy *pol)
+{
+
+ uint64_t vsi_pkt_count,
+ vsi_pkt_total = 0,
+ vsi_pkt_count_prev_total = 0;
+ double rdtsc_curr, rdtsc_diff, diff;
+ int x;
+ struct rte_eth_stats vf_stats;
+
+ for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {
+
+ /* Read VSI stats. */
+ if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
+ vsi_pkt_count = vf_stats.ipackets;
+ else
+ vsi_pkt_count = -1;
+
+ vsi_pkt_total += vsi_pkt_count;
+
+ vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
+ vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
+ }
+
+ rdtsc_curr = rte_rdtsc_precise();
+ rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
+ rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;
+
+ diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
+ ((double)rte_get_tsc_hz() / rdtsc_diff);
+
+ return diff;
+}
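
Restated as a formula, the function computes a packets-per-second rate from the VF counters (the numbers are illustrative, not from a real run):

    /* rate = (pkts_now - pkts_prev) * (tsc_hz / tsc_delta)
     * e.g. 1000000 new packets over 0.5 s worth of TSC ticks -> 2000000 pkt/s,
     * which apply_traffic_profile() compares against the policy thresholds.
     */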
+
+static void
+apply_traffic_profile(struct policy *pol)
+{
+
+ int count;
+ uint64_t diff = 0;
+
+ diff = get_pkt_diff(pol);
+
+ RTE_LOG(INFO, CHANNEL_MONITOR, "Applying traffic profile\n");
+
+ if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_time_profile(struct policy *pol)
+{
+
+ int count, x;
+ struct timeval tv;
+ struct tm *ptm;
+ char time_string[40];
+
+ /* Obtain the time of day, and convert it to a tm struct. */
+ gettimeofday(&tv, NULL);
+ ptm = localtime(&tv.tv_sec);
+ /* Format the date and time, down to a single second. */
+ strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
+
+ for (x = 0; x < HOURS; x++) {
+
+ if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Scaling up core %d to max\n",
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.quiet_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Scaling down core %d to min\n",
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
+ apply_traffic_profile(pol);
+ break;
+ }
+ }
+}
+
+static void
+apply_workload_profile(struct policy *pol)
+{
+
+ int count;
+
+ if (pol->pkt.workload == HIGH) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == MEDIUM) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == LOW) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_policy(struct policy *pol)
+{
+
+ struct channel_packet *pkt = &pol->pkt;
+
+ /* Check which policy to apply. */
+ if (pkt->policy_to_use == TRAFFIC)
+ apply_traffic_profile(pol);
+ else if (pkt->policy_to_use == TIME)
+ apply_time_profile(pol);
+ else if (pkt->policy_to_use == WORKLOAD)
+ apply_workload_profile(pol);
+}
+
+
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
@@ -105,6 +395,12 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
case(CPU_POWER_SCALE_UP):
power_manager_scale_core_up(core_num);
break;
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_core(core_num);
+ break;
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_core(core_num);
+ break;
default:
break;
}
@@ -122,12 +418,25 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
case(CPU_POWER_SCALE_UP):
power_manager_scale_mask_up(core_mask);
break;
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_mask(core_mask);
+ break;
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_mask(core_mask);
+ break;
default:
break;
}
}
}
+
+ if (pkt->command == PKT_POLICY) {
+ RTE_LOG(INFO, CHANNEL_MONITOR, "\nProcessing Policy request from Guest\n");
+ update_policy(pkt);
+ policy_is_set = 1;
+ }
+
/* Return is not checked as channel status may have been set to DISABLED
* from management thread
*/
@@ -197,9 +506,10 @@ run_channel_monitor(void)
struct channel_info *chan_info = (struct channel_info *)
global_events_list[i].data.ptr;
if ((global_events_list[i].events & EPOLLERR) ||
- (global_events_list[i].events & EPOLLHUP)) {
+ (global_events_list[i].events & EPOLLHUP)) {
RTE_LOG(DEBUG, CHANNEL_MONITOR, "Remote closed connection for "
- "channel '%s'\n", chan_info->channel_path);
+ "channel '%s'\n",
+ chan_info->channel_path);
remove_channel(&chan_info);
continue;
}
@@ -211,14 +521,17 @@ run_channel_monitor(void)
int buffer_len = sizeof(pkt);
while (buffer_len > 0) {
- n_bytes = read(chan_info->fd, buffer, buffer_len);
+ n_bytes = read(chan_info->fd,
+ buffer, buffer_len);
if (n_bytes == buffer_len)
break;
if (n_bytes == -1) {
err = errno;
- RTE_LOG(DEBUG, CHANNEL_MONITOR, "Received error on "
- "channel '%s' read: %s\n",
- chan_info->channel_path, strerror(err));
+ RTE_LOG(DEBUG, CHANNEL_MONITOR,
+ "Received error on "
+ "channel '%s' read: %s\n",
+ chan_info->channel_path,
+ strerror(err));
remove_channel(&chan_info);
break;
}
@@ -229,5 +542,14 @@ run_channel_monitor(void)
process_request(&pkt, chan_info);
}
}
+ rte_delay_us(time_period_ms*1000);
+ if (policy_is_set) {
+ int j;
+
+ for (j = 0; j < MAX_VMS; j++) {
+ if (policies[j].enabled == 1)
+ apply_policy(&policies[j]);
+ }
+ }
}
}
diff --git a/examples/vm_power_manager/channel_monitor.h b/examples/vm_power_manager/channel_monitor.h
index c1386079..b52c1fca 100644
--- a/examples/vm_power_manager/channel_monitor.h
+++ b/examples/vm_power_manager/channel_monitor.h
@@ -35,6 +35,24 @@
#define CHANNEL_MONITOR_H_
#include "channel_manager.h"
+#include "channel_commands.h"
+
+struct core_share {
+ unsigned int pcpu;
+ /*
+ * 1 CORE SHARE
+ * 0 NOT SHARED
+ */
+ int status;
+};
+
+struct policy {
+ struct channel_packet pkt;
+ uint32_t pfid[MAX_VFS];
+ uint32_t port[MAX_VFS];
+ unsigned int enabled;
+ struct core_share core_share[MAX_VCPU_PER_VM];
+};
#ifdef __cplusplus
extern "C" {
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 7931135e..63f711e0 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -45,8 +45,10 @@
#include <cmdline.h>
#include <rte_log.h>
#include <rte_lcore.h>
+#include <rte_ethdev.h>
#include <rte_power.h>
+#include <guest_channel.h>
#include "vm_power_cli_guest.h"
@@ -108,6 +110,10 @@ cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
ret = rte_power_freq_min(res->lcore_id);
else if (!strcmp(res->cmd , "max"))
ret = rte_power_freq_max(res->lcore_id);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = rte_power_freq_enable_turbo(res->lcore_id);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = rte_power_freq_disable_turbo(res->lcore_id);
if (ret != 1)
cmdline_printf(cl, "Error sending message: %s\n", strerror(ret));
}
@@ -120,13 +126,14 @@ cmdline_parse_token_string_t cmd_set_cpu_freq_core_num =
lcore_id, UINT8);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_set = {
.f = cmd_set_cpu_freq_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_num> <up|down|min|max>, Set the current "
- "frequency for the specified core by scaling up/down/min/max",
+ .help_str = "set_cpu_freq <core_num> "
+ "<up|down|min|max|enable_turbo|disable_turbo>, "
+ "adjust the frequency for the specified core.",
.tokens = {
(void *)&cmd_set_cpu_freq,
(void *)&cmd_set_cpu_freq_core_num,
@@ -135,8 +142,103 @@ cmdline_parse_inst_t cmd_set_cpu_freq_set = {
},
};
+struct cmd_send_policy_result {
+ cmdline_fixed_string_t send_policy;
+ cmdline_fixed_string_t cmd;
+};
+
+union PFID {
+ struct ether_addr addr;
+ uint64_t pfid;
+};
+
+static inline int
+send_policy(void)
+{
+ struct channel_packet pkt;
+ int ret;
+
+ union PFID pfid;
+ /* Use port MAC address as the vfid */
+ rte_eth_macaddr_get(0, &pfid.addr);
+ printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
+ "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ 0,
+ pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
+ pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
+ pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
+ pkt.vfid[0] = pfid.pfid;
+
+ pkt.nb_mac_to_monitor = 1;
+ pkt.t_boost_status.tbEnabled = false;
+
+ pkt.vcpu_to_control[0] = 0;
+ pkt.vcpu_to_control[1] = 1;
+ pkt.num_vcpu = 2;
+ /* Dummy Population. */
+ pkt.traffic_policy.min_packet_thresh = 96000;
+ pkt.traffic_policy.avg_max_packet_thresh = 1800000;
+ pkt.traffic_policy.max_max_packet_thresh = 2000000;
+
+ pkt.timer_policy.busy_hours[0] = 3;
+ pkt.timer_policy.busy_hours[1] = 4;
+ pkt.timer_policy.busy_hours[2] = 5;
+ pkt.timer_policy.quiet_hours[0] = 11;
+ pkt.timer_policy.quiet_hours[1] = 12;
+ pkt.timer_policy.quiet_hours[2] = 13;
+
+ pkt.timer_policy.hours_to_use_traffic_profile[0] = 8;
+ pkt.timer_policy.hours_to_use_traffic_profile[1] = 10;
+
+ pkt.workload = LOW;
+ pkt.policy_to_use = TIME;
+ pkt.command = PKT_POLICY;
+ strcpy(pkt.vm_name, "ubuntu2");
+ ret = rte_power_guest_channel_send_msg(&pkt, 1);
+ if (ret == 0)
+ return 1;
+ RTE_LOG(DEBUG, POWER, "Error sending message: %s\n",
+ ret > 0 ? strerror(ret) : "channel not connected");
+ return -1;
+}
+
+static void
+cmd_send_policy_parsed(void *parsed_result, struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int ret = -1;
+ struct cmd_send_policy_result *res = parsed_result;
+
+ if (!strcmp(res->cmd, "now")) {
+ printf("Sending Policy down now!\n");
+ ret = send_policy();
+ }
+ if (ret != 1)
+ cmdline_printf(cl, "Error sending message: %s\n",
+ strerror(ret));
+}
+
+cmdline_parse_token_string_t cmd_send_policy =
+ TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
+ send_policy, "send_policy");
+cmdline_parse_token_string_t cmd_send_policy_cmd_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
+ cmd, "now");
+
+cmdline_parse_inst_t cmd_send_policy_set = {
+ .f = cmd_send_policy_parsed,
+ .data = NULL,
+ .help_str = "send_policy now",
+ .tokens = {
+ (void *)&cmd_send_policy,
+ (void *)&cmd_send_policy_cmd_cmd,
+ NULL,
+ },
+};
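
For completeness, the resulting guest-side interaction would look roughly like this (illustrative session; the exact prompt string is an assumption):

    vmpower(guest)> send_policy now
    Sending Policy down now!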
+
cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_quit,
+ (cmdline_parse_inst_t *)&cmd_send_policy_set,
(cmdline_parse_inst_t *)&cmd_set_cpu_freq_set,
NULL,
};
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
index 0c4bdd5b..277eab3d 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
@@ -40,12 +40,6 @@ extern "C" {
#include "channel_commands.h"
-int guest_channel_host_connect(unsigned lcore_id);
-
-int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
-
-void guest_channel_host_disconnect(unsigned lcore_id);
-
void run_cli(__attribute__((unused)) void *arg);
#ifdef __cplusplus
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index c33fcc93..399fbdd4 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -49,14 +49,206 @@
#include <rte_log.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
+#include <rte_ethdev.h>
+#include <getopt.h>
+#include <rte_cycles.h>
#include <rte_debug.h>
#include "channel_manager.h"
#include "channel_monitor.h"
#include "power_manager.h"
#include "vm_power_cli.h"
+#include <rte_pmd_ixgbe.h>
+#include <rte_pmd_i40e.h>
+#include <rte_pmd_bnxt.h>
+
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+
+#define NUM_MBUFS 8191
+#define MBUF_CACHE_SIZE 250
+#define BURST_SIZE 32
+
+static uint32_t enabled_port_mask;
+static volatile bool force_quit;
+
+/****************/
+static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+};
+
+static inline int
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
+{
+ struct rte_eth_conf port_conf = port_conf_default;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ struct ether_addr addr;
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ (unsigned int)port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+
+ return 0;
+}
static int
+parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
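
A worked example of the portmask handling (values illustrative): "-p 0x3" gives pm = 3, so ports 0 and 1 are enabled and the port-init loop in main() skips everything else:

    /* enabled_port_mask = 0x3 (binary 11)
     *   portid 0: 0x3 & (1 << 0) != 0 -> initialised
     *   portid 1: 0x3 & (1 << 1) != 0 -> initialised
     *   portid 2: 0x3 & (1 << 2) == 0 -> skipped
     */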
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ { "mac-updating", no_argument, 0, 1},
+ { "no-mac-updating", no_argument, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ enabled_port_mask = parse_portmask(optarg);
+ if (enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ return -1;
+ }
+ break;
+ /* long options */
+ case 0:
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
+static void
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ if (force_quit)
+ return;
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if (force_quit)
+ return;
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", (uint16_t)portid,
+ (unsigned int)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint16_t)portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+static int
run_monitor(__attribute__((unused)) void *arg)
{
if (channel_monitor_init() < 0) {
@@ -82,6 +274,10 @@ main(int argc, char **argv)
{
int ret;
unsigned lcore_id;
+ unsigned int nb_ports;
+ struct rte_mempool *mbuf_pool;
+ uint16_t portid;
+
ret = rte_eal_init(argc, argv);
if (ret < 0)
@@ -90,12 +286,77 @@ main(int argc, char **argv)
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid arguments\n");
+
+ nb_ports = rte_eth_dev_count();
+
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* Initialize ports. */
+ for (portid = 0; portid < nb_ports; portid++) {
+ struct ether_addr eth;
+ int w, j;
+ int ret = -ENOTSUP;
+
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ eth.addr_bytes[0] = 0xe0;
+ eth.addr_bytes[1] = 0xe0;
+ eth.addr_bytes[2] = 0xe0;
+ eth.addr_bytes[3] = 0xe0;
+ eth.addr_bytes[4] = portid + 0xf0;
+
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ portid);
+
+ for (w = 0; w < MAX_VFS; w++) {
+ eth.addr_bytes[5] = w + 0xf0;
+
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_mac_addr(portid,
+ w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_mac_addr(portid,
+ w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_addr(portid,
+ w, &eth);
+
+ switch (ret) {
+ case 0:
+ printf("Port %d VF %d MAC: ",
+ portid, w);
+ for (j = 0; j < 6; j++) {
+ printf("%02x", eth.addr_bytes[j]);
+ if (j < 5)
+ printf(":");
+ }
+ printf("\n");
+ break;
+ }
+ }
+ }
+
lcore_id = rte_get_next_lcore(-1, 1, 0);
if (lcore_id == RTE_MAX_LCORE) {
RTE_LOG(ERR, EAL, "A minimum of two cores are required to run "
"application\n");
return 0;
}
+
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
rte_eal_remote_launch(run_monitor, NULL, lcore_id);
if (power_manager_init() < 0) {
diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index 2644fce6..1834a823 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -108,7 +108,7 @@ set_host_cpus_mask(void)
int
power_manager_init(void)
{
- unsigned i, num_cpus;
+ unsigned int i, num_cpus, num_freqs;
uint64_t cpu_mask;
int ret = 0;
@@ -121,15 +121,21 @@ power_manager_init(void)
rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
cpu_mask = global_enabled_cpus;
for (i = 0; cpu_mask; cpu_mask &= ~(1 << i++)) {
- if (rte_power_init(i) < 0 || rte_power_freqs(i,
- global_core_freq_info[i].freqs,
- RTE_MAX_LCORE_FREQS) == 0) {
- RTE_LOG(ERR, POWER_MANAGER, "Unable to initialize power manager "
+ if (rte_power_init(i) < 0)
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to initialize power manager "
"for core %u\n", i);
+ num_freqs = rte_power_freqs(i, global_core_freq_info[i].freqs,
+ RTE_MAX_LCORE_FREQS);
+ if (num_freqs == 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to get frequency list for core %u\n",
+ i);
global_enabled_cpus &= ~(1 << i);
num_cpus--;
ret = -1;
}
+ global_core_freq_info[i].num_freqs = num_freqs;
rte_spinlock_init(&global_core_freq_info[i].power_sl);
}
RTE_LOG(INFO, POWER_MANAGER, "Detected %u host CPUs , enabled core mask:"
@@ -216,6 +222,24 @@ power_manager_scale_mask_max(uint64_t core_mask)
}
int
+power_manager_enable_turbo_mask(uint64_t core_mask)
+{
+ int ret = 0;
+
+ POWER_SCALE_MASK(enable_turbo, core_mask, ret);
+ return ret;
+}
+
+int
+power_manager_disable_turbo_mask(uint64_t core_mask)
+{
+ int ret = 0;
+
+ POWER_SCALE_MASK(disable_turbo, core_mask, ret);
+ return ret;
+}
+
+int
power_manager_scale_core_up(unsigned core_num)
{
int ret = 0;
@@ -250,3 +274,37 @@ power_manager_scale_core_max(unsigned core_num)
POWER_SCALE_CORE(max, core_num, ret);
return ret;
}
+
+int
+power_manager_enable_turbo_core(unsigned int core_num)
+{
+ int ret = 0;
+
+ POWER_SCALE_CORE(enable_turbo, core_num, ret);
+ return ret;
+}
+
+int
+power_manager_disable_turbo_core(unsigned int core_num)
+{
+ int ret = 0;
+
+ POWER_SCALE_CORE(disable_turbo, core_num, ret);
+ return ret;
+}
+
+int
+power_manager_scale_core_med(unsigned int core_num)
+{
+ int ret = 0;
+
+ if (core_num >= POWER_MGR_MAX_CPUS)
+ return -1;
+ if (!(global_enabled_cpus & (1ULL << core_num)))
+ return -1;
+ rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
+ ret = rte_power_set_freq(core_num,
+ global_core_freq_info[core_num].num_freqs / 2);
+ rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
+ return ret;
+}
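
The medium setting simply targets the midpoint of the frequency table returned by rte_power_freqs(); a worked example (table contents are illustrative):

    /* num_freqs = 8, index 0 = highest available frequency
     * -> rte_power_set_freq(core_num, 8 / 2) selects index 4,
     *    i.e. a frequency roughly in the middle of the P-state range.
     */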
diff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h
index 1b45babf..b52fb4c7 100644
--- a/examples/vm_power_manager/power_manager.h
+++ b/examples/vm_power_manager/power_manager.h
@@ -113,6 +113,32 @@ int power_manager_scale_mask_min(uint64_t core_mask);
int power_manager_scale_mask_max(uint64_t core_mask);
/**
+ * Enable Turbo Boost on the cores specified in core_mask.
+ * It is thread-safe.
+ *
+ * @param core_mask
+ * The uint64_t bit-mask of cores to change frequency.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_enable_turbo_mask(uint64_t core_mask);
+
+/**
+ * Disable Turbo Boost on the cores specified in core_mask.
+ * It is thread-safe.
+ *
+ * @param core_mask
+ * The uint64_t bit-mask of cores to change frequency.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_disable_turbo_mask(uint64_t core_mask);
+
+/**
* Scale up frequency for the core specified by core_num.
* It is thread-safe.
*
@@ -168,6 +194,32 @@ int power_manager_scale_core_min(unsigned core_num);
int power_manager_scale_core_max(unsigned core_num);
/**
+ * Enable Turbo Boost for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ * The core number to boost
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_enable_turbo_core(unsigned int core_num);
+
+/**
+ * Disable Turbo Boost for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ *  The core number on which to disable Turbo Boost
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_disable_turbo_core(unsigned int core_num);
+
+/**
* Get the current frequency of the core specified by core_num
*
* @param core_num
@@ -179,6 +231,19 @@ int power_manager_scale_core_max(unsigned core_num);
*/
uint32_t power_manager_get_current_frequency(unsigned core_num);
+/**
+ * Scale to medium frequency for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ * The core number to change frequency
+ *
+ * @return
+ * - 1 on success.
+ * - 0 if frequency not changed.
+ * - Negative on error.
+ */
+int power_manager_scale_core_med(unsigned int core_num);
#ifdef __cplusplus
}
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index c5e8d934..6f234fb7 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -520,6 +520,10 @@ cmd_set_cpu_freq_mask_parsed(void *parsed_result, struct cmdline *cl,
ret = power_manager_scale_mask_min(res->core_mask);
else if (!strcmp(res->cmd , "max"))
ret = power_manager_scale_mask_max(res->core_mask);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = power_manager_enable_turbo_mask(res->core_mask);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = power_manager_disable_turbo_mask(res->core_mask);
if (ret < 0) {
cmdline_printf(cl, "Error scaling core_mask(0x%"PRIx64") '%s' , not "
"all cores specified have been scaled\n",
@@ -535,14 +539,13 @@ cmdline_parse_token_num_t cmd_set_cpu_freq_mask_core_mask =
core_mask, UINT64);
cmdline_parse_token_string_t cmd_set_cpu_freq_mask_result =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_mask_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_mask_set = {
.f = cmd_set_cpu_freq_mask_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_mask> <up|down|min|max>, Set the current "
- "frequency for the cores specified in <core_mask> by scaling "
- "each up/down/min/max.",
+ .help_str = "set_cpu_freq <core_mask> <up|down|min|max|enable_turbo|disable_turbo>, adjust the current "
+ "frequency for the cores specified in <core_mask>",
.tokens = {
(void *)&cmd_set_cpu_freq_mask,
(void *)&cmd_set_cpu_freq_mask_core_mask,
@@ -614,6 +617,10 @@ cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
ret = power_manager_scale_core_min(res->core_num);
else if (!strcmp(res->cmd , "max"))
ret = power_manager_scale_core_max(res->core_num);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = power_manager_enable_turbo_core(res->core_num);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = power_manager_disable_turbo_core(res->core_num);
if (ret < 0) {
cmdline_printf(cl, "Error scaling core(%u) '%s'\n", res->core_num,
res->cmd);
@@ -628,13 +635,13 @@ cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
core_num, UINT8);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_set = {
.f = cmd_set_cpu_freq_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_num> <up|down|min|max>, Set the current "
- "frequency for the specified core by scaling up/down/min/max",
+ .help_str = "set_cpu_freq <core_num> <up|down|min|max|enable_turbo|disable_turbo>, adjust the current "
+ "frequency for the specified core",
.tokens = {
(void *)&cmd_set_cpu_freq,
(void *)&cmd_set_cpu_freq_core_num,
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 8949a115..84e9937d 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -121,7 +119,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
};
static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified in command line */
/* array used for printing out statistics */
@@ -186,7 +184,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf *rxconf;
@@ -583,7 +581,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
int ret;
unsigned nb_ports, valid_num_ports;
- uint8_t portid;
+ uint16_t portid;
signal(SIGHUP, sighup_handler);
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index b6ebccb2..9dad2b8e 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -87,7 +85,7 @@
/* mask of enabled ports */
static uint32_t enabled_port_mask;
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;
/* number of pools (if user does not specify any, 32 by default) */
@@ -220,7 +218,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = {0};
@@ -646,7 +644,7 @@ main(int argc, char *argv[])
uintptr_t i;
int ret;
unsigned nb_ports, valid_num_ports;
- uint8_t portid;
+ uint16_t portid;
signal(SIGHUP, sighup_handler);
diff --git a/lib/Makefile b/lib/Makefile
index 86caba17..dc4e8df7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,6 +33,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-y += librte_compat
DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_PCI) += librte_pci
+DEPDIRS-librte_pci := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DEPDIRS-librte_ring := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
@@ -42,7 +44,6 @@ DEPDIRS-librte_mbuf := librte_eal librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DEPDIRS-librte_timer := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
-DEPDIRS-librte_cfgfile := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
DEPDIRS-librte_cmdline := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
@@ -51,8 +52,12 @@ DEPDIRS-librte_ether += librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_cryptodev += librte_kvargs
+DIRS-$(CONFIG_RTE_LIBRTE_SECURITY) += librte_security
+DEPDIRS-librte_security := librte_eal librte_mempool librte_ring librte_mbuf
+DEPDIRS-librte_security += librte_ether
+DEPDIRS-librte_security += librte_cryptodev
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
-DEPDIRS-librte_eventdev := librte_eal librte_ring
+DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ether librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
@@ -63,6 +68,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
DEPDIRS-librte_lpm := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += librte_acl
DEPDIRS-librte_acl := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_MEMBER) += librte_member
+DEPDIRS-librte_member := librte_eal librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_NET) += librte_net
DEPDIRS-librte_net := librte_mbuf librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += librte_ip_frag
@@ -82,6 +89,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
DEPDIRS-librte_power := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
DEPDIRS-librte_meter := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += librte_flow_classify
+DEPDIRS-librte_flow_classify := librte_net librte_table librte_acl
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
DEPDIRS-librte_sched += librte_timer
@@ -108,10 +117,13 @@ DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += librte_reorder
DEPDIRS-librte_reorder := librte_eal librte_mempool librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += librte_pdump
DEPDIRS-librte_pdump := librte_eal librte_mempool librte_mbuf librte_ether
+DIRS-$(CONFIG_RTE_LIBRTE_GSO) += librte_gso
+DEPDIRS-librte_gso := librte_eal librte_mbuf librte_ether librte_net
+DEPDIRS-librte_gso += librte_mempool
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
endif
-DEPDIRS-librte_kni:= librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_kni := librte_eal librte_mempool librte_mbuf librte_ether
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index 59767920..e7e3c91d 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -36,6 +36,7 @@ LIB = librte_acl.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_acl_version.map
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index d1f40bef..67f41f3d 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -120,8 +120,7 @@ rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
* if both conditions are met:
* at build time compiler supports AVX2 and target cpu supports AVX2.
*/
-static void __attribute__((constructor))
-rte_acl_init(void)
+RTE_INIT(rte_acl_init)
{
enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;
diff --git a/lib/librte_acl/rte_acl_osdep.h b/lib/librte_acl/rte_acl_osdep.h
index 9e4af530..ac712bfa 100644
--- a/lib/librte_acl/rte_acl_osdep.h
+++ b/lib/librte_acl/rte_acl_osdep.h
@@ -66,7 +66,6 @@
#include <rte_prefetch.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
-#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
diff --git a/lib/librte_bitratestats/Makefile b/lib/librte_bitratestats/Makefile
index 58a20ea0..5054b679 100644
--- a/lib/librte_bitratestats/Makefile
+++ b/lib/librte_bitratestats/Makefile
@@ -35,10 +35,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_bitratestats.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal -lrte_metrics -lrte_ethdev
EXPORT_MAP := rte_bitratestats_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_BITRATE) := rte_bitrate.c
diff --git a/lib/librte_bitratestats/rte_bitrate.c b/lib/librte_bitratestats/rte_bitrate.c
index 3ceb3516..f373697a 100644
--- a/lib/librte_bitratestats/rte_bitrate.c
+++ b/lib/librte_bitratestats/rte_bitrate.c
@@ -84,7 +84,7 @@ rte_stats_bitrate_reg(struct rte_stats_bitrates *bitrate_data)
int
rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
- uint8_t port_id)
+ uint16_t port_id)
{
struct rte_stats_bitrate *port_data;
struct rte_eth_stats eth_stats;
diff --git a/lib/librte_bitratestats/rte_bitrate.h b/lib/librte_bitratestats/rte_bitrate.h
index 15fc270a..16467221 100644
--- a/lib/librte_bitratestats/rte_bitrate.h
+++ b/lib/librte_bitratestats/rte_bitrate.h
@@ -85,7 +85,7 @@ int rte_stats_bitrate_reg(struct rte_stats_bitrates *bitrate_data);
* - Negative value on error
*/
int rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
- uint8_t port_id);
+ uint16_t port_id);
#ifdef __cplusplus
}
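[Note: port_id widens from uint8_t to uint16_t in line with the 17.11 ethdev
change that raised the port count limit. A hedged sketch of a polling loop over
all probed ports, assuming bitrate_data was already created and registered:

    uint16_t port_id;

    /* rte_eth_dev_count() enumerates probed ports in this release */
    for (port_id = 0; port_id < rte_eth_dev_count(); port_id++)
            rte_stats_bitrate_calc(bitrate_data, port_id);
]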
diff --git a/lib/librte_cfgfile/Makefile b/lib/librte_cfgfile/Makefile
index 755ef11f..0bee43e2 100644
--- a/lib/librte_cfgfile/Makefile
+++ b/lib/librte_cfgfile/Makefile
@@ -38,6 +38,7 @@ LIB = librte_cfgfile.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(SRCDIR)/../librte_eal/common/include
EXPORT_MAP := rte_cfgfile_version.map
diff --git a/lib/librte_cfgfile/rte_cfgfile.c b/lib/librte_cfgfile/rte_cfgfile.c
index b54a523d..eacf93a8 100644
--- a/lib/librte_cfgfile/rte_cfgfile.c
+++ b/lib/librte_cfgfile/rte_cfgfile.c
@@ -35,21 +35,23 @@
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
+#include <errno.h>
#include <rte_common.h>
-#include <rte_string_fns.h>
#include "rte_cfgfile.h"
struct rte_cfgfile_section {
char name[CFG_NAME_LEN];
int num_entries;
- struct rte_cfgfile_entry *entries[0];
+ int allocated_entries;
+ struct rte_cfgfile_entry *entries;
};
struct rte_cfgfile {
int flags;
int num_sections;
- struct rte_cfgfile_section *sections[0];
+ int allocated_sections;
+ struct rte_cfgfile_section *sections;
};
/** when we resize a file structure, how many extra entries
@@ -105,6 +107,49 @@ _strip(char *str, unsigned len)
return newlen;
}
+static struct rte_cfgfile_section *
+_get_section(struct rte_cfgfile *cfg, const char *sectionname)
+{
+ int i;
+
+ for (i = 0; i < cfg->num_sections; i++) {
+ if (strncmp(cfg->sections[i].name, sectionname,
+ sizeof(cfg->sections[0].name)) == 0)
+ return &cfg->sections[i];
+ }
+ return NULL;
+}
+
+static int
+_add_entry(struct rte_cfgfile_section *section, const char *entryname,
+ const char *entryvalue)
+{
+ /* resize entry structure if we don't have room for more entries */
+ if (section->num_entries == section->allocated_entries) {
+ struct rte_cfgfile_entry *n_entries = realloc(
+ section->entries,
+ sizeof(struct rte_cfgfile_entry) *
+ ((section->allocated_entries) +
+ CFG_ALLOC_ENTRY_BATCH));
+
+ if (n_entries == NULL)
+ return -ENOMEM;
+
+ section->entries = n_entries;
+ section->allocated_entries += CFG_ALLOC_ENTRY_BATCH;
+ }
+ /* fill up entry fields with key name and value */
+ struct rte_cfgfile_entry *curr_entry =
+ &section->entries[section->num_entries];
+
+ snprintf(curr_entry->name, sizeof(curr_entry->name), "%s", entryname);
+ snprintf(curr_entry->value,
+ sizeof(curr_entry->value), "%s", entryvalue);
+ section->num_entries++;
+
+ return 0;
+}
+
static int
rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params)
{
@@ -144,10 +189,6 @@ struct rte_cfgfile *
rte_cfgfile_load_with_params(const char *filename, int flags,
const struct rte_cfgfile_parameters *params)
{
- int allocated_sections = CFG_ALLOC_SECTION_BATCH;
- int allocated_entries = 0;
- int curr_section = -1;
- int curr_entry = -1;
char buffer[CFG_NAME_LEN + CFG_VALUE_LEN + 4] = {0};
int lineno = 0;
struct rte_cfgfile *cfg = NULL;
@@ -159,28 +200,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
if (f == NULL)
return NULL;
- cfg = malloc(sizeof(*cfg) + sizeof(cfg->sections[0]) *
- allocated_sections);
- if (cfg == NULL)
- goto error2;
-
- memset(cfg->sections, 0, sizeof(cfg->sections[0]) * allocated_sections);
-
- if (flags & CFG_FLAG_GLOBAL_SECTION) {
- curr_section = 0;
- allocated_entries = CFG_ALLOC_ENTRY_BATCH;
- cfg->sections[curr_section] = malloc(
- sizeof(*cfg->sections[0]) +
- sizeof(cfg->sections[0]->entries[0]) *
- allocated_entries);
- if (cfg->sections[curr_section] == NULL) {
- printf("Error - no memory for global section\n");
- goto error1;
- }
-
- snprintf(cfg->sections[curr_section]->name,
- sizeof(cfg->sections[0]->name), "GLOBAL");
- }
+ cfg = rte_cfgfile_create(flags);
while (fgets(buffer, sizeof(buffer), f) != NULL) {
char *pos = NULL;
@@ -191,13 +211,15 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
"Check if line too long\n", lineno);
goto error1;
}
+ /* skip parsing if comment character found */
pos = memchr(buffer, params->comment_character, len);
- if (pos != NULL) {
+ if (pos != NULL && (*(pos-1) != '\\')) {
*pos = '\0';
len = pos - buffer;
}
len = _strip(buffer, len);
+ /* skip lines without useful content */
if (buffer[0] != '[' && memchr(buffer, '=', len) == NULL)
continue;
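[Note: the new *(pos-1) != '\\' test keeps a comment character that is escaped
with a backslash as part of the value; the unescaping loop later in this
function then strips the backslash. A hedged illustration, assuming the default
';' comment character and a hypothetical app.cfg whose [ports] section contains
name=eth0\;primary:

    struct rte_cfgfile *cfg = rte_cfgfile_load("app.cfg", 0);

    if (cfg != NULL) {
            /* the escaped ';' survives: value is "eth0;primary" */
            printf("%s\n", rte_cfgfile_get_entry(cfg, "ports", "name"));
            rte_cfgfile_close(cfg);
    }
]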
@@ -205,151 +227,252 @@ rte_cfgfile_load_with_params(const char *filename, int flags,
/* section heading line */
char *end = memchr(buffer, ']', len);
if (end == NULL) {
- printf("Error line %d - no terminating '['"
+ printf("Error line %d - no terminating ']' "
"character found\n", lineno);
goto error1;
}
*end = '\0';
_strip(&buffer[1], end - &buffer[1]);
- /* close off old section and add start new one */
- if (curr_section >= 0)
- cfg->sections[curr_section]->num_entries =
- curr_entry + 1;
- curr_section++;
-
- /* resize overall struct if we don't have room for more
- sections */
- if (curr_section == allocated_sections) {
- allocated_sections += CFG_ALLOC_SECTION_BATCH;
- struct rte_cfgfile *n_cfg = realloc(cfg,
- sizeof(*cfg) + sizeof(cfg->sections[0])
- * allocated_sections);
- if (n_cfg == NULL) {
- curr_section--;
- printf("Error - no more memory\n");
- goto error1;
- }
- cfg = n_cfg;
- }
-
- /* allocate space for new section */
- allocated_entries = CFG_ALLOC_ENTRY_BATCH;
- curr_entry = -1;
- cfg->sections[curr_section] = malloc(
- sizeof(*cfg->sections[0]) +
- sizeof(cfg->sections[0]->entries[0]) *
- allocated_entries);
- if (cfg->sections[curr_section] == NULL) {
- printf("Error - no more memory\n");
- goto error1;
- }
-
- snprintf(cfg->sections[curr_section]->name,
- sizeof(cfg->sections[0]->name),
- "%s", &buffer[1]);
+ rte_cfgfile_add_section(cfg, &buffer[1]);
} else {
- /* value line */
- if (curr_section < 0) {
- printf("Error line %d - value outside of"
- "section\n", lineno);
+ /* key and value line */
+ char *split[2] = {NULL};
+
+ split[0] = buffer;
+ split[1] = memchr(buffer, '=', len);
+ if (split[1] == NULL) {
+ printf("Error line %d - no '=' "
+ "character found\n", lineno);
goto error1;
}
-
- struct rte_cfgfile_section *sect =
- cfg->sections[curr_section];
- int n;
- char *split[2] = {NULL};
- n = rte_strsplit(buffer, sizeof(buffer), split, 2, '=');
- if (flags & CFG_FLAG_EMPTY_VALUES) {
- if ((n < 1) || (n > 2)) {
- printf("Error at line %d - cannot split string, n=%d\n",
- lineno, n);
- goto error1;
- }
- } else {
- if (n != 2) {
- printf("Error at line %d - cannot split string, n=%d\n",
- lineno, n);
- goto error1;
- }
+ *split[1] = '\0';
+ split[1]++;
+
+ _strip(split[0], strlen(split[0]));
+ _strip(split[1], strlen(split[1]));
+ char *end = memchr(split[1], '\\', strlen(split[1]));
+
+ while (end != NULL) {
+ if (*(end+1) == params->comment_character) {
+ *end = '\0';
+ strcat(split[1], end+1);
+ } else
+ end++;
+ end = memchr(end, '\\', strlen(end));
}
- curr_entry++;
- if (curr_entry == allocated_entries) {
- allocated_entries += CFG_ALLOC_ENTRY_BATCH;
- struct rte_cfgfile_section *n_sect = realloc(
- sect, sizeof(*sect) +
- sizeof(sect->entries[0]) *
- allocated_entries);
- if (n_sect == NULL) {
- curr_entry--;
- printf("Error - no more memory\n");
- goto error1;
- }
- sect = cfg->sections[curr_section] = n_sect;
+ if (!(flags & CFG_FLAG_EMPTY_VALUES) &&
+ (*split[1] == '\0')) {
+ printf("Error at line %d - cannot use empty "
+ "values\n", lineno);
+ goto error1;
}
- sect->entries[curr_entry] = malloc(
- sizeof(*sect->entries[0]));
- if (sect->entries[curr_entry] == NULL) {
- printf("Error - no more memory\n");
+ if (cfg->num_sections == 0)
goto error1;
- }
- struct rte_cfgfile_entry *entry = sect->entries[
- curr_entry];
- snprintf(entry->name, sizeof(entry->name), "%s",
- split[0]);
- snprintf(entry->value, sizeof(entry->value), "%s",
- split[1] ? split[1] : "");
- _strip(entry->name, strnlen(entry->name,
- sizeof(entry->name)));
- _strip(entry->value, strnlen(entry->value,
- sizeof(entry->value)));
+ _add_entry(&cfg->sections[cfg->num_sections - 1],
+ split[0], split[1]);
}
}
fclose(f);
- cfg->flags = flags;
- cfg->num_sections = curr_section + 1;
- /* curr_section will still be -1 if we have an empty file */
- if (curr_section >= 0)
- cfg->sections[curr_section]->num_entries = curr_entry + 1;
return cfg;
-
error1:
- cfg->num_sections = curr_section + 1;
- if (curr_section >= 0)
- cfg->sections[curr_section]->num_entries = curr_entry + 1;
rte_cfgfile_close(cfg);
-error2:
fclose(f);
return NULL;
}
+struct rte_cfgfile *
+rte_cfgfile_create(int flags)
+{
+ int i;
+ struct rte_cfgfile *cfg = NULL;
-int rte_cfgfile_close(struct rte_cfgfile *cfg)
+ cfg = malloc(sizeof(*cfg));
+
+ if (cfg == NULL)
+ return NULL;
+
+ cfg->flags = flags;
+ cfg->num_sections = 0;
+
+ /* allocate first batch of sections and entries */
+ cfg->sections = malloc(sizeof(struct rte_cfgfile_section) *
+ CFG_ALLOC_SECTION_BATCH);
+
+ if (cfg->sections == NULL)
+ goto error1;
+
+ cfg->allocated_sections = CFG_ALLOC_SECTION_BATCH;
+
+ for (i = 0; i < CFG_ALLOC_SECTION_BATCH; i++) {
+ cfg->sections[i].entries = malloc(sizeof(
+ struct rte_cfgfile_entry) * CFG_ALLOC_ENTRY_BATCH);
+
+ if (cfg->sections[i].entries == NULL)
+ goto error1;
+
+ cfg->sections[i].num_entries = 0;
+ cfg->sections[i].allocated_entries = CFG_ALLOC_ENTRY_BATCH;
+ }
+
+ if (flags & CFG_FLAG_GLOBAL_SECTION)
+ rte_cfgfile_add_section(cfg, "GLOBAL");
+
+ return cfg;
+error1:
+ if (cfg->sections != NULL) {
+ for (i = 0; i < cfg->allocated_sections; i++) {
+ if (cfg->sections[i].entries != NULL) {
+ free(cfg->sections[i].entries);
+ cfg->sections[i].entries = NULL;
+ }
+ }
+ free(cfg->sections);
+ cfg->sections = NULL;
+ }
+ free(cfg);
+ return NULL;
+}
+
+int
+rte_cfgfile_add_section(struct rte_cfgfile *cfg, const char *sectionname)
+{
+ int i;
+
+ if (cfg == NULL)
+ return -EINVAL;
+
+ if (sectionname == NULL)
+ return -EINVAL;
+
+ /* resize overall struct if we don't have room for more sections */
+ if (cfg->num_sections == cfg->allocated_sections) {
+
+ struct rte_cfgfile_section *n_sections =
+ realloc(cfg->sections,
+ sizeof(struct rte_cfgfile_section) *
+ ((cfg->allocated_sections) +
+ CFG_ALLOC_SECTION_BATCH));
+
+ if (n_sections == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < CFG_ALLOC_SECTION_BATCH; i++) {
+ n_sections[i + cfg->allocated_sections].num_entries = 0;
+ n_sections[i +
+ cfg->allocated_sections].allocated_entries = 0;
+ n_sections[i + cfg->allocated_sections].entries = NULL;
+ }
+ cfg->sections = n_sections;
+ cfg->allocated_sections += CFG_ALLOC_SECTION_BATCH;
+ }
+
+ snprintf(cfg->sections[cfg->num_sections].name,
+ sizeof(cfg->sections[0].name), "%s", sectionname);
+ cfg->sections[cfg->num_sections].num_entries = 0;
+ cfg->num_sections++;
+
+ return 0;
+}
+
+int rte_cfgfile_add_entry(struct rte_cfgfile *cfg,
+ const char *sectionname, const char *entryname,
+ const char *entryvalue)
+{
+ int ret;
+
+ if ((cfg == NULL) || (sectionname == NULL) || (entryname == NULL)
+ || (entryvalue == NULL))
+ return -EINVAL;
+
+ if (rte_cfgfile_has_entry(cfg, sectionname, entryname) != 0)
+ return -EEXIST;
+
+ /* search for section pointer by sectionname */
+ struct rte_cfgfile_section *curr_section = _get_section(cfg,
+ sectionname);
+ if (curr_section == NULL)
+ return -EINVAL;
+
+ ret = _add_entry(curr_section, entryname, entryvalue);
+
+ return ret;
+}
+
+int rte_cfgfile_set_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ const char *entryname, const char *entryvalue)
+{
+ int i;
+
+ if ((cfg == NULL) || (sectionname == NULL) || (entryname == NULL))
+ return -EINVAL;
+
+ /* search for section pointer by sectionname */
+ struct rte_cfgfile_section *curr_section = _get_section(cfg,
+ sectionname);
+ if (curr_section == NULL)
+ return -EINVAL;
+
+ if (entryvalue == NULL)
+ entryvalue = "";
+
+ for (i = 0; i < curr_section->num_entries; i++)
+ if (!strcmp(curr_section->entries[i].name, entryname)) {
+ snprintf(curr_section->entries[i].value,
+ sizeof(curr_section->entries[i].value),
+ "%s", entryvalue);
+ return 0;
+ }
+ printf("Error - entry name doesn't exist\n");
+ return -EINVAL;
+}
+
+int rte_cfgfile_save(struct rte_cfgfile *cfg, const char *filename)
{
int i, j;
+ if ((cfg == NULL) || (filename == NULL))
+ return -EINVAL;
+
+ FILE *f = fopen(filename, "w");
+
+ if (f == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_sections; i++) {
+ fprintf(f, "[%s]\n", cfg->sections[i].name);
+
+ for (j = 0; j < cfg->sections[i].num_entries; j++) {
+ fprintf(f, "%s=%s\n",
+ cfg->sections[i].entries[j].name,
+ cfg->sections[i].entries[j].value);
+ }
+ }
+ return fclose(f);
+}
+
+int rte_cfgfile_close(struct rte_cfgfile *cfg)
+{
+ int i;
+
if (cfg == NULL)
return -1;
- for (i = 0; i < cfg->num_sections; i++) {
- if (cfg->sections[i] != NULL) {
- if (cfg->sections[i]->num_entries) {
- for (j = 0; j < cfg->sections[i]->num_entries;
- j++) {
- if (cfg->sections[i]->entries[j] !=
- NULL)
- free(cfg->sections[i]->
- entries[j]);
- }
+ if (cfg->sections != NULL) {
+ for (i = 0; i < cfg->allocated_sections; i++) {
+ if (cfg->sections[i].entries != NULL) {
+ free(cfg->sections[i].entries);
+ cfg->sections[i].entries = NULL;
}
- free(cfg->sections[i]);
}
+ free(cfg->sections);
+ cfg->sections = NULL;
}
free(cfg);
+ cfg = NULL;
return 0;
}
@@ -361,7 +484,7 @@ size_t length)
int i;
int num_sections = 0;
for (i = 0; i < cfg->num_sections; i++) {
- if (strncmp(cfg->sections[i]->name, sectionname, length) == 0)
+ if (strncmp(cfg->sections[i].name, sectionname, length) == 0)
num_sections++;
}
return num_sections;
@@ -375,23 +498,11 @@ rte_cfgfile_sections(struct rte_cfgfile *cfg, char *sections[],
for (i = 0; i < cfg->num_sections && i < max_sections; i++)
snprintf(sections[i], CFG_NAME_LEN, "%s",
- cfg->sections[i]->name);
+ cfg->sections[i].name);
return i;
}
-static const struct rte_cfgfile_section *
-_get_section(struct rte_cfgfile *cfg, const char *sectionname)
-{
- int i;
- for (i = 0; i < cfg->num_sections; i++) {
- if (strncmp(cfg->sections[i]->name, sectionname,
- sizeof(cfg->sections[0]->name)) == 0)
- return cfg->sections[i];
- }
- return NULL;
-}
-
int
rte_cfgfile_has_section(struct rte_cfgfile *cfg, const char *sectionname)
{
@@ -408,7 +519,18 @@ rte_cfgfile_section_num_entries(struct rte_cfgfile *cfg,
return s->num_entries;
}
+int
+rte_cfgfile_section_num_entries_by_index(struct rte_cfgfile *cfg,
+ char *sectionname, int index)
+{
+ if (index < 0 || index >= cfg->num_sections)
+ return -1;
+ const struct rte_cfgfile_section *sect = &(cfg->sections[index]);
+
+ snprintf(sectionname, CFG_NAME_LEN, "%s", sect->name);
+ return sect->num_entries;
+}
int
rte_cfgfile_section_entries(struct rte_cfgfile *cfg, const char *sectionname,
struct rte_cfgfile_entry *entries, int max_entries)
@@ -418,7 +540,7 @@ rte_cfgfile_section_entries(struct rte_cfgfile *cfg, const char *sectionname,
if (sect == NULL)
return -1;
for (i = 0; i < max_entries && i < sect->num_entries; i++)
- entries[i] = *sect->entries[i];
+ entries[i] = sect->entries[i];
return i;
}
@@ -432,11 +554,10 @@ rte_cfgfile_section_entries_by_index(struct rte_cfgfile *cfg, int index,
if (index < 0 || index >= cfg->num_sections)
return -1;
-
- sect = cfg->sections[index];
+ sect = &cfg->sections[index];
snprintf(sectionname, CFG_NAME_LEN, "%s", sect->name);
for (i = 0; i < max_entries && i < sect->num_entries; i++)
- entries[i] = *sect->entries[i];
+ entries[i] = sect->entries[i];
return i;
}
@@ -449,9 +570,9 @@ rte_cfgfile_get_entry(struct rte_cfgfile *cfg, const char *sectionname,
if (sect == NULL)
return NULL;
for (i = 0; i < sect->num_entries; i++)
- if (strncmp(sect->entries[i]->name, entryname, CFG_NAME_LEN)
- == 0)
- return sect->entries[i]->value;
+ if (strncmp(sect->entries[i].name, entryname, CFG_NAME_LEN)
+ == 0)
+ return sect->entries[i].value;
return NULL;
}
diff --git a/lib/librte_cfgfile/rte_cfgfile.h b/lib/librte_cfgfile/rte_cfgfile.h
index fa10d408..17f72757 100644
--- a/lib/librte_cfgfile/rte_cfgfile.h
+++ b/lib/librte_cfgfile/rte_cfgfile.h
@@ -121,6 +121,82 @@ struct rte_cfgfile *rte_cfgfile_load_with_params(const char *filename,
int flags, const struct rte_cfgfile_parameters *params);
/**
+ * Create a new cfgfile instance with no sections or entries
+ *
+ * @param flags
+ * - CFG_FLAG_GLOBAL_SECTION
+ * Indicates that the file supports key value entries before the first
+ * defined section. These entries can be accessed in the "GLOBAL"
+ * section.
+ * - CFG_FLAG_EMPTY_VALUES
+ * Indicates that file supports key value entries where the value can
+ * be zero length (e.g., "key=").
+ * @return
+ * Handle to cfgfile instance on success, NULL otherwise
+ */
+struct rte_cfgfile *rte_cfgfile_create(int flags);
+
+/**
+ * Add section in cfgfile instance.
+ *
+ * @param cfg
+ * Pointer to the cfgfile structure.
+ * @param sectionname
+ * Section name which will be added to the cfgfile.
+ * @return
+ * 0 on success, -ENOMEM if the section cannot be allocated
+ */
+int
+rte_cfgfile_add_section(struct rte_cfgfile *cfg, const char *sectionname);
+
+/**
+ * Add entry to specified section in cfgfile instance.
+ *
+ * @param cfg
+ * Pointer to the cfgfile structure.
+ * @param sectionname
+ * Name of the section the entry is added to.
+ * @param entryname
+ * Entry name to add.
+ * @param entryvalue
+ * Entry value to add.
+ * @return
+ * 0 on success, -EEXIST if the entry already exists, -EINVAL if bad argument
+ */
+int rte_cfgfile_add_entry(struct rte_cfgfile *cfg,
+ const char *sectionname, const char *entryname,
+ const char *entryvalue);
+
+/**
+ * Update the value of a given entry name in a section of the config file
+ *
+ * @param cfg
+ * Config file
+ * @param sectionname
+ * Section name
+ * @param entryname
+ * Entry name whose value is to be updated
+ * @param entryvalue
+ * New entry value. Can also be an empty string if CFG_FLAG_EMPTY_VALUES is set
+ * @return
+ * 0 on success, -EINVAL if bad argument
+ */
+int rte_cfgfile_set_entry(struct rte_cfgfile *cfg, const char *sectionname,
+ const char *entryname, const char *entryvalue);
+
+/**
+ * Save the cfgfile object to a file on disk
+ *
+ * @param cfg
+ * Config file structure
+ * @param filename
+ * Name of the file the data is written to
+ * @return
+ * 0 on success, a negative value otherwise
+ */
+int rte_cfgfile_save(struct rte_cfgfile *cfg, const char *filename);
+
+/**
* Get number of sections in config file
*
* @param cfg
@@ -184,6 +260,26 @@ int rte_cfgfile_section_num_entries(struct rte_cfgfile *cfg,
const char *sectionname);
/**
+* Get number of entries in given config file section
+*
+* The index of a section is the same as the index of its name in the
+* result of rte_cfgfile_sections. This API can be used when there are
+* multiple sections with the same name.
+*
+* @param cfg
+* Config file
+* @param sectionname
+* Output buffer filled with the section name on success
+* @param index
+* Section index
+* @return
+* Number of entries in section on success, -1 otherwise
+*/
+int rte_cfgfile_section_num_entries_by_index(struct rte_cfgfile *cfg,
+ char *sectionname,
+ int index);
+
+/**
* Get section entries as key-value pairs
*
* If multiple sections have the given name this function operates on the
diff --git a/lib/librte_cfgfile/rte_cfgfile_version.map b/lib/librte_cfgfile/rte_cfgfile_version.map
index 5fe60f72..cc4a11f6 100644
--- a/lib/librte_cfgfile/rte_cfgfile_version.map
+++ b/lib/librte_cfgfile/rte_cfgfile_version.map
@@ -27,3 +27,14 @@ DPDK_17.05 {
rte_cfgfile_load_with_params;
} DPDK_16.04;
+
+DPDK_17.11 {
+ global:
+
+ rte_cfgfile_add_entry;
+ rte_cfgfile_add_section;
+ rte_cfgfile_create;
+ rte_cfgfile_save;
+ rte_cfgfile_set_entry;
+
+} DPDK_17.05;
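[Note: the five symbols exported in the DPDK_17.11 node above form a small
write path. A hedged round-trip sketch using only the new calls; the file name
is arbitrary:

    #include <rte_cfgfile.h>

    struct rte_cfgfile *cfg = rte_cfgfile_create(0);

    if (cfg != NULL) {
            rte_cfgfile_add_section(cfg, "server");
            rte_cfgfile_add_entry(cfg, "server", "addr", "10.0.0.1");
            rte_cfgfile_set_entry(cfg, "server", "addr", "10.0.0.2");
            /* writes "[server]" then "addr=10.0.0.2" */
            rte_cfgfile_save(cfg, "/tmp/server.cfg");
            rte_cfgfile_close(cfg);
    }
]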
diff --git a/lib/librte_cmdline/Makefile b/lib/librte_cmdline/Makefile
index 644f68e4..2c48e62b 100644
--- a/lib/librte_cmdline/Makefile
+++ b/lib/librte_cmdline/Makefile
@@ -54,6 +54,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_portlist.c
CFLAGS += -D_GNU_SOURCE
+LDLIBS += -lrte_eal
# install includes
INCS := cmdline.h cmdline_parse.h cmdline_parse_num.h cmdline_parse_ipaddr.h
diff --git a/lib/librte_cmdline/cmdline.c b/lib/librte_cmdline/cmdline.c
index a9c47be3..d7491651 100644
--- a/lib/librte_cmdline/cmdline.c
+++ b/lib/librte_cmdline/cmdline.c
@@ -205,7 +205,8 @@ cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
}
if (ret >= BUFSIZ)
ret = BUFSIZ - 1;
- write(cl->s_out, buf, ret);
+ ret = write(cl->s_out, buf, ret);
+ (void)ret;
free(buf);
#endif
}
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index 56491eac..3e12ee54 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -163,7 +163,7 @@ static int
match_inst(cmdline_parse_inst_t *inst, const char *buf,
unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
{
- cmdline_parse_token_hdr_t * token_p;
+ cmdline_parse_token_hdr_t *token_p = NULL;
unsigned int i=0;
int n = 0;
struct cmdline_token_hdr token_hdr;
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 6ac331bc..8e780b83 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -34,11 +34,13 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_cryptodev.a
# library version
-LIBABIVER := 3
+LIBABIVER := 4
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_kvargs
# library source files
SRCS-y += rte_cryptodev.c rte_cryptodev_pmd.c
@@ -48,8 +50,6 @@ SYMLINK-y-include += rte_crypto.h
SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
-SYMLINK-y-include += rte_cryptodev_vdev.h
-SYMLINK-y-include += rte_cryptodev_pci.h
# versioning export map
EXPORT_MAP := rte_cryptodev_version.map
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 10fe0804..3d672fe7 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -86,7 +86,8 @@ enum rte_crypto_op_status {
*/
enum rte_crypto_op_sess_type {
RTE_CRYPTO_OP_WITH_SESSION, /**< Session based crypto operation */
- RTE_CRYPTO_OP_SESSIONLESS /**< Session-less crypto operation */
+ RTE_CRYPTO_OP_SESSIONLESS, /**< Session-less crypto operation */
+ RTE_CRYPTO_OP_SECURITY_SESSION /**< Security session crypto operation */
};
/**
@@ -117,7 +118,7 @@ struct rte_crypto_op {
struct rte_mempool *mempool;
/**< crypto operation mempool which operation is allocated from */
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< physical address of crypto operation */
RTE_STD_C11
@@ -144,6 +145,7 @@ __rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
__rte_crypto_sym_op_reset(op->sym);
break;
+ case RTE_CRYPTO_OP_TYPE_UNDEFINED:
default:
break;
}
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 0ceaa917..c981f0b9 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+ * The user is advised to set a tag value for each
@@ -160,9 +160,6 @@ struct rte_crypto_cipher_xform {
* Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
* 192 bits (24 bytes) or 256 bits (32 bytes).
*
- * For the CCM mode of operation, the only supported key length is 128
- * bits (16 bytes).
- *
* For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
* should be set to the combined length of the encryption key and the
* keymask. Since the keymask and the encryption key are the same size,
@@ -196,7 +193,9 @@ struct rte_crypto_cipher_xform {
* space for the implementation to write in the flags
* in the first byte). Note that a full 16 bytes should
* be allocated, even though the length field will
- * have a value less than this.
+ * have a value less than this. Note that the PMDs may
+ * modify the memory reserved (the first byte and the
+ * final padding)
*
* - For AES-XTS, this is the 128bit tweak, i, from
* IEEE Std 1619-2007.
@@ -427,7 +426,11 @@ struct rte_crypto_aead_xform {
uint16_t digest_length;
uint16_t aad_length;
- /**< The length of the additional authenticated data (AAD) in bytes. */
+ /**< The length of the additional authenticated data (AAD) in bytes.
+ * For CCM mode, this is the length of the actual AAD, even though
+ * the caller is required to reserve 18 bytes before the AAD, plus
+ * padding at its end, so that a multiple of 16 bytes is allocated.
+ */
};
/** Crypto transformation types */
@@ -505,6 +508,8 @@ struct rte_crypto_sym_op {
/**< Handle for the initialised session context */
struct rte_crypto_sym_xform *xform;
/**< Session-less API crypto operation parameters */
+ struct rte_security_session *sec_session;
+ /**< Handle for the initialised security session context */
};
RTE_STD_C11
@@ -543,7 +548,7 @@ struct rte_crypto_sym_op {
* For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
* "digest result" read "authentication tag T".
*/
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of digest */
} digest; /**< Digest parameters */
struct {
@@ -555,20 +560,19 @@ struct rte_crypto_sym_op {
* Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
* the caller should setup this field as follows:
*
- * - the nonce should be written starting at an offset
- * of one byte into the array, leaving room for the
- * implementation to write in the flags to the first
- * byte.
- *
- * - the additional authentication data itself should
+ * - the additional authentication data itself should
* be written starting at an offset of 18 bytes into
- * the array, leaving room for the length encoding in
- * the first two bytes of the second block.
+ * the array, leaving room for the first block (16 bytes)
+ * and the length encoding in the first two bytes of the
+ * second block.
*
* - the array should be big enough to hold the above
- * fields, plus any padding to round this up to the
- * nearest multiple of the block size (16 bytes).
- * Padding will be added by the implementation.
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * - Note that PMDs may modify the memory reserved
+ * (first 18 bytes and the final padding).
*
* Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
* caller should setup this field as follows:
@@ -579,7 +583,7 @@ struct rte_crypto_sym_op {
* of the block size (16 bytes).
*
*/
- phys_addr_t phys_addr; /**< physical address */
+ rte_iova_t phys_addr; /**< physical address */
} aad;
/**< Additional authentication parameters */
} aead;
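[Note: put together, the CCM AAD convention above amounts to: reserve 18 bytes
in front of the caller's AAD and round the allocation up to a multiple of 16
bytes. A hedged sketch, where aad_src/aad_len are hypothetical application
variables and the 17.11 rte_malloc_virt2iova() helper is assumed:

    uint32_t alloc_len = RTE_ALIGN_CEIL(18 + aad_len, 16);
    uint8_t *aad = rte_zmalloc(NULL, alloc_len, 16);

    if (aad != NULL) {
            memcpy(aad + 18, aad_src, aad_len); /* bytes 0-17 left for PMD */
            sym_op->aead.aad.data = aad;
            sym_op->aead.aad.phys_addr = rte_malloc_virt2iova(aad);
    }
]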
@@ -676,7 +680,7 @@ struct rte_crypto_sym_op {
* will overwrite any data at this location.
*
*/
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
/**< Physical address of digest */
} digest; /**< Digest parameters */
} auth;
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 327d7e84..b40c0282 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -377,12 +377,6 @@ rte_cryptodev_get_feature_name(uint64_t flag)
}
}
-int
-rte_cryptodev_create_vdev(const char *name, const char *args)
-{
- return rte_vdev_init(name, args);
-}
-
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
@@ -488,6 +482,16 @@ rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
return count;
}
+void *
+rte_cryptodev_get_sec_ctx(uint8_t dev_id)
+{
+ if (rte_crypto_devices[dev_id].feature_flags &
+ RTE_CRYPTODEV_FF_SECURITY)
+ return rte_crypto_devices[dev_id].security_ctx;
+
+ return NULL;
+}
+
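[Note: this accessor is the application-side entry point to the new
rte_security library; it returns the context only when the PMD advertises
RTE_CRYPTODEV_FF_SECURITY. A hedged sketch; the cast is illustrative, with
struct rte_security_ctx defined by librte_security:

    struct rte_security_ctx *sec_ctx =
            (struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

    if (sec_ctx == NULL)
            printf("crypto dev %u: no security protocol support\n",
                            (unsigned int)dev_id);
]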
int
rte_cryptodev_socket_id(uint8_t dev_id)
{
@@ -583,6 +587,9 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id)
cryptodev->data->socket_id = socket_id;
cryptodev->data->dev_started = 0;
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
cryptodev_globals.nb_devs++;
@@ -1271,7 +1278,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
__rte_crypto_op_reset(op, type);
- op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->phys_addr = rte_mem_virt2iova(_op_data);
op->mempool = mempool;
}
@@ -1362,12 +1369,6 @@ TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);
static struct cryptodev_driver_list cryptodev_driver_list =
TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);
-struct cryptodev_driver {
- TAILQ_ENTRY(cryptodev_driver) next; /**< Next in list. */
- const struct rte_driver *driver;
- uint8_t id;
-};
-
int
rte_cryptodev_driver_id_get(const char *name)
{
@@ -1388,6 +1389,17 @@ rte_cryptodev_driver_id_get(const char *name)
}
const char *
+rte_cryptodev_name_get(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (dev == NULL)
+ return NULL;
+
+ return dev->data->name;
+}
+
+const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
struct cryptodev_driver *driver;
@@ -1399,15 +1411,13 @@ rte_cryptodev_driver_name_get(uint8_t driver_id)
}
uint8_t
-rte_cryptodev_allocate_driver(const struct rte_driver *drv)
+rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
+ const struct rte_driver *drv)
{
- struct cryptodev_driver *driver;
-
- driver = malloc(sizeof(*driver));
- driver->driver = drv;
- driver->id = nb_drivers;
+ crypto_drv->driver = drv;
+ crypto_drv->id = nb_drivers;
- TAILQ_INSERT_TAIL(&cryptodev_driver_list, driver, next);
+ TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);
return nb_drivers++;
}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 7ec9c4bc..dade5548 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -49,7 +49,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_dev.h"
#include <rte_common.h>
-#include <rte_vdev.h>
extern const char **rte_cyptodev_names;
@@ -60,10 +59,10 @@ extern const char **rte_cyptodev_names;
RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
-#define CDEV_PMD_LOG_ERR(dev, ...) \
- RTE_LOG(ERR, CRYPTODEV, \
- RTE_FMT("[%s] %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
- dev, __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+#define CDEV_LOG_INFO(...) \
+ RTE_LOG(INFO, CRYPTODEV, \
+ RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define CDEV_LOG_DEBUG(...) \
@@ -111,7 +110,7 @@ extern const char **rte_cyptodev_names;
* to calculate address from.
*/
#define rte_crypto_op_ctophys_offset(c, o) \
- (phys_addr_t)((c)->phys_addr + (o))
+ (rte_iova_t)((c)->phys_addr + (o))
/**
* Crypto parameters range description
@@ -351,6 +350,8 @@ rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
/**< Utilises CPU NEON instructions */
#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 11)
/**< Utilises ARM CPU Cryptographic Extensions */
+#define RTE_CRYPTODEV_FF_SECURITY (1ULL << 12)
+/**< Support Security Protocol Processing */
/**
@@ -434,33 +435,29 @@ struct rte_cryptodev_stats {
/**< Max length of name of crypto PMD */
/**
- * @deprecated
- *
- * Create a virtual crypto device
+ * Get the device identifier for the named crypto device.
*
- * @param name Cryptodev PMD name of device to be created.
- * @param args Options arguments for device.
+ * @param name device name to select the device structure.
*
* @return
- * - On successful creation of the cryptodev the device index is returned,
- * which will be between 0 and rte_cryptodev_count().
- * - In the case of a failure, returns -1.
+ * - Returns crypto device identifier on success.
+ * - Return -1 on failure to find named crypto device.
*/
-__rte_deprecated
extern int
-rte_cryptodev_create_vdev(const char *name, const char *args);
+rte_cryptodev_get_dev_id(const char *name);
/**
- * Get the device identifier for the named crypto device.
+ * Get the crypto device name given a device identifier.
*
- * @param name device name to select the device structure.
+ * @param dev_id
+ * The identifier of the device
*
* @return
- * - Returns crypto device identifier on success.
- * - Return -1 on failure to find named crypto device.
+ * - Returns crypto device name.
+ * - Returns NULL if crypto device is not present.
*/
-extern int
-rte_cryptodev_get_dev_id(const char *name);
+extern const char *
+rte_cryptodev_name_get(uint8_t dev_id);
/**
* Get the total number of crypto devices that have been successfully
@@ -676,6 +673,11 @@ rte_cryptodev_stats_reset(uint8_t dev_id);
* @param dev_info A pointer to a structure of type
* *rte_cryptodev_info* to be filled with the
* contextual information of the device.
+ *
+ * @note The capabilities field of dev_info is set to point to the first
+ * element of an array of struct rte_cryptodev_capabilities. The element after
+ * the last valid element has its op field set to
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED.
*/
extern void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
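[Note: the sentinel convention described in the note above is typically
consumed with a loop like this hedged sketch:

    struct rte_cryptodev_info info;
    const struct rte_cryptodev_capabilities *cap;

    rte_cryptodev_info_get(dev_id, &info);
    /* array is terminated by an RTE_CRYPTO_OP_TYPE_UNDEFINED element */
    for (cap = info.capabilities;
                    cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
            if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
                    printf("sym xform type %d\n", cap->sym.xform_type);
    }
]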
@@ -756,11 +758,17 @@ struct rte_cryptodev {
struct rte_cryptodev_cb_list link_intr_cbs;
/**< User application callback for interrupts if present */
+ void *security_ctx;
+ /**< Context for security ops */
+
__extension__
uint8_t attached : 1;
/**< Flag indicating the device is attached */
} __rte_cache_aligned;
+void *
+rte_cryptodev_get_sec_ctx(uint8_t dev_id);
+
/**
*
* The data part, with no function pointers, associated with each device.
@@ -1025,26 +1033,6 @@ int rte_cryptodev_driver_id_get(const char *name);
*/
const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
-/**
- * @internal
- * Allocate Cryptodev driver.
- *
- * @param driver
- * Pointer to rte_driver.
- * @return
- * The driver type identifier
- */
-uint8_t rte_cryptodev_allocate_driver(const struct rte_driver *driver);
-
-
-#define RTE_PMD_REGISTER_CRYPTO_DRIVER(drv, driver_id)\
-RTE_INIT(init_ ##driver_id);\
-static void init_ ##driver_id(void)\
-{\
- driver_id = rte_cryptodev_allocate_driver(&(drv).driver);\
-}
-
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_pci.h b/lib/librte_cryptodev/rte_cryptodev_pci.h
deleted file mode 100644
index 67eda96a..00000000
--- a/lib/librte_cryptodev/rte_cryptodev_pci.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_CRYPTODEV_PCI_H_
-#define _RTE_CRYPTODEV_PCI_H_
-
-#include <rte_pci.h>
-#include "rte_cryptodev.h"
-
-/**
- * Initialisation function of a crypto driver invoked for each matching
- * crypto PCI device detected during the PCI probing phase.
- *
- * @param dev The dev pointer is the address of the *rte_cryptodev*
- * structure associated with the matching device and which
- * has been [automatically] allocated in the
- * *rte_crypto_devices* array.
- *
- * @return
- * - 0: Success, the device is properly initialised by the driver.
- * In particular, the driver MUST have set up the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*cryptodev_pci_init_t)(struct rte_cryptodev *dev);
-
-/**
- * Finalisation function of a driver invoked for each matching
- * PCI device detected during the PCI closing phase.
- *
- * @param dev The dev pointer is the address of the *rte_cryptodev*
- * structure associated with the matching device and which
- * has been [automatically] allocated in the
- * *rte_crypto_devices* array.
- *
- * * @return
- * - 0: Success, the device is properly finalised by the driver.
- * In particular, the driver MUST free the *dev_ops* pointer
- * of the *dev* structure.
- * - <0: Error code of the device initialisation failure.
- */
-typedef int (*cryptodev_pci_uninit_t)(struct rte_cryptodev *dev);
-
-/**
- * @internal
- * Wrapper for use by pci drivers as a .probe function to attach to a crypto
- * interface.
- */
-int
-rte_cryptodev_pci_generic_probe(struct rte_pci_device *pci_dev,
- size_t private_data_size,
- cryptodev_pci_init_t dev_init);
-
-/**
- * @internal
- * Wrapper for use by pci drivers as a .remove function to detach a crypto
- * interface.
- */
-int
-rte_cryptodev_pci_generic_remove(struct rte_pci_device *pci_dev,
- cryptodev_pci_uninit_t dev_uninit);
-
-#endif /* _RTE_CRYPTODEV_PCI_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.c b/lib/librte_cryptodev/rte_cryptodev_pmd.c
index a57faadc..b4eeb448 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.c
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.c
@@ -32,84 +32,48 @@
#include <rte_malloc.h>
-#include "rte_cryptodev_vdev.h"
-#include "rte_cryptodev_pci.h"
#include "rte_cryptodev_pmd.h"
/**
* Parse name from argument
*/
static int
-rte_cryptodev_vdev_parse_name_arg(const char *key __rte_unused,
+rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
- struct rte_crypto_vdev_init_params *params = extra_args;
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+ int n;
- if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CDEV_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes", value,
- RTE_CRYPTODEV_NAME_MAX_LEN - 1);
- return -1;
- }
-
- strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+ n = snprintf(params->name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s", value);
+ if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
+ return -EINVAL;
return 0;
}
/**
- * Parse integer from argument
+ * Parse unsigned integer from argument
*/
static int
-rte_cryptodev_vdev_parse_integer_arg(const char *key __rte_unused,
+rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
- int *i = extra_args;
+ int i;
+ char *end;
+ errno = 0;
- *i = atoi(value);
- if (*i < 0) {
- CDEV_LOG_ERR("Argument has to be positive.");
- return -1;
- }
+ i = strtol(value, &end, 10);
+ if (*end != 0 || errno != 0 || i < 0)
+ return -EINVAL;
+ *((uint32_t *)extra_args) = i;
return 0;
}
-struct rte_cryptodev *
-rte_cryptodev_vdev_pmd_init(const char *name, size_t dev_private_size,
- int socket_id, struct rte_vdev_device *vdev)
-{
- struct rte_cryptodev *cryptodev;
-
- /* allocate device structure */
- cryptodev = rte_cryptodev_pmd_allocate(name, socket_id);
- if (cryptodev == NULL)
- return NULL;
-
- /* allocate private device structure */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- cryptodev->data->dev_private =
- rte_zmalloc_socket("cryptodev device private",
- dev_private_size,
- RTE_CACHE_LINE_SIZE,
- socket_id);
-
- if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private device"
- " data");
- }
-
- cryptodev->device = &vdev->device;
-
- /* initialise user call-back tail queue */
- TAILQ_INIT(&(cryptodev->link_intr_cbs));
-
- return cryptodev;
-}
-
int
-rte_cryptodev_vdev_parse_init_params(struct rte_crypto_vdev_init_params *params,
- const char *input_args)
+rte_cryptodev_pmd_parse_input_args(
+ struct rte_cryptodev_pmd_init_params *params,
+ const char *args)
{
struct rte_kvargs *kvlist = NULL;
int ret = 0;
@@ -117,35 +81,36 @@ rte_cryptodev_vdev_parse_init_params(struct rte_crypto_vdev_init_params *params,
if (params == NULL)
return -EINVAL;
- if (input_args) {
- kvlist = rte_kvargs_parse(input_args,
- cryptodev_vdev_valid_params);
+ if (args) {
+ kvlist = rte_kvargs_parse(args, cryptodev_pmd_valid_params);
if (kvlist == NULL)
- return -1;
+ return -EINVAL;
ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- &rte_cryptodev_vdev_parse_integer_arg,
- &params->max_nb_queue_pairs);
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &rte_cryptodev_pmd_parse_uint_arg,
+ &params->max_nb_queue_pairs);
if (ret < 0)
goto free_kvlist;
ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &rte_cryptodev_vdev_parse_integer_arg,
- &params->max_nb_sessions);
+ RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG,
+ &rte_cryptodev_pmd_parse_uint_arg,
+ &params->max_nb_sessions);
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
- &rte_cryptodev_vdev_parse_integer_arg,
- &params->socket_id);
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &rte_cryptodev_pmd_parse_uint_arg,
+ &params->socket_id);
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
- &rte_cryptodev_vdev_parse_name_arg,
- params);
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &rte_cryptodev_pmd_parse_name_arg,
+ params);
if (ret < 0)
goto free_kvlist;
}
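[Note: the keys recognised here arrive through the standard kvargs path. A
hedged sketch of the intended call sequence: defaults are filled in first, then
the parser overrides only the keys present in the devargs string, e.g.
"socket_id=0,max_nb_queue_pairs=2,max_nb_sessions=1024"; private_data_size is
PMD specific:

    struct rte_cryptodev_pmd_init_params params = {
            "",              /* name: set when "name=..." is passed */
            0,               /* private_data_size: PMD specific */
            rte_socket_id(),
            RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
            RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
    };

    if (rte_cryptodev_pmd_parse_input_args(&params, args) < 0)
            return -EINVAL;
]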
@@ -155,93 +120,80 @@ free_kvlist:
return ret;
}
-int
-rte_cryptodev_pci_generic_probe(struct rte_pci_device *pci_dev,
- size_t private_data_size,
- cryptodev_pci_init_t dev_init)
+struct rte_cryptodev *
+rte_cryptodev_pmd_create(const char *name,
+ struct rte_device *device,
+ struct rte_cryptodev_pmd_init_params *params)
{
struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ if (params->name[0] != '\0') {
+ CDEV_LOG_INFO("[%s] User specified device name = %s",
+ device->driver->name, params->name);
+ name = params->name;
+ }
- int retval;
+ CDEV_LOG_INFO("[%s] - Creating cryptodev %s",
+ device->driver->name, name);
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
+ CDEV_LOG_INFO("[%s] - Initialisation parameters - name: %s, "
+ "socket id: %d, max queue pairs: %u, max sessions: %u",
+ device->driver->name, name,
+ params->socket_id, params->max_nb_queue_pairs,
+ params->max_nb_sessions);
- cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
- if (cryptodev == NULL)
- return -ENOMEM;
+ /* allocate device structure */
+ cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
+ if (cryptodev == NULL) {
+ CDEV_LOG_ERR("[%s] Failed to allocate crypto device for %s",
+ device->driver->name, name);
+ return NULL;
+ }
+ /* allocate private device structure */
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
cryptodev->data->dev_private =
- rte_zmalloc_socket(
- "cryptodev private structure",
- private_data_size,
+ rte_zmalloc_socket("cryptodev device private",
+ params->private_data_size,
RTE_CACHE_LINE_SIZE,
- rte_socket_id());
+ params->socket_id);
+
+ if (cryptodev->data->dev_private == NULL) {
+ CDEV_LOG_ERR("[%s] Cannot allocate memory for "
+ "cryptodev %s private data",
+ device->driver->name, name);
- if (cryptodev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private "
- "device data");
+ rte_cryptodev_pmd_release_device(cryptodev);
+ return NULL;
+ }
}
- cryptodev->device = &pci_dev->device;
+ cryptodev->device = device;
- /* init user callbacks */
+ /* initialise user call-back tail queue */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
- /* Invoke PMD device initialization function */
- RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL);
- retval = dev_init(cryptodev);
- if (retval == 0)
- return 0;
-
- CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)"
- " failed", pci_dev->device.driver->name,
- (unsigned int) pci_dev->id.vendor_id,
- (unsigned int) pci_dev->id.device_id);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
- /* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
-
- return -ENXIO;
+ return cryptodev;
}
int
-rte_cryptodev_pci_generic_remove(struct rte_pci_device *pci_dev,
- cryptodev_pci_uninit_t dev_uninit)
+rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
{
- struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
- int ret;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
- if (cryptodev == NULL)
- return -ENODEV;
+ int retval;
- /* Invoke PMD device uninit function */
- if (dev_uninit) {
- ret = dev_uninit(cryptodev);
- if (ret)
- return ret;
- }
+ CDEV_LOG_INFO("[%s] Closing crypto device %s",
+ cryptodev->device->driver->name,
+ cryptodev->device->name);
/* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
+ retval = rte_cryptodev_pmd_release_device(cryptodev);
+ if (retval)
+ return retval;
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(cryptodev->data->dev_private);
+
cryptodev->device = NULL;
cryptodev->data = NULL;
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index c983eb21..744405e2 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -56,6 +56,35 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
+
+#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS 8
+#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS 2048
+
+#define RTE_CRYPTODEV_PMD_NAME_ARG ("name")
+#define RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define RTE_CRYPTODEV_PMD_SOCKET_ID_ARG ("socket_id")
+
+
+static const char * const cryptodev_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG
+};
+
+/**
+ * @internal
+ * Initialisation parameters for crypto devices
+ */
+struct rte_cryptodev_pmd_init_params {
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ size_t private_data_size;
+ int socket_id;
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+};
+
/** Global structure used for maintaining state of allocated crypto devices */
struct rte_cryptodev_global {
struct rte_cryptodev *devs; /**< Device information array */
@@ -65,6 +94,13 @@ struct rte_cryptodev_global {
uint8_t max_devs; /**< Max number of devices */
};
+/* Cryptodev driver, containing the driver ID */
+struct cryptodev_driver {
+ TAILQ_ENTRY(cryptodev_driver) next; /**< Next in list. */
+ const struct rte_driver *driver;
+ uint8_t id;
+};
+
/** pointer to global crypto devices data structure. */
extern struct rte_cryptodev_global *rte_cryptodev_globals;
@@ -385,6 +421,63 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id);
extern int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev);
+
+/**
+ * @internal
+ *
+ * PMD assist function to parse initialisation arguments for a crypto driver
+ * when creating a new crypto PMD device instance.
+ *
+ * The PMD should set default values for its parameters before calling this
+ * function; the defaults are then overwritten with any values successfully
+ * parsed from the args string.
+ *
+ * @param params parsed PMD initialisation parameters
+ * @param args input argument string to parse
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_cryptodev_pmd_parse_input_args(
+ struct rte_cryptodev_pmd_init_params *params,
+ const char *args);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a crypto driver to create
+ * and allocate resources for a new crypto PMD device instance.
+ *
+ * @param name crypto device name.
+ * @param device base device instance
+ * @param params PMD initialisation parameters
+ *
+ * @return
+ * - crypto device instance on success
+ * - NULL on creation failure
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_create(const char *name,
+ struct rte_device *device,
+ struct rte_cryptodev_pmd_init_params *params);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a crypto driver to
+ * destroy and free resources associated with a crypto PMD device instance.
+ *
+ * @param cryptodev crypto device handle.
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int
+rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev);
+
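[Note: a hedged sketch of how a virtual PMD's probe and remove callbacks might
sit on top of these helpers; the my_pmd_* names are illustrative, params is
initialised as in the earlier sketch, and the ops wiring is elided:

    static int
    my_pmd_probe(struct rte_vdev_device *vdev)
    {
            struct rte_cryptodev_pmd_init_params params = {
                    "", 0, rte_socket_id(),
                    RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
                    RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
            };
            struct rte_cryptodev *dev;

            rte_cryptodev_pmd_parse_input_args(&params,
                            rte_vdev_device_args(vdev));
            dev = rte_cryptodev_pmd_create(rte_vdev_device_name(vdev),
                            &vdev->device, &params);
            if (dev == NULL)
                    return -ENODEV;
            /* dev->dev_ops and enqueue/dequeue burst functions set here */
            return 0;
    }

    static int
    my_pmd_remove(struct rte_vdev_device *vdev)
    {
            struct rte_cryptodev *dev = rte_cryptodev_pmd_get_named_dev(
                            rte_vdev_device_name(vdev));

            return (dev != NULL) ? rte_cryptodev_pmd_destroy(dev) : -ENODEV;
    }
]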
/**
* Executes all the user application registered callbacks for the specific
* device.
@@ -405,6 +498,29 @@ void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+/**
+ * @internal
+ * Allocate Cryptodev driver.
+ *
+ * @param crypto_drv
+ * Pointer to cryptodev_driver.
+ * @param drv
+ * Pointer to rte_driver.
+ *
+ * @return
+ * The driver type identifier
+ */
+uint8_t rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
+ const struct rte_driver *drv);
+
+
+#define RTE_PMD_REGISTER_CRYPTO_DRIVER(crypto_drv, drv, driver_id)\
+RTE_INIT(init_ ##driver_id);\
+static void init_ ##driver_id(void)\
+{\
+ driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv).driver);\
+}
+
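[Note: with the cryptodev_driver storage now supplied by the caller,
registration under the reworked macro looks roughly like this hedged sketch;
names are examples, and the second argument must expose a .driver member, as
struct rte_vdev_driver does:

    static struct rte_vdev_driver my_pmd_drv = {
            .probe = my_pmd_probe,
            .remove = my_pmd_remove,
    };
    static struct cryptodev_driver my_crypto_drv;
    uint8_t my_pmd_driver_id;

    RTE_PMD_REGISTER_VDEV(crypto_my_pmd, my_pmd_drv);
    RTE_PMD_REGISTER_CRYPTO_DRIVER(my_crypto_drv, my_pmd_drv,
            my_pmd_driver_id);
]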
static inline void *
get_session_private_data(const struct rte_cryptodev_sym_session *sess,
uint8_t driver_id) {
diff --git a/lib/librte_cryptodev/rte_cryptodev_vdev.h b/lib/librte_cryptodev/rte_cryptodev_vdev.h
deleted file mode 100644
index 94ab9d33..00000000
--- a/lib/librte_cryptodev/rte_cryptodev_vdev.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_CRYPTODEV_VDEV_H_
-#define _RTE_CRYPTODEV_VDEV_H_
-
-#include <rte_vdev.h>
-#include <inttypes.h>
-
-#include "rte_cryptodev.h"
-
-#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS 8
-#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS 2048
-
-#define RTE_CRYPTODEV_VDEV_NAME ("name")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
-#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
-
-static const char * const cryptodev_vdev_valid_params[] = {
- RTE_CRYPTODEV_VDEV_NAME,
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
-};
-
-/**
- * @internal
- * Initialisation parameters for virtual crypto devices
- */
-struct rte_crypto_vdev_init_params {
- unsigned int max_nb_queue_pairs;
- unsigned int max_nb_sessions;
- uint8_t socket_id;
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-};
-
-/**
- * @internal
- * Creates a new virtual crypto device and returns the pointer
- * to that device.
- *
- * @param name PMD type name
- * @param dev_private_size Size of crypto PMDs private data
- * @param socket_id Socket to allocate resources on.
- * @param vdev Pointer to virtual device structure.
- *
- * @return
- * - Cryptodev pointer if device is successfully created.
- * - NULL if device cannot be created.
- */
-struct rte_cryptodev *
-rte_cryptodev_vdev_pmd_init(const char *name, size_t dev_private_size,
- int socket_id, struct rte_vdev_device *vdev);
-
-/**
- * @internal
- * Parse virtual device initialisation parameters input arguments
- *
- * @params params Initialisation parameters with defaults set.
- * @params input_args Command line arguments
- *
- * @return
- * 0 on successful parse
- * <0 on failure to parse
- */
-int
-rte_cryptodev_vdev_parse_init_params(struct rte_crypto_vdev_init_params *params,
- const char *input_args);
-
-#endif /* _RTE_CRYPTODEV_VDEV_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index e9ba88ac..eb47308b 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -7,7 +7,6 @@ DPDK_16.04 {
rte_cryptodev_close;
rte_cryptodev_count;
rte_cryptodev_configure;
- rte_cryptodev_create_vdev;
rte_cryptodev_get_dev_id;
rte_cryptodev_get_feature_name;
rte_cryptodev_info_get;
@@ -68,14 +67,21 @@ DPDK_17.08 {
rte_cryptodev_get_aead_algo_enum;
rte_cryptodev_get_header_session_size;
rte_cryptodev_get_private_session_size;
- rte_cryptodev_pci_generic_probe;
- rte_cryptodev_pci_generic_remove;
rte_cryptodev_sym_capability_check_aead;
rte_cryptodev_sym_session_init;
rte_cryptodev_sym_session_clear;
- rte_cryptodev_vdev_parse_init_params;
- rte_cryptodev_vdev_pmd_init;
rte_crypto_aead_algorithm_strings;
rte_crypto_aead_operation_strings;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_cryptodev_get_sec_ctx;
+ rte_cryptodev_name_get;
+ rte_cryptodev_pmd_create;
+ rte_cryptodev_pmd_destroy;
+ rte_cryptodev_pmd_parse_input_args;
+
+} DPDK_17.08;
diff --git a/lib/librte_distributor/Makefile b/lib/librte_distributor/Makefile
index b417ee7b..fee00121 100644
--- a/lib/librte_distributor/Makefile
+++ b/lib/librte_distributor/Makefile
@@ -36,6 +36,7 @@ LIB = librte_distributor.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev
EXPORT_MAP := rte_distributor_version.map
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 20ba9ffb..57ad3397 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -432,7 +432,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
next_value = (((int64_t)(uintptr_t)next_mb) <<
RTE_DISTRIB_FLAG_BITS);
/*
- * User is advocated to set tag vaue for each
+ * User is advised to set a tag value for each
* mbuf before calling rte_distributor_process.
* User defined tags are used to identify flows,
* or sessions.
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index b09abecd..9adda52b 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -244,7 +244,7 @@ rte_distributor_process_v20(struct rte_distributor_v20 *d,
next_value = (((int64_t)(uintptr_t)next_mb)
<< RTE_DISTRIB_FLAG_BITS);
/*
- * User is advocated to set tag vaue for each
+ * User is advised to set a tag value for each
* mbuf before calling rte_distributor_process.
* User defined tags are used to identify flows,
* or sessions.
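
Both hunks fix the same comment; the tag it refers to is the mbuf's hash.usr field. A minimal sketch of the calling convention, assuming bufs/nb_rx come from an Rx burst and compute_flow_id() is a hypothetical helper:

/* Tag each mbuf before handing the burst to the distributor so packets
 * of one flow stay on one worker; compute_flow_id() is hypothetical. */
unsigned int i;

for (i = 0; i < nb_rx; i++)
	bufs[i]->hash.usr = compute_flow_id(bufs[i]);
rte_distributor_process(d, bufs, nb_rx);
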
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index 005019ed..afa117de 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -46,16 +46,15 @@ LDLIBS += -lexecinfo
LDLIBS += -lpthread
LDLIBS += -lgcc_s
-EXPORT_MAP := rte_eal_version.map
+EXPORT_MAP := ../../rte_eal_version.map
-LIBABIVER := 5
+LIBABIVER := 6
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_hugepage_info.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_thread.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_pci.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_debug.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_timer.c
@@ -68,9 +67,6 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_launch.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_vdev.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_pci.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_pci_uio.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_tailqs.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_errno.c
@@ -92,6 +88,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_service.c
# from arch dir
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_cpuflags.c
SRCS-$(CONFIG_RTE_ARCH_X86) += rte_spinlock.c
+SRCS-y += rte_cycles.c
CFLAGS_eal_common_cpuflags.o := $(CPUFLAGS_LIST)
@@ -107,7 +104,7 @@ CFLAGS_eal_thread.o += -Wno-return-type
CFLAGS_eal_hpet.o += -Wno-return-type
endif
-INC := rte_interrupts.h
+INC := # no bsdapp specific headers
SYMLINK-$(CONFIG_RTE_EXEC_ENV_BSDAPP)-include/exec-env := \
$(addprefix include/exec-env/,$(INC))
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 5fa59884..369a682a 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -51,7 +51,6 @@
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
@@ -66,7 +65,6 @@
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
#include <rte_bus.h>
-#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_version.h>
@@ -112,6 +110,13 @@ struct internal_config internal_config;
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
+/* Return mbuf pool ops name */
+const char *
+rte_eal_mbuf_default_mempool_ops(void)
+{
+ return internal_config.mbuf_pool_ops_name;
+}
+
/* Return a pointer to the configuration structure */
struct rte_config *
rte_eal_get_configuration(void)
@@ -119,6 +124,12 @@ rte_eal_get_configuration(void)
return &rte_config;
}
+enum rte_iova_mode
+rte_eal_iova_mode(void)
+{
+ return rte_eal_get_configuration()->iova_mode;
+}
+
/* parse a sysfs (or other) file containing one integer value */
int
eal_parse_sysfs_value(const char *filename, unsigned long *val)
@@ -385,6 +396,9 @@ eal_parse_args(int argc, char **argv)
continue;
switch (opt) {
+ case OPT_MBUF_POOL_OPS_NAME_NUM:
+ internal_config.mbuf_pool_ops_name = optarg;
+ break;
case 'h':
eal_usage(prgname);
exit(EXIT_SUCCESS);
@@ -535,6 +549,29 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (eal_plugins_init() < 0) {
+ rte_eal_init_alert("Cannot init plugins\n");
+ rte_errno = EINVAL;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ if (eal_option_device_parse()) {
+ rte_errno = ENODEV;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ if (rte_bus_scan()) {
+ rte_eal_init_alert("Cannot scan the buses for devices\n");
+ rte_errno = ENODEV;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ /* Autodetect the IOVA mapping mode (the default is RTE_IOVA_PA) */
+ rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
+
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
eal_hugepage_info_init() < 0) {
@@ -603,9 +640,6 @@ rte_eal_init(int argc, char **argv)
eal_check_mem_on_local_socket();
- if (eal_plugins_init() < 0)
- rte_eal_init_alert("Cannot init plugins\n");
-
eal_thread_init_master(rte_config.master_lcore);
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -614,17 +648,6 @@ rte_eal_init(int argc, char **argv)
rte_config.master_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
- if (eal_option_device_parse()) {
- rte_errno = ENODEV;
- return -1;
- }
-
- if (rte_bus_scan()) {
- rte_eal_init_alert("Cannot scan the buses for devices\n");
- rte_errno = ENODEV;
- return -1;
- }
-
RTE_LCORE_FOREACH_SLAVE(i) {
/*
@@ -698,3 +721,60 @@ rte_eal_process_type(void)
{
return rte_config.process_type;
}
+
+int rte_eal_has_pci(void)
+{
+ return !internal_config.no_pci;
+}
+
+int rte_eal_create_uio_dev(void)
+{
+ return internal_config.create_uio_dev;
+}
+
+enum rte_intr_mode
+rte_eal_vfio_intr_mode(void)
+{
+ return RTE_INTR_MODE_NONE;
+}
+
+/* dummy forward declaration. */
+struct vfio_device_info;
+
+/* dummy prototypes. */
+int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+ int *vfio_dev_fd, struct vfio_device_info *device_info);
+int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
+int rte_vfio_enable(const char *modname);
+int rte_vfio_is_enabled(const char *modname);
+int rte_vfio_noiommu_is_enabled(void);
+
+int rte_vfio_setup_device(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr,
+ __rte_unused int *vfio_dev_fd,
+ __rte_unused struct vfio_device_info *device_info)
+{
+ return -1;
+}
+
+int rte_vfio_release_device(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr,
+ __rte_unused int fd)
+{
+ return -1;
+}
+
+int rte_vfio_enable(__rte_unused const char *modname)
+{
+ return -1;
+}
+
+int rte_vfio_is_enabled(__rte_unused const char *modname)
+{
+ return 0;
+}
+
+int rte_vfio_noiommu_is_enabled(void)
+{
+ return 0;
+}
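
Besides moving plugin and bus initialization earlier, the eal.c hunks add small read-only accessors and VFIO no-op stubs so the FreeBSD build can share one version map with Linux. A sketch of the accessors from application code; the printf output is illustrative:

/* Sketch: querying the new accessors after a successful rte_eal_init(). */
if (rte_eal_iova_mode() == RTE_IOVA_VA)
	printf("bus scan selected IOVA-as-VA\n");
printf("mbuf pool ops: %s\n", rte_eal_mbuf_default_mempool_ops());
if (!rte_eal_has_pci())
	printf("PCI support disabled\n");
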
diff --git a/lib/librte_eal/bsdapp/eal/eal_interrupts.c b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
index ea2afff4..deba8770 100644
--- a/lib/librte_eal/bsdapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
@@ -125,3 +125,38 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
RTE_SET_USED(intr_handle);
return 0;
}
+
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+ int maxevents, int timeout)
+{
+ RTE_SET_USED(epfd);
+ RTE_SET_USED(events);
+ RTE_SET_USED(maxevents);
+ RTE_SET_USED(timeout);
+
+ return -ENOTSUP;
+}
+
+int
+rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
+{
+ RTE_SET_USED(epfd);
+ RTE_SET_USED(op);
+ RTE_SET_USED(fd);
+ RTE_SET_USED(event);
+
+ return -ENOTSUP;
+}
+
+int
+rte_intr_tls_epfd(void)
+{
+ return -ENOTSUP;
+}
+
+void
+rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
+{
+ RTE_SET_USED(intr_handle);
+}
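
These stubs let common code call the Rx-interrupt epoll API unconditionally on FreeBSD; callers are expected to fall back when they get -ENOTSUP. A sketch, with poll_rx_queues() as a hypothetical polling fallback:

/* Sketch: portable wait loop tolerating the FreeBSD stubs above. */
struct rte_epoll_event events[8];
int n;

n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, RTE_DIM(events), 100);
if (n == -ENOTSUP)
	n = poll_rx_queues();	/* hypothetical polling fallback */
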
diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c
index 3614da8d..6ba05857 100644
--- a/lib/librte_eal/bsdapp/eal/eal_memory.c
+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c
@@ -54,9 +54,14 @@ phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
/* XXX not implemented. This function is only used by
- * rte_mempool_virt2phy() when hugepages are disabled. */
+ * rte_mempool_virt2iova() when hugepages are disabled. */
(void)virtaddr;
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
+}
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+ return rte_mem_virt2phy(virtaddr);
}
int
@@ -73,7 +78,7 @@ rte_eal_hugepage_init(void)
/* for debug purposes, hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
addr = malloc(internal_config.memory);
- mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+ mcfg->memseg[0].iova = (rte_iova_t)(uintptr_t)addr;
mcfg->memseg[0].addr = addr;
mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
@@ -88,7 +93,7 @@ rte_eal_hugepage_init(void)
hpi = &internal_config.hugepage_info[i];
for (j = 0; j < hpi->num_pages[0]; j++) {
struct rte_memseg *seg;
- uint64_t physaddr;
+ rte_iova_t physaddr;
int error;
size_t sysctl_size = sizeof(physaddr);
char physaddr_str[64];
@@ -114,7 +119,7 @@ rte_eal_hugepage_init(void)
seg = &mcfg->memseg[seg_idx++];
seg->addr = addr;
- seg->phys_addr = physaddr;
+ seg->iova = physaddr;
seg->hugepage_sz = hpi->hugepage_sz;
seg->len = hpi->hugepage_sz;
seg->nchannel = mcfg->nchannel;
@@ -192,3 +197,9 @@ error:
close(fd_hugepage);
return -1;
}
+
+int
+rte_eal_using_phys_addrs(void)
+{
+ return 0;
+}
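
rte_mem_virt2iova() is the IOVA-aware replacement for rte_mem_virt2phy(); on FreeBSD it simply forwards, and rte_eal_using_phys_addrs() reports 0. The usual caller-side check, as a sketch:

/* Sketch: translate a virtual address with the new IOVA spelling. */
rte_iova_t iova = rte_mem_virt2iova(addr);
if (iova == RTE_BAD_IOVA)
	return -1;	/* no translation available, e.g. --no-huge without root */
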
diff --git a/lib/librte_eal/bsdapp/eal/eal_thread.c b/lib/librte_eal/bsdapp/eal/eal_thread.c
index 783d68c5..2a2136a2 100644
--- a/lib/librte_eal/bsdapp/eal/eal_thread.c
+++ b/lib/librte_eal/bsdapp/eal/eal_thread.c
@@ -46,7 +46,6 @@
#include <rte_launch.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_eal.h>
#include <rte_lcore.h>
diff --git a/lib/librte_eal/bsdapp/eal/eal_timer.c b/lib/librte_eal/bsdapp/eal/eal_timer.c
index f12d9bd2..14421943 100644
--- a/lib/librte_eal/bsdapp/eal/eal_timer.c
+++ b/lib/librte_eal/bsdapp/eal/eal_timer.c
@@ -42,7 +42,6 @@
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_debug.h>
diff --git a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
deleted file mode 100644
index 99a33432..00000000
--- a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_dom0_common.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Contact Information:
- * Intel Corporation
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _RTE_DOM0_COMMON_H_
-#define _RTE_DOM0_COMMON_H_
-
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
-
-#define DOM0_NAME_MAX 256
-#define DOM0_MM_DEV "/dev/dom0_mm"
-
-#define DOM0_CONTIG_NUM_ORDER 9 /**< 2M order */
-#define DOM0_NUM_MEMSEG 512 /**< Maximum nb. of memory segment. */
-#define DOM0_MEMBLOCK_SIZE 0x200000 /**< Maximum nb. of memory block(2M). */
-#define DOM0_CONFIG_MEMSIZE 4096 /**< Maximum config memory size(4G). */
-#define DOM0_NUM_MEMBLOCK (DOM0_CONFIG_MEMSIZE / 2) /**< Maximum nb. of 2M memory block. */
-
-#define RTE_DOM0_IOCTL_PREPARE_MEMSEG _IOWR(0, 1 , struct memory_info)
-#define RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG _IOWR(0, 2 , char *)
-#define RTE_DOM0_IOCTL_GET_NUM_MEMSEG _IOWR(0, 3, int)
-#define RTE_DOM0_IOCTL_GET_MEMSEG_INFO _IOWR(0, 4, void *)
-
-/**
- * A structure used to store memory information.
- */
-struct memory_info {
- char name[DOM0_NAME_MAX];
- uint64_t size;
-};
-
-/**
- * A structure used to store memory segment information.
- */
-struct memseg_info {
- uint32_t idx;
- uint64_t pfn;
- uint64_t size;
- uint64_t mfn[DOM0_NUM_MEMBLOCK];
-};
-
-/**
- * A structure used to store memory block information.
- */
-struct memblock_info {
- uint8_t exchange_flag;
- uint64_t vir_addr;
- uint64_t pfn;
- uint64_t mfn;
-};
-#endif /* _RTE_DOM0_COMMON_H_ */
diff --git a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
deleted file mode 100644
index c1995ee1..00000000
--- a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_interrupts.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_INTERRUPTS_H_
-#error "don't include this file directly, please include generic <rte_interrupts.h>"
-#endif
-
-#ifndef _RTE_BSDAPP_INTERRUPTS_H_
-#define _RTE_BSDAPP_INTERRUPTS_H_
-
-#define RTE_INTR_VEC_ZERO_OFFSET 0
-#define RTE_INTR_VEC_RXTX_OFFSET 1
-
-#define RTE_MAX_RXTX_INTR_VEC_ID 32
-
-enum rte_intr_handle_type {
- RTE_INTR_HANDLE_UNKNOWN = 0,
- RTE_INTR_HANDLE_UIO, /**< uio device handle */
- RTE_INTR_HANDLE_ALARM, /**< alarm handle */
- RTE_INTR_HANDLE_MAX
-};
-
-/** Handle for interrupts. */
-struct rte_intr_handle {
- int fd; /**< file descriptor */
- int uio_cfg_fd; /**< UIO config file descriptor */
- enum rte_intr_handle_type type; /**< handle type */
- int max_intr; /**< max interrupt requested */
- uint32_t nb_efd; /**< number of available efds */
- int *intr_vec; /**< intr vector number array */
-};
-
-/**
- * @param intr_handle
- * Pointer to the interrupt handle.
- * @param epfd
- * Epoll instance fd which the intr vector associated to.
- * @param op
- * The operation be performed for the vector.
- * Operation type of {ADD, DEL}.
- * @param vec
- * RX intr vector number added to the epoll instance wait list.
- * @param data
- * User raw data.
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int
-rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
- int epfd, int op, unsigned int vec, void *data);
-
-/**
- * It enables the fastpath event fds if it's necessary.
- * It creates event fds when multi-vectors allowed,
- * otherwise it multiplexes the single event fds.
- *
- * @param intr_handle
- * Pointer to the interrupt handle.
- * @param nb_efd
- * Number of interrupt vector trying to enable.
- * The value 0 is not allowed.
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int
-rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd);
-
-/**
- * It disable the fastpath event fds.
- * It deletes registered eventfds and closes the open fds.
- *
- * @param intr_handle
- * Pointer to the interrupt handle.
- */
-void
-rte_intr_efd_disable(struct rte_intr_handle *intr_handle);
-
-/**
- * The fastpath interrupt is enabled or not.
- *
- * @param intr_handle
- * Pointer to the interrupt handle.
- */
-int rte_intr_dp_is_en(struct rte_intr_handle *intr_handle);
-
-/**
- * The interrupt handle instance allows other cause or not.
- * Other cause stands for none fastpath interrupt.
- *
- * @param intr_handle
- * Pointer to the interrupt handle.
- */
-int rte_intr_allow_others(struct rte_intr_handle *intr_handle);
-
-/**
- * The multiple interrupt vector capability of interrupt handle instance.
- * It returns zero if no multiple interrupt vector support.
- *
- * @param intr_handle
- * Pointer to the interrupt handle.
- */
-int
-rte_intr_cap_multiple(struct rte_intr_handle *intr_handle);
-
-#endif /* _RTE_BSDAPP_INTERRUPTS_H_ */
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
deleted file mode 100644
index aac6fd77..00000000
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ /dev/null
@@ -1,239 +0,0 @@
-DPDK_2.0 {
- global:
-
- __rte_panic;
- devargs_list;
- eal_parse_sysfs_value;
- eal_timer_source;
- lcore_config;
- per_lcore__lcore_id;
- per_lcore__rte_errno;
- rte_calloc;
- rte_calloc_socket;
- rte_cpu_check_supported;
- rte_cpu_get_flag_enabled;
- rte_cycles_vmware_tsc_map;
- rte_delay_us;
- rte_dump_physmem_layout;
- rte_dump_registers;
- rte_dump_stack;
- rte_dump_tailq;
- rte_eal_alarm_cancel;
- rte_eal_alarm_set;
- rte_eal_devargs_add;
- rte_eal_devargs_dump;
- rte_eal_devargs_type_count;
- rte_eal_get_configuration;
- rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
- rte_eal_get_physmem_size;
- rte_eal_has_hugepages;
- rte_eal_hpet_init;
- rte_eal_init;
- rte_eal_iopl_init;
- rte_eal_lcore_role;
- rte_eal_mp_remote_launch;
- rte_eal_mp_wait_lcore;
- rte_eal_parse_devargs_str;
- rte_eal_process_type;
- rte_eal_remote_launch;
- rte_eal_tailq_lookup;
- rte_eal_tailq_register;
- rte_eal_wait_lcore;
- rte_exit;
- rte_free;
- rte_get_hpet_cycles;
- rte_get_hpet_hz;
- rte_get_log_level;
- rte_get_log_type;
- rte_get_tsc_hz;
- rte_hexdump;
- rte_intr_callback_register;
- rte_intr_callback_unregister;
- rte_intr_disable;
- rte_intr_enable;
- rte_log;
- rte_log_cur_msg_loglevel;
- rte_log_cur_msg_logtype;
- rte_logs;
- rte_malloc;
- rte_malloc_dump_stats;
- rte_malloc_get_socket_stats;
- rte_malloc_set_limit;
- rte_malloc_socket;
- rte_malloc_validate;
- rte_malloc_virt2phy;
- rte_mem_lock_page;
- rte_mem_phy2mch;
- rte_mem_virt2phy;
- rte_memdump;
- rte_memory_get_nchannel;
- rte_memory_get_nrank;
- rte_memzone_dump;
- rte_memzone_lookup;
- rte_memzone_reserve;
- rte_memzone_reserve_aligned;
- rte_memzone_reserve_bounded;
- rte_memzone_walk;
- rte_openlog_stream;
- rte_realloc;
- rte_set_application_usage_hook;
- rte_set_log_level;
- rte_set_log_type;
- rte_socket_id;
- rte_strerror;
- rte_strsplit;
- rte_sys_gettid;
- rte_thread_get_affinity;
- rte_thread_set_affinity;
- rte_vlog;
- rte_xen_dom0_memory_attach;
- rte_xen_dom0_memory_init;
- rte_zmalloc;
- rte_zmalloc_socket;
-
- local: *;
-};
-
-DPDK_2.1 {
- global:
-
- rte_intr_allow_others;
- rte_intr_dp_is_en;
- rte_intr_efd_disable;
- rte_intr_efd_enable;
- rte_intr_rx_ctl;
- rte_memzone_free;
-
-} DPDK_2.0;
-
-DPDK_2.2 {
- global:
-
- rte_intr_cap_multiple;
- rte_keepalive_create;
- rte_keepalive_dispatch_pings;
- rte_keepalive_mark_alive;
- rte_keepalive_register_core;
- rte_xen_dom0_supported;
-
-} DPDK_2.1;
-
-DPDK_16.04 {
- global:
-
- rte_cpu_get_flag_name;
- rte_eal_primary_proc_alive;
-
-} DPDK_2.2;
-
-DPDK_16.07 {
- global:
-
- pci_get_sysfs_path;
- rte_keepalive_mark_sleep;
- rte_keepalive_register_relay_callback;
- rte_rtm_supported;
- rte_thread_setname;
-
-} DPDK_16.04;
-
-DPDK_16.11 {
- global:
-
- rte_delay_us_block;
- rte_delay_us_callback_register;
- rte_eal_dev_attach;
- rte_eal_dev_detach;
-
-} DPDK_16.07;
-
-DPDK_17.02 {
- global:
-
- rte_bus_dump;
- rte_bus_probe;
- rte_bus_register;
- rte_bus_scan;
- rte_bus_unregister;
-
-} DPDK_16.11;
-
-DPDK_17.05 {
- global:
-
- rte_cpu_is_supported;
- rte_log_dump;
- rte_log_register;
- rte_log_get_global_level;
- rte_log_set_global_level;
- rte_log_set_level;
- rte_log_set_level_regexp;
- rte_pci_detach;
- rte_pci_dump;
- rte_pci_ioport_map;
- rte_pci_ioport_read;
- rte_pci_ioport_unmap;
- rte_pci_ioport_write;
- rte_pci_map_device;
- rte_pci_probe;
- rte_pci_probe_one;
- rte_pci_read_config;
- rte_pci_register;
- rte_pci_scan;
- rte_pci_unmap_device;
- rte_pci_unregister;
- rte_pci_write_config;
- rte_vdev_init;
- rte_vdev_register;
- rte_vdev_uninit;
- rte_vdev_unregister;
- vfio_get_container_fd;
- vfio_get_group_fd;
- vfio_get_group_no;
-
-} DPDK_17.02;
-
-DPDK_17.08 {
- global:
-
- rte_bus_find;
- rte_bus_find_by_device;
- rte_bus_find_by_name;
- rte_log_get_level;
-
-} DPDK_17.05;
-
-EXPERIMENTAL {
- global:
-
- rte_eal_devargs_insert;
- rte_eal_devargs_parse;
- rte_eal_devargs_remove;
- rte_eal_hotplug_add;
- rte_eal_hotplug_remove;
- rte_service_disable_on_lcore;
- rte_service_dump;
- rte_service_enable_on_lcore;
- rte_service_get_by_id;
- rte_service_get_by_name;
- rte_service_get_count;
- rte_service_get_enabled_on_lcore;
- rte_service_is_running;
- rte_service_lcore_add;
- rte_service_lcore_count;
- rte_service_lcore_del;
- rte_service_lcore_list;
- rte_service_lcore_reset_all;
- rte_service_lcore_start;
- rte_service_lcore_stop;
- rte_service_probe_capability;
- rte_service_register;
- rte_service_reset;
- rte_service_set_stats_enable;
- rte_service_start;
- rte_service_start_with_defaults;
- rte_service_stop;
- rte_service_unregister;
-
-} DPDK_17.08;
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index e8fd67a2..9effd0d4 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -32,16 +32,18 @@
include $(RTE_SDK)/mk/rte.vars.mk
INC := rte_branch_prediction.h rte_common.h
-INC += rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h
-INC += rte_log.h rte_memory.h rte_memzone.h rte_pci.h
+INC += rte_debug.h rte_eal.h rte_eal_interrupts.h
+INC += rte_errno.h rte_launch.h rte_lcore.h
+INC += rte_log.h rte_memory.h rte_memzone.h
INC += rte_per_lcore.h rte_random.h
INC += rte_tailq.h rte_interrupts.h rte_alarm.h
INC += rte_string_fns.h rte_version.h
INC += rte_eal_memconfig.h rte_malloc_heap.h
-INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_vdev.h
+INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h
INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
INC += rte_malloc.h rte_keepalive.h rte_time.h
INC += rte_service.h rte_service_component.h
+INC += rte_bitmap.h rte_vfio.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
@@ -49,7 +51,7 @@ GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
ARCH_DIR ?= $(RTE_ARCH)
-ARCH_INC := $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h))
+ARCH_INC := $(sort $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h)))
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \
diff --git a/lib/librte_eal/common/arch/arm/rte_cpuflags.c b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
index 5636e9c1..88f1cbe3 100644
--- a/lib/librte_eal/common/arch/arm/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
@@ -137,7 +137,7 @@ rte_cpu_get_features(hwcap_registers_t out)
_Elfx_auxv_t auxv;
auxv_fd = open("/proc/self/auxv", O_RDONLY);
- assert(auxv_fd);
+ assert(auxv_fd != -1);
while (read(auxv_fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
if (auxv.a_type == AT_HWCAP) {
out[REG_HWCAP] = auxv.a_un.a_val;
diff --git a/lib/librte_eal/common/arch/arm/rte_cycles.c b/lib/librte_eal/common/arch/arm/rte_cycles.c
new file mode 100644
index 00000000..3e31e5be
--- /dev/null
+++ b/lib/librte_eal/common/arch/arm/rte_cycles.c
@@ -0,0 +1,45 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2015.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "eal_private.h"
+
+uint64_t
+get_tsc_freq_arch(void)
+{
+#if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU
+ uint64_t freq;
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
+ return freq;
+#else
+ return 0;
+#endif
+}
diff --git a/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c b/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
index fcf96e04..970a61c5 100644
--- a/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
@@ -108,7 +108,7 @@ rte_cpu_get_features(hwcap_registers_t out)
Elf64_auxv_t auxv;
auxv_fd = open("/proc/self/auxv", O_RDONLY);
- assert(auxv_fd);
+ assert(auxv_fd != -1);
while (read(auxv_fd, &auxv,
sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
if (auxv.a_type == AT_HWCAP)
diff --git a/lib/librte_eal/common/arch/ppc_64/rte_cycles.c b/lib/librte_eal/common/arch/ppc_64/rte_cycles.c
new file mode 100644
index 00000000..69a9f747
--- /dev/null
+++ b/lib/librte_eal/common/arch/ppc_64/rte_cycles.c
@@ -0,0 +1,52 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) IBM Corporation 2014.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of IBM Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include "eal_filesystem.h"
+#include "eal_private.h"
+
+static const char sys_cpu_dir[] = "/sys/devices/system/cpu";
+
+uint64_t
+get_tsc_freq_arch(void)
+{
+ unsigned long cpu_hz;
+ char path[PATH_MAX];
+
+ snprintf(path, sizeof(path), "%s/cpu%d/cpufreq/cpuinfo_cur_freq",
+ sys_cpu_dir, rte_get_master_lcore());
+ if (eal_parse_sysfs_value(path, &cpu_hz) < 0)
+ RTE_LOG(WARNING, EAL, "Unable to parse %s\n", path);
+
+ return cpu_hz*1000;
+}
diff --git a/lib/librte_eal/common/arch/x86/rte_cpuflags.c b/lib/librte_eal/common/arch/x86/rte_cpuflags.c
index 01382571..7d4a0fef 100644
--- a/lib/librte_eal/common/arch/x86/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/x86/rte_cpuflags.c
@@ -36,6 +36,7 @@
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
+#include <cpuid.h>
enum cpu_register_t {
RTE_REG_EAX = 0,
@@ -156,38 +157,12 @@ const struct feature_entry rte_cpu_feature_table[] = {
FEAT_DEF(INVTSC, 0x80000007, 0, RTE_REG_EDX, 8)
};
-/*
- * Execute CPUID instruction and get contents of a specific register
- *
- * This function, when compiled with GCC, will generate architecture-neutral
- * code, as per GCC manual.
- */
-static void
-rte_cpu_get_features(uint32_t leaf, uint32_t subleaf, cpuid_registers_t out)
-{
-#if defined(__i386__) && defined(__PIC__)
- /* %ebx is a forbidden register if we compile with -fPIC or -fPIE */
- asm volatile("movl %%ebx,%0 ; cpuid ; xchgl %%ebx,%0"
- : "=r" (out[RTE_REG_EBX]),
- "=a" (out[RTE_REG_EAX]),
- "=c" (out[RTE_REG_ECX]),
- "=d" (out[RTE_REG_EDX])
- : "a" (leaf), "c" (subleaf));
-#else
- asm volatile("cpuid"
- : "=a" (out[RTE_REG_EAX]),
- "=b" (out[RTE_REG_EBX]),
- "=c" (out[RTE_REG_ECX]),
- "=d" (out[RTE_REG_EDX])
- : "a" (leaf), "c" (subleaf));
-#endif
-}
-
int
rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
{
const struct feature_entry *feat;
cpuid_registers_t regs;
+ unsigned int maxleaf;
if (feature >= RTE_CPUFLAG_NUMFLAGS)
/* Flag does not match anything in the feature tables */
@@ -199,13 +174,14 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
/* This entry in the table wasn't filled out! */
return -EFAULT;
- rte_cpu_get_features(feat->leaf & 0xffff0000, 0, regs);
- if (((regs[RTE_REG_EAX] ^ feat->leaf) & 0xffff0000) ||
- regs[RTE_REG_EAX] < feat->leaf)
+ maxleaf = __get_cpuid_max(feat->leaf & 0x80000000, NULL);
+
+ if (maxleaf < feat->leaf)
return 0;
- /* get the cpuid leaf containing the desired feature */
- rte_cpu_get_features(feat->leaf, feat->subleaf, regs);
+ __cpuid_count(feat->leaf, feat->subleaf,
+ regs[RTE_REG_EAX], regs[RTE_REG_EBX],
+ regs[RTE_REG_ECX], regs[RTE_REG_EDX]);
/* check if the feature is enabled */
return (regs[feat->reg] >> feat->bit) & 1;
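
The rewrite replaces the hand-rolled cpuid asm (and its i386 PIC workaround) with the <cpuid.h> builtins: __get_cpuid_max() validates the leaf, including the 0x80000000 extended range, before __cpuid_count() reads it. The same pattern standalone, as a sketch (AVX2 is leaf 7, subleaf 0, EBX bit 5):

#include <cpuid.h>
#include <stdint.h>

int
has_avx2(void)
{
	uint32_t a, b, c, d;

	if (__get_cpuid_max(0, NULL) < 7)
		return 0;
	__cpuid_count(7, 0, a, b, c, d);
	return (b >> 5) & 1;	/* CPUID.(7,0):EBX bit 5 = AVX2 */
}
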
diff --git a/lib/librte_eal/common/arch/x86/rte_cycles.c b/lib/librte_eal/common/arch/x86/rte_cycles.c
new file mode 100644
index 00000000..417850ee
--- /dev/null
+++ b/lib/librte_eal/common/arch/x86/rte_cycles.c
@@ -0,0 +1,152 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <cpuid.h>
+
+#include <rte_common.h>
+
+#include "eal_private.h"
+
+static unsigned int
+rte_cpu_get_model(uint32_t fam_mod_step)
+{
+ uint32_t family, model, ext_model;
+
+ family = (fam_mod_step >> 8) & 0xf;
+ model = (fam_mod_step >> 4) & 0xf;
+
+ if (family == 6 || family == 15) {
+ ext_model = (fam_mod_step >> 16) & 0xf;
+ model += (ext_model << 4);
+ }
+
+ return model;
+}
+
+static int32_t
+rdmsr(int msr, uint64_t *val)
+{
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ int fd;
+ int ret;
+
+ fd = open("/dev/cpu/0/msr", O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ ret = pread(fd, val, sizeof(uint64_t), msr);
+
+ close(fd);
+
+ return ret;
+#else
+ RTE_SET_USED(msr);
+ RTE_SET_USED(val);
+
+ return -1;
+#endif
+}
+
+static uint32_t
+check_model_wsm_nhm(uint8_t model)
+{
+ switch (model) {
+ /* Westmere */
+ case 0x25:
+ case 0x2C:
+ case 0x2F:
+ /* Nehalem */
+ case 0x1E:
+ case 0x1F:
+ case 0x1A:
+ case 0x2E:
+ return 1;
+ }
+
+ return 0;
+}
+
+static uint32_t
+check_model_gdm_dnv(uint8_t model)
+{
+ switch (model) {
+ /* Goldmont */
+ case 0x5C:
+ /* Denverton */
+ case 0x5F:
+ return 1;
+ }
+
+ return 0;
+}
+
+uint64_t
+get_tsc_freq_arch(void)
+{
+ uint64_t tsc_hz = 0;
+ uint32_t a, b, c, d, maxleaf;
+ uint8_t mult, model;
+ int32_t ret;
+
+ /*
+ * Time Stamp Counter and Nominal Core Crystal Clock
+ * Information Leaf
+ */
+ maxleaf = __get_cpuid_max(0, NULL);
+
+ if (maxleaf >= 0x15) {
+ __cpuid(0x15, a, b, c, d);
+
+ /* EBX : TSC/Crystal ratio, ECX : Crystal Hz */
+ if (b && c)
+ return c * (b / a);
+ }
+
+ __cpuid(0x1, a, b, c, d);
+ model = rte_cpu_get_model(a);
+
+ if (check_model_wsm_nhm(model))
+ mult = 133;
+ else if ((c & bit_AVX) || check_model_gdm_dnv(model))
+ mult = 100;
+ else
+ return 0;
+
+ ret = rdmsr(0xCE, &tsc_hz);
+ if (ret < 0)
+ return 0;
+
+ return ((tsc_hz >> 8) & 0xff) * mult * 1E6;
+}
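
On CPUs exposing leaf 0x15 the frequency is crystal_hz * (numerator / denominator); note the integer division, so the ratio is expected to divide evenly. A worked example with assumed register values:

/* Assumed leaf-0x15 values: EAX=2 (denominator), EBX=250 (numerator),
 * ECX=24000000 (24 MHz crystal):
 *   24000000 * (250 / 2) = 3000000000, i.e. a 3.0 GHz TSC.
 * The MSR fallback instead takes the bus ratio from MSR 0xCE bits 15:8
 * and multiplies by 133 MHz or 100 MHz depending on the CPU model. */
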
diff --git a/lib/librte_eal/common/arch/x86/rte_memcpy.c b/lib/librte_eal/common/arch/x86/rte_memcpy.c
new file mode 100644
index 00000000..174bef15
--- /dev/null
+++ b/lib/librte_eal/common/arch/x86/rte_memcpy.c
@@ -0,0 +1,58 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memcpy.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+
+void *(*rte_memcpy_ptr)(void *dst, const void *src, size_t n) = NULL;
+
+RTE_INIT(rte_memcpy_init)
+{
+#ifdef CC_SUPPORT_AVX512F
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
+ rte_memcpy_ptr = rte_memcpy_avx512f;
+ RTE_LOG(DEBUG, EAL, "AVX512 memcpy is being used!\n");
+ return;
+ }
+#endif
+#ifdef CC_SUPPORT_AVX2
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
+ rte_memcpy_ptr = rte_memcpy_avx2;
+ RTE_LOG(DEBUG, EAL, "AVX2 memcpy is being used!\n");
+ return;
+ }
+#endif
+ rte_memcpy_ptr = rte_memcpy_sse;
+ RTE_LOG(DEBUG, EAL, "Default SSE/AVX memcpy is being used!\n");
+}
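
The constructor resolves the memcpy implementation once at load time from runtime CPU flags, gated by the CC_SUPPORT_* compiler checks. A sketch of the consumer side of the indirection, assuming a build that routes copies through the pointer; resolving it once in a constructor trades one indirect call per copy for avoiding a CPU-flag check on every call:

/* Sketch: dispatching through the pointer initialized above. */
static inline void *
dispatch_memcpy(void *dst, const void *src, size_t n)
{
	return rte_memcpy_ptr(dst, src, n);
}
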
diff --git a/lib/librte_eal/common/arch/x86/rte_spinlock.c b/lib/librte_eal/common/arch/x86/rte_spinlock.c
index c383e9f0..1244a90b 100644
--- a/lib/librte_eal/common/arch/x86/rte_spinlock.c
+++ b/lib/librte_eal/common/arch/x86/rte_spinlock.c
@@ -38,8 +38,7 @@
uint8_t rte_rtm_supported; /* cache the flag to avoid the overhead
of the rte_cpu_get_flag_enabled function */
-static void __attribute__((constructor))
-rte_rtm_init(void)
+RTE_INIT(rte_rtm_init)
{
rte_rtm_supported = rte_cpu_get_flag_enabled(RTE_CPUFLAG_RTM);
}
diff --git a/lib/librte_eal/common/eal_common_bus.c b/lib/librte_eal/common/eal_common_bus.c
index 08bec2d9..3e022d51 100644
--- a/lib/librte_eal/common/eal_common_bus.c
+++ b/lib/librte_eal/common/eal_common_bus.c
@@ -35,6 +35,7 @@
#include <sys/queue.h>
#include <rte_bus.h>
+#include <rte_debug.h>
#include "eal_private.h"
@@ -73,11 +74,9 @@ rte_bus_scan(void)
TAILQ_FOREACH(bus, &rte_bus_list, next) {
ret = bus->scan();
- if (ret) {
+ if (ret)
RTE_LOG(ERR, EAL, "Scan for (%s) bus failed.\n",
bus->name);
- return ret;
- }
}
return 0;
@@ -97,20 +96,16 @@ rte_bus_probe(void)
}
ret = bus->probe();
- if (ret) {
+ if (ret)
RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
bus->name);
- return ret;
- }
}
if (vbus) {
ret = vbus->probe();
- if (ret) {
+ if (ret)
RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
vbus->name);
- return ret;
- }
}
return 0;
@@ -152,15 +147,16 @@ struct rte_bus *
rte_bus_find(const struct rte_bus *start, rte_bus_cmp_t cmp,
const void *data)
{
- struct rte_bus *bus = NULL;
+ struct rte_bus *bus;
- TAILQ_FOREACH(bus, &rte_bus_list, next) {
- if (start && bus == start) {
- start = NULL; /* starting point found */
- continue;
- }
+ if (start != NULL)
+ bus = TAILQ_NEXT(start, next);
+ else
+ bus = TAILQ_FIRST(&rte_bus_list);
+ while (bus != NULL) {
if (cmp(bus, data) == 0)
break;
+ bus = TAILQ_NEXT(bus, next);
}
return bus;
}
@@ -222,3 +218,26 @@ rte_bus_find_by_device_name(const char *str)
c[0] = '\0';
return rte_bus_find(NULL, bus_can_parse, name);
}
+
+
+/*
+ * Get the IOMMU class of the devices on all registered buses.
+ */
+enum rte_iova_mode
+rte_bus_get_iommu_class(void)
+{
+ int mode = RTE_IOVA_DC;
+ struct rte_bus *bus;
+
+ TAILQ_FOREACH(bus, &rte_bus_list, next) {
+
+ if (bus->get_iommu_class)
+ mode |= bus->get_iommu_class();
+ }
+
+ if (mode != RTE_IOVA_VA) {
+ /* Use default IOVA mode */
+ mode = RTE_IOVA_PA;
+ }
+ return mode;
+}
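
rte_bus_find() now starts from TAILQ_NEXT(start) instead of rescanning the list for the start pointer, and scan/probe keep going past a failing bus. A sketch of a comparator-based lookup, essentially what rte_bus_find_by_name() does:

/* cmp must return 0 on a match, as rte_bus_find() expects. */
static int
bus_name_cmp(const struct rte_bus *bus, const void *name)
{
	return strcmp(bus->name, (const char *)name);
}

struct rte_bus *bus = rte_bus_find(NULL, bus_name_cmp, "pci");
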
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index e2512755..dda8f583 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -67,7 +67,6 @@ static int cmp_dev_name(const struct rte_device *dev, const void *_name)
int rte_eal_dev_attach(const char *name, const char *devargs)
{
struct rte_bus *bus;
- int ret;
if (name == NULL || devargs == NULL) {
RTE_LOG(ERR, EAL, "Invalid device or arguments provided\n");
@@ -80,22 +79,13 @@ int rte_eal_dev_attach(const char *name, const char *devargs)
name);
return -EINVAL;
}
- if (strcmp(bus->name, "pci") == 0)
- return rte_eal_hotplug_add("pci", name, devargs);
- if (strcmp(bus->name, "vdev") != 0) {
- RTE_LOG(ERR, EAL, "Device attach is only supported for PCI and vdev devices.\n");
- return -ENOTSUP;
- }
+ if (strcmp(bus->name, "pci") == 0 || strcmp(bus->name, "vdev") == 0)
+ return rte_eal_hotplug_add(bus->name, name, devargs);
- /*
- * If we haven't found a bus device the user meant to "hotplug" a
- * virtual device instead.
- */
- ret = rte_vdev_init(name, devargs);
- if (ret)
- RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n",
- name);
- return ret;
+ RTE_LOG(ERR, EAL,
+ "Device attach is only supported for PCI and vdev devices.\n");
+
+ return -ENOTSUP;
}
int rte_eal_dev_detach(struct rte_device *dev)
diff --git a/lib/librte_eal/common/eal_common_errno.c b/lib/librte_eal/common/eal_common_errno.c
index de48d8e4..dc5b7c04 100644
--- a/lib/librte_eal/common/eal_common_errno.c
+++ b/lib/librte_eal/common/eal_common_errno.c
@@ -46,18 +46,20 @@ RTE_DEFINE_PER_LCORE(int, _rte_errno);
const char *
rte_strerror(int errnum)
{
+ /* BSD puts a colon in the "unknown error" messages, Linux doesn't */
+#ifdef RTE_EXEC_ENV_BSDAPP
+ static const char *sep = ":";
+#else
+ static const char *sep = "";
+#endif
#define RETVAL_SZ 256
static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);
+ char *ret = RTE_PER_LCORE(retval);
/* since some implementations of strerror_r throw an error
* themselves if errnum is too big, we handle that case here */
- if (errnum > RTE_MAX_ERRNO)
- snprintf(RTE_PER_LCORE(retval), RETVAL_SZ,
-#ifdef RTE_EXEC_ENV_BSDAPP
- "Unknown error: %d", errnum);
-#else
- "Unknown error %d", errnum);
-#endif
+ if (errnum >= RTE_MAX_ERRNO)
+ snprintf(ret, RETVAL_SZ, "Unknown error%s %d", sep, errnum);
else
switch (errnum){
case E_RTE_SECONDARY:
@@ -65,8 +67,10 @@ rte_strerror(int errnum)
case E_RTE_NO_CONFIG:
return "Missing rte_config structure";
default:
- strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
+ if (strerror_r(errnum, ret, RETVAL_SZ) != 0)
+ snprintf(ret, RETVAL_SZ, "Unknown error%s %d",
+ sep, errnum);
}
- return RTE_PER_LCORE(retval);
+ return ret;
}
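
rte_strerror() now also covers strerror_r() failures and shares one "Unknown error" format with a platform-dependent separator. Typical use with rte_errno, as a sketch:

/* Sketch: reporting a failed EAL call through rte_strerror(). */
if (rte_eal_hotplug_add("vdev", "net_null0", "") < 0)
	RTE_LOG(ERR, EAL, "hotplug failed: %s\n", rte_strerror(rte_errno));
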
diff --git a/lib/librte_eal/common/eal_common_launch.c b/lib/librte_eal/common/eal_common_launch.c
index 137c191d..2d5cae9f 100644
--- a/lib/librte_eal/common/eal_common_launch.c
+++ b/lib/librte_eal/common/eal_common_launch.c
@@ -38,7 +38,6 @@
#include <rte_launch.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_pause.h>
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index 0e3b9320..be404136 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -89,14 +89,6 @@ rte_log_set_global_level(uint32_t level)
rte_logs.level = (uint32_t)level;
}
-/* Set global log level */
-/* replaced by rte_log_set_global_level */
-__rte_deprecated void
-rte_set_log_level(uint32_t level)
-{
- rte_log_set_global_level(level);
-}
-
/* Get global log level */
uint32_t
rte_log_get_global_level(void)
@@ -104,14 +96,6 @@ rte_log_get_global_level(void)
return rte_logs.level;
}
-/* Get global log level */
-/* replaced by rte_log_get_global_level */
-uint32_t
-rte_get_log_level(void)
-{
- return rte_log_get_global_level();
-}
-
int
rte_log_get_level(uint32_t type)
{
@@ -121,30 +105,6 @@ rte_log_get_level(uint32_t type)
return rte_logs.dynamic_types[type].loglevel;
}
-/* Set global log type */
-__rte_deprecated void
-rte_set_log_type(uint32_t type, int enable)
-{
- if (type < RTE_LOGTYPE_FIRST_EXT_ID) {
- if (enable)
- rte_logs.type |= 1 << type;
- else
- rte_logs.type &= ~(1 << type);
- }
-
- if (enable)
- rte_log_set_level(type, 0);
- else
- rte_log_set_level(type, RTE_LOG_DEBUG);
-}
-
-/* Get global log type */
-__rte_deprecated uint32_t
-rte_get_log_type(void)
-{
- return rte_logs.type;
-}
-
int
rte_log_set_level(uint32_t type, uint32_t level)
{
@@ -289,7 +249,8 @@ static const struct logtype logtype_strings[] = {
{RTE_LOGTYPE_USER8, "user8"}
};
-RTE_INIT(rte_log_init);
+/* Logging should be the first initializer (before drivers and bus) */
+RTE_INIT_PRIO(rte_log_init, 101);
static void
rte_log_init(void)
{
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 996877ef..fc6c44da 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -41,7 +41,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_log.h>
@@ -96,11 +95,11 @@ rte_dump_physmem_layout(FILE *f)
if (mcfg->memseg[i].addr == NULL)
break;
- fprintf(f, "Segment %u: phys:0x%"PRIx64", len:%zu, "
+ fprintf(f, "Segment %u: IOVA:0x%"PRIx64", len:%zu, "
"virt:%p, socket_id:%"PRId32", "
"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
"nrank:%"PRIx32"\n", i,
- mcfg->memseg[i].phys_addr,
+ mcfg->memseg[i].iova,
mcfg->memseg[i].len,
mcfg->memseg[i].addr,
mcfg->memseg[i].socket_id,
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 3026e36b..ea072a25 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -251,7 +251,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
- mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+ mz->iova = rte_malloc_virt2iova(mz_addr);
mz->addr = mz_addr;
mz->len = (requested_len == 0 ? elem->size : requested_len);
mz->hugepage_sz = elem->ms->hugepage_sz;
@@ -391,10 +391,10 @@ rte_memzone_dump(FILE *f)
for (i=0; i<RTE_MAX_MEMZONE; i++) {
if (mcfg->memzone[i].addr == NULL)
break;
- fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
+ fprintf(f, "Zone %u: name:<%s>, IO:0x%"PRIx64", len:0x%zx"
", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
mcfg->memzone[i].name,
- mcfg->memzone[i].phys_addr,
+ mcfg->memzone[i].iova,
mcfg->memzone[i].len,
mcfg->memzone[i].addr,
mcfg->memzone[i].socket_id,
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 1da185e5..996a0342 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -85,6 +85,7 @@ eal_long_options[] = {
{OPT_LCORES, 1, NULL, OPT_LCORES_NUM },
{OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM },
{OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM },
+ {OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
{OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM },
{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
{OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM },
@@ -97,7 +98,6 @@ eal_long_options[] = {
{OPT_VDEV, 1, NULL, OPT_VDEV_NUM },
{OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
{OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
- {OPT_XEN_DOM0, 0, NULL, OPT_XEN_DOM0_NUM },
{0, 0, NULL, 0 }
};
@@ -208,8 +208,6 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->syslog_facility = LOG_DAEMON;
- internal_cfg->xen_dom0_support = 0;
-
/* if set to NONE, interrupt mode is determined automatically */
internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
@@ -220,6 +218,7 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
#endif
internal_cfg->vmware_tsc_map = 0;
internal_cfg->create_uio_dev = 0;
+ internal_cfg->mbuf_pool_ops_name = RTE_MBUF_DEFAULT_MEMPOOL_OPS;
}
static int
@@ -279,12 +278,13 @@ int
eal_plugins_init(void)
{
struct shared_driver *solib = NULL;
+ struct stat sb;
- if (*default_solib_dir != '\0')
+ if (*default_solib_dir != '\0' && stat(default_solib_dir, &sb) == 0 &&
+ S_ISDIR(sb.st_mode))
eal_plugin_add(default_solib_dir);
TAILQ_FOREACH(solib, &solib_list, next) {
- struct stat sb;
if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) {
if (eal_plugindir_init(solib->name) == -1) {
@@ -1279,6 +1279,7 @@ eal_common_usage(void)
" '@' can be omitted if cpus and lcores have the same value\n"
" -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n"
" --"OPT_MASTER_LCORE" ID Core ID that is used as master\n"
+ " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n"
" -n CHANNELS Number of memory channels\n"
" -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
" -r RANKS Force number of memory ranks (don't detect)\n"
diff --git a/lib/librte_eal/common/eal_common_tailqs.c b/lib/librte_eal/common/eal_common_tailqs.c
index 55955f9e..6ae09fdb 100644
--- a/lib/librte_eal/common/eal_common_tailqs.c
+++ b/lib/librte_eal/common/eal_common_tailqs.c
@@ -40,7 +40,6 @@
#include <inttypes.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
index 2405e93f..55e96963 100644
--- a/lib/librte_eal/common/eal_common_thread.c
+++ b/lib/librte_eal/common/eal_common_thread.c
@@ -53,6 +53,20 @@ unsigned rte_socket_id(void)
return RTE_PER_LCORE(_socket_id);
}
+int
+rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+
+ if (lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ if (cfg->lcore_role[lcore_id] == role)
+ return 0;
+
+ return -EINVAL;
+}
+
int eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
{
unsigned cpu = 0;
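
rte_lcore_has_role() returns 0 when the lcore has the role and -EINVAL otherwise, so it reads like a comparison rather than a boolean. A sketch that skips service lcores when launching workers; worker_main is hypothetical:

/* Skip lcores reserved for services; note the 0-on-match convention. */
unsigned int lcore_id;

RTE_LCORE_FOREACH_SLAVE(lcore_id) {
	if (rte_lcore_has_role(lcore_id, ROLE_SERVICE) == 0)
		continue;
	rte_eal_remote_launch(worker_main, NULL, lcore_id);
}
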
diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c
index ed0b16d0..568ae2fd 100644
--- a/lib/librte_eal/common/eal_common_timer.c
+++ b/lib/librte_eal/common/eal_common_timer.c
@@ -80,8 +80,11 @@ estimate_tsc_freq(void)
void
set_tsc_freq(void)
{
- uint64_t freq = get_tsc_freq();
+ uint64_t freq;
+ freq = get_tsc_freq_arch();
+ if (!freq)
+ freq = get_tsc_freq();
if (!freq)
freq = estimate_tsc_freq();
@@ -94,8 +97,7 @@ void rte_delay_us_callback_register(void (*userfunc)(unsigned int))
rte_delay_us = userfunc;
}
-static void __attribute__((constructor))
-rte_timer_init(void)
+RTE_INIT(rte_timer_init)
{
/* set rte_delay_us_block as a delay function */
rte_delay_us_callback_register(rte_delay_us_block);
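
set_tsc_freq() now tries the architecture-specific reader first, then the generic platform reader, then estimation; applications keep consuming the result through rte_get_tsc_hz(). A sketch of cycle-based timing on top of it; do_work() is hypothetical:

/* Sketch: measuring elapsed seconds with the calibrated TSC. */
uint64_t start = rte_rdtsc();
do_work();
double secs = (double)(rte_rdtsc() - start) / rte_get_tsc_hz();
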
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index 7b7e8c88..fa6ccbec 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -65,7 +65,6 @@ struct internal_config {
volatile unsigned force_nrank; /**< force number of ranks */
volatile unsigned no_hugetlbfs; /**< true to disable hugetlbfs */
unsigned hugepage_unlink; /**< true to unlink backing files */
- volatile unsigned xen_dom0_support; /**< support app running on Xen Dom0*/
volatile unsigned no_pci; /**< true to disable PCI */
volatile unsigned no_hpet; /**< true to disable HPET */
volatile unsigned vmware_tsc_map; /**< true to use VMware TSC mapping
@@ -82,7 +81,7 @@ struct internal_config {
volatile enum rte_intr_mode vfio_intr_mode;
const char *hugefile_prefix; /**< the base filename of hugetlbfs files */
const char *hugepage_dir; /**< specific hugetlbfs directory to use */
-
+ const char *mbuf_pool_ops_name; /**< mbuf pool ops name */
unsigned num_hugepage_sizes; /**< how many sizes on this system */
struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
};
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index 439a2610..30e6bb41 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -61,6 +61,8 @@ enum {
OPT_LOG_LEVEL_NUM,
#define OPT_MASTER_LCORE "master-lcore"
OPT_MASTER_LCORE_NUM,
+#define OPT_MBUF_POOL_OPS_NAME "mbuf-pool-ops-name"
+ OPT_MBUF_POOL_OPS_NAME_NUM,
#define OPT_PROC_TYPE "proc-type"
OPT_PROC_TYPE_NUM,
#define OPT_NO_HPET "no-hpet"
@@ -81,8 +83,6 @@ enum {
OPT_VFIO_INTR_NUM,
#define OPT_VMWARE_TSC_MAP "vmware-tsc-map"
OPT_VMWARE_TSC_MAP_NUM,
-#define OPT_XEN_DOM0 "xen-dom0"
- OPT_XEN_DOM0_NUM,
OPT_LONG_MAX_NUM
};
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 597d82e4..462226f1 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -35,8 +35,8 @@
#define _EAL_PRIVATE_H_
#include <stdbool.h>
+#include <stdint.h>
#include <stdio.h>
-#include <rte_pci.h>
/**
* Initialize the memzone subsystem (private to eal).
@@ -109,137 +109,6 @@ int rte_eal_timer_init(void);
*/
int rte_eal_log_init(const char *id, int facility);
-struct rte_pci_driver;
-struct rte_pci_device;
-
-/**
- * Find the name of a PCI device.
- */
-void pci_name_set(struct rte_pci_device *dev);
-
-/**
- * Add a PCI device to the PCI Bus (append to PCI Device list). This function
- * also updates the bus references of the PCI Device (and the generic device
- * object embedded within.
- *
- * @param pci_dev
- * PCI device to add
- * @return void
- */
-void rte_pci_add_device(struct rte_pci_device *pci_dev);
-
-/**
- * Insert a PCI device in the PCI Bus at a particular location in the device
- * list. It also updates the PCI Bus reference of the new devices to be
- * inserted.
- *
- * @param exist_pci_dev
- * Existing PCI device in PCI Bus
- * @param new_pci_dev
- * PCI device to be added before exist_pci_dev
- * @return void
- */
-void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
- struct rte_pci_device *new_pci_dev);
-
-/**
- * Remove a PCI device from the PCI Bus. This sets to NULL the bus references
- * in the PCI device object as well as the generic device object.
- *
- * @param pci_device
- * PCI device to be removed from PCI Bus
- * @return void
- */
-void rte_pci_remove_device(struct rte_pci_device *pci_device);
-
-/**
- * Update a pci device object by asking the kernel for the latest information.
- *
- * This function is private to EAL.
- *
- * @param addr
- * The PCI Bus-Device-Function address to look for
- * @return
- * - 0 on success.
- * - negative on error.
- */
-int pci_update_device(const struct rte_pci_addr *addr);
-
-/**
- * Unbind kernel driver for this device
- *
- * This function is private to EAL.
- *
- * @return
- * 0 on success, negative on error
- */
-int pci_unbind_kernel_driver(struct rte_pci_device *dev);
-
-/**
- * Map the PCI resource of a PCI device in virtual memory
- *
- * This function is private to EAL.
- *
- * @return
- * 0 on success, negative on error
- */
-int pci_uio_map_resource(struct rte_pci_device *dev);
-
-/**
- * Unmap the PCI resource of a PCI device
- *
- * This function is private to EAL.
- */
-void pci_uio_unmap_resource(struct rte_pci_device *dev);
-
-/**
- * Allocate uio resource for PCI device
- *
- * This function is private to EAL.
- *
- * @param dev
- * PCI device to allocate uio resource
- * @param uio_res
- * Pointer to uio resource.
- * If the function returns 0, the pointer will be filled.
- * @return
- * 0 on success, negative on error
- */
-int pci_uio_alloc_resource(struct rte_pci_device *dev,
- struct mapped_pci_resource **uio_res);
-
-/**
- * Free uio resource for PCI device
- *
- * This function is private to EAL.
- *
- * @param dev
- * PCI device to free uio resource
- * @param uio_res
- * Pointer to uio resource.
- */
-void pci_uio_free_resource(struct rte_pci_device *dev,
- struct mapped_pci_resource *uio_res);
-
-/**
- * Map device memory to uio resource
- *
- * This function is private to EAL.
- *
- * @param dev
- * PCI device that has memory information.
- * @param res_idx
- * Memory resource index of the PCI device.
- * @param uio_res
- * uio resource that will keep mapping information.
- * @param map_idx
- * Mapping information index of the uio resource.
- * @return
- * 0 on success, negative on error
- */
-int pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
- struct mapped_pci_resource *uio_res, int map_idx);
-
/**
* Init tail queues for non-EAL library structures. This is to allow
* the rings, mempools, etc. lists to be shared among multiple processes
@@ -315,6 +184,17 @@ void set_tsc_freq(void);
uint64_t get_tsc_freq(void);
/**
+ * Get the TSC frequency, if the architecture supports it.
+ *
+ * This function is private to the EAL.
+ *
+ * @return
+ * The number of TSC cycles in one second.
+ * Returns zero if architecture support is not available.
+ */
+uint64_t get_tsc_freq_arch(void);
+
+/**
* Prepare physical memory mapping
* i.e. hugepages on Linux and
* contigmem on BSD.
@@ -333,17 +213,6 @@ int rte_eal_hugepage_init(void);
int rte_eal_hugepage_attach(void);
/**
- * Returns true if the system is able to obtain
- * physical addresses. Return false if using DMA
- * addresses through an IOMMU.
- *
- * Drivers based on uio will not load unless physical
- * addresses are obtainable. It is only possible to get
- * physical addresses when running as a privileged user.
- */
-bool rte_eal_using_phys_addrs(void);
-
-/**
* Find a bus capable of identifying a device.
*
* @param str
diff --git a/lib/librte_eal/common/include/arch/arm/rte_vect.h b/lib/librte_eal/common/include/arch/arm/rte_vect.h
index 782350d1..aa887a97 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_vect.h
@@ -136,7 +136,7 @@ vgetq_lane_p64(poly64x2_t x, const int lane)
#endif
/*
- * If (0 <= index <= 15), then call the ASIMD ext intruction on the
+ * If (0 <= index <= 15), then call the ASIMD ext instruction on the
* 128 bit regs v0 and v1 with the appropriate index.
*
* Else returns a zero vector.
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
index 2e04c759..fb3abf18 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
@@ -81,7 +81,7 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
: "memory" ); /* no-clobber list */
#else
asm volatile (
- "mov %%ebx, %%edi\n"
+ "xchgl %%ebx, %%edi;\n"
MPLOCKED
"cmpxchg8b (%[dst]);"
"setz %[res];"
diff --git a/lib/librte_sched/rte_bitmap.h b/lib/librte_eal/common/include/rte_bitmap.h
index 010d752c..010d752c 100644
--- a/lib/librte_sched/rte_bitmap.h
+++ b/lib/librte_eal/common/include/rte_bitmap.h
diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h
index c79368d3..6fb08341 100644
--- a/lib/librte_eal/common/include/rte_bus.h
+++ b/lib/librte_eal/common/include/rte_bus.h
@@ -55,6 +55,21 @@ extern "C" {
/** Double linked list of buses */
TAILQ_HEAD(rte_bus_list, rte_bus);
+
+/**
+ * IOVA mapping mode.
+ *
+ * The IOVA mapping mode is the IOMMU programming mode of a device.
+ * Depending on rte_iova_mode, a device (for example, an IOMMU-backed
+ * DMA device) will generate either physical or virtual addresses.
+ *
+ */
+enum rte_iova_mode {
+ RTE_IOVA_DC = 0, /* Don't care mode */
+ RTE_IOVA_PA = (1 << 0), /* DMA using physical address */
+ RTE_IOVA_VA = (1 << 1) /* DMA using virtual address */
+};
+
/**
* Bus specific scan for devices attached on the bus.
* For each bus object, the scan would be responsible for finding devices and
@@ -168,6 +183,20 @@ struct rte_bus_conf {
enum rte_bus_scan_mode scan_mode; /**< Scan policy. */
};
+
+/**
+ * Get the common IOMMU class of all the devices on the bus. The bus may
+ * check that those devices are attached to an IOMMU driver.
+ * If no devices are attached to the bus, the bus may return the don't care
+ * (_DC) value.
+ * Otherwise, the bus will return the appropriate _PA or _VA IOVA mode.
+ *
+ * @return
+ * enum rte_iova_mode value.
+ */
+typedef enum rte_iova_mode (*rte_bus_get_iommu_class_t)(void);
+
+
/**
* A structure describing a generic bus.
*/
@@ -181,6 +210,7 @@ struct rte_bus {
rte_bus_unplug_t unplug; /**< Remove single device from driver */
rte_bus_parse_t parse; /**< Parse a device name */
struct rte_bus_conf conf; /**< Bus configuration */
+ rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */
};
/**
@@ -280,12 +310,22 @@ struct rte_bus *rte_bus_find_by_device(const struct rte_device *dev);
*/
struct rte_bus *rte_bus_find_by_name(const char *busname);
+
+/**
+ * Get the common IOMMU class of the devices bound to the buses available
+ * in the system. The default mode is PA.
+ *
+ * @return
+ * enum rte_iova_mode value.
+ */
+enum rte_iova_mode rte_bus_get_iommu_class(void);
+
/**
* Helper for Bus registration.
* The constructor has higher priority than PMD constructors.
*/
#define RTE_REGISTER_BUS(nm, bus) \
-RTE_INIT_PRIO(businitfn_ ##nm, 101); \
+RTE_INIT_PRIO(businitfn_ ##nm, 110); \
static void businitfn_ ##nm(void) \
{\
(bus).name = RTE_STR(nm);\
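The new get_iommu_class bus method and rte_bus_get_iommu_class() let EAL derive one IOVA mode for the whole system. A minimal sketch of the intended query pattern, assuming only the declarations above:

    #include <rte_bus.h>

    static enum rte_iova_mode
    select_iova_mode(void)
    {
            enum rte_iova_mode mode = rte_bus_get_iommu_class();

            /* Per the comment above, PA is the default when no bus cares. */
            if (mode == RTE_IOVA_DC)
                    mode = RTE_IOVA_PA;
            return mode;
    }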
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index 1afc66e3..de853e16 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -109,6 +109,29 @@ typedef uint16_t unaligned_uint16_t;
#define RTE_SET_USED(x) (void)(x)
/**
+ * Run function before main() with low priority.
+ *
+ * The constructor will be run after prioritized constructors.
+ *
+ * @param func
+ * Constructor function.
+ */
+#define RTE_INIT(func) \
+static void __attribute__((constructor, used)) func(void)
+
+/**
+ * Run function before main() with high priority.
+ *
+ * @param func
+ * Constructor function.
+ * @param prio
+ * Priority number must be above 100.
+ * Lowest number is the first to run.
+ */
+#define RTE_INIT_PRIO(func, prio) \
+static void __attribute__((constructor(prio), used)) func(void)
+
+/**
* Force a function to be inlined
*/
#define __rte_always_inline inline __attribute__((always_inline))
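RTE_INIT and RTE_INIT_PRIO, moved here from rte_eal.h, declare constructors that run before main(). A small sketch of their use (illustrative; the function names are arbitrary):

    #include <stdio.h>
    #include <rte_common.h>

    /* Runs after all prioritized constructors. */
    RTE_INIT(late_setup)
    {
            printf("low-priority init\n");
    }

    /* Priority must be above 100; lower numbers run first. */
    RTE_INIT_PRIO(early_setup, 110)
    {
            printf("high-priority init\n");
    }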
diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h
index cab6fb4c..79b67b3e 100644
--- a/lib/librte_eal/common/include/rte_debug.h
+++ b/lib/librte_eal/common/include/rte_debug.h
@@ -79,7 +79,7 @@ void rte_dump_registers(void);
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
-#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#ifdef RTE_ENABLE_ASSERT
#define RTE_ASSERT(exp) RTE_VERIFY(exp)
#else
#define RTE_ASSERT(exp) do {} while (0)
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index 5386d3a2..9342e0cb 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -49,7 +49,6 @@ extern "C" {
#include <stdio.h>
#include <sys/queue.h>
-#include <rte_config.h>
#include <rte_log.h>
__attribute__((format(printf, 2, 0)))
@@ -152,7 +151,11 @@ struct rte_driver {
const char *alias; /**< Driver alias. */
};
-#define RTE_DEV_NAME_MAX_LEN (32)
+/*
+ * Internal identifier length
+ * Sufficiently large to allow for UUID or PCI address
+ */
+#define RTE_DEV_NAME_MAX_LEN 64
/**
* A structure describing a generic device.
@@ -166,28 +169,6 @@ struct rte_device {
};
/**
- * Initialize a driver specified by name.
- *
- * @param name
- * The pointer to a driver name to be initialized.
- * @param args
- * The pointer to arguments used by driver initialization.
- * @return
- * 0 on success, negative on error
- */
-int rte_vdev_init(const char *name, const char *args);
-
-/**
- * Uninitalize a driver specified by name.
- *
- * @param name
- * The pointer to a driver name to be initialized.
- * @return
- * 0 on success, negative on error
- */
-int rte_vdev_uninit(const char *name);
-
-/**
* Attach a device to a registered driver.
*
* @param name
@@ -312,4 +293,4 @@ __attribute__((used)) = str
}
#endif
-#endif /* _RTE_VDEV_H_ */
+#endif /* _RTE_DEV_H_ */
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 0e7363d7..09b66819 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -44,7 +44,9 @@
#include <sched.h>
#include <rte_per_lcore.h>
-#include <rte_config.h>
+#include <rte_bus.h>
+
+#include <rte_pci_dev_feature_defs.h>
#ifdef __cplusplus
extern "C" {
@@ -87,6 +89,9 @@ struct rte_config {
/** Primary or secondary configuration */
enum rte_proc_type_t process_type;
+ /** PA or VA mapping mode */
+ enum rte_iova_mode iova_mode;
+
/**
* Pointer to memory configuration, which may be shared across multiple
* DPDK instances
@@ -264,6 +269,32 @@ rte_set_application_usage_hook(rte_usage_hook_t usage_func);
int rte_eal_has_hugepages(void);
/**
+ * Whether EAL is using PCI bus.
+ * Disabled by --no-pci option.
+ *
+ * @return
+ * Nonzero if the PCI bus is enabled.
+ */
+int rte_eal_has_pci(void);
+
+/**
+ * Whether the EAL was asked to create a UIO device.
+ *
+ * @return
+ * Nonzero if true.
+ */
+int rte_eal_create_uio_dev(void);
+
+/**
+ * The user-configured vfio interrupt mode.
+ *
+ * @return
+ * Interrupt mode configured with the command line,
+ * RTE_INTR_MODE_NONE by default.
+ */
+enum rte_intr_mode rte_eal_vfio_intr_mode(void);
+
+/**
* A wrap API for syscall gettid.
*
* @return
@@ -287,11 +318,22 @@ static inline int rte_gettid(void)
return RTE_PER_LCORE(_thread_id);
}
-#define RTE_INIT(func) \
-static void __attribute__((constructor, used)) func(void)
+/**
+ * Get the iova mode
+ *
+ * @return
+ * enum rte_iova_mode value.
+ */
+enum rte_iova_mode rte_eal_iova_mode(void);
-#define RTE_INIT_PRIO(func, prio) \
-static void __attribute__((constructor(prio), used)) func(void)
+/**
+ * Get the default pool ops name for mbufs.
+ *
+ * @return
+ * The default pool ops name.
+ */
+const char *
+rte_eal_mbuf_default_mempool_ops(void);
#ifdef __cplusplus
}
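A sketch of how an application might read the new EAL queries after rte_eal_init(); illustrative only, using just the functions declared above:

    #include <stdio.h>
    #include <rte_eal.h>

    static void
    print_eal_config(void)
    {
            printf("IOVA mode: %s\n",
                   rte_eal_iova_mode() == RTE_IOVA_VA ? "VA" : "PA/DC");
            printf("default mbuf pool ops: %s\n",
                   rte_eal_mbuf_default_mempool_ops());
            printf("PCI bus enabled: %d\n", rte_eal_has_pci());
    }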
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/common/include/rte_eal_interrupts.h
index 6daffebf..031f78cc 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
+++ b/lib/librte_eal/common/include/rte_eal_interrupts.h
@@ -35,15 +35,26 @@
#error "don't include this file directly, please include generic <rte_interrupts.h>"
#endif
-#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
-#define _RTE_LINUXAPP_INTERRUPTS_H_
+/**
+ * @file rte_eal_interrupts.h
+ * @internal
+ *
+ * Contains function prototypes exposed by the EAL for interrupt handling by
+ * drivers and other DPDK internal consumers.
+ */
+
+#ifndef _RTE_EAL_INTERRUPTS_H_
+#define _RTE_EAL_INTERRUPTS_H_
#define RTE_MAX_RXTX_INTR_VEC_ID 32
#define RTE_INTR_VEC_ZERO_OFFSET 0
#define RTE_INTR_VEC_RXTX_OFFSET 1
+/**
+ * The interrupt source type, e.g. UIO, VFIO, ALARM.
+ */
enum rte_intr_handle_type {
- RTE_INTR_HANDLE_UNKNOWN = 0,
+ RTE_INTR_HANDLE_UNKNOWN = 0, /**< generic unknown handle */
RTE_INTR_HANDLE_UIO, /**< uio device handle */
RTE_INTR_HANDLE_UIO_INTX, /**< uio generic handle */
RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */
@@ -52,7 +63,7 @@ enum rte_intr_handle_type {
RTE_INTR_HANDLE_ALARM, /**< alarm handle */
RTE_INTR_HANDLE_EXT, /**< external handler */
RTE_INTR_HANDLE_VDEV, /**< virtual device */
- RTE_INTR_HANDLE_MAX
+ RTE_INTR_HANDLE_MAX /**< count of elements */
};
#define RTE_INTR_EVENT_ADD 1UL
@@ -86,13 +97,13 @@ struct rte_intr_handle {
RTE_STD_C11
union {
int vfio_dev_fd; /**< VFIO device file descriptor */
- int uio_cfg_fd; /**< UIO config file descriptor
- for uio_pci_generic */
+ int uio_cfg_fd; /**< UIO cfg file desc for uio_pci_generic */
};
int fd; /**< interrupt event file descriptor */
enum rte_intr_handle_type type; /**< handle type */
uint32_t max_intr; /**< max interrupt requested */
uint32_t nb_efd; /**< number of available efd(event fd) */
+ uint8_t efd_counter_size; /**< size of efd counter, used for vdev */
int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */
struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID];
/**< intr vector epoll event */
@@ -236,4 +247,4 @@ rte_intr_allow_others(struct rte_intr_handle *intr_handle);
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle);
-#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
+#endif /* _RTE_EAL_INTERRUPTS_H_ */
diff --git a/lib/librte_eal/common/include/rte_interrupts.h b/lib/librte_eal/common/include/rte_interrupts.h
index 5d06ed79..43177c7a 100644
--- a/lib/librte_eal/common/include/rte_interrupts.h
+++ b/lib/librte_eal/common/include/rte_interrupts.h
@@ -53,7 +53,7 @@ struct rte_intr_handle;
/** Function to be registered for the specific interrupt */
typedef void (*rte_intr_callback_fn)(void *cb_arg);
-#include <exec-env/rte_interrupts.h>
+#include "rte_eal_interrupts.h"
/**
* It registers the callback for the specific interrupt. Multiple
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index 50e0d0fe..c89e6bab 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -262,6 +262,20 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp);
*/
int rte_thread_setname(pthread_t id, const char *name);
+/**
+ * Test if the core supplied has a specific role
+ *
+ * @param lcore_id
+ * The identifier of the lcore, which MUST be between 0 and
+ * RTE_MAX_LCORE-1.
+ * @param role
+ * The role to be checked against.
+ * @return
+ * On success, return 0; otherwise return a negative value.
+ */
+int
+rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role);
+
#ifdef __cplusplus
}
#endif
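A sketch of rte_lcore_has_role() in use; note the comment above gives 0 as the success return, so the test is "== 0" (illustrative helper, not part of the patch):

    #include <rte_lcore.h>

    static unsigned int
    count_lcores_with_role(enum rte_lcore_role_t role)
    {
            unsigned int i, n = 0;

            for (i = 0; i < RTE_MAX_LCORE; i++)
                    if (rte_lcore_has_role(i, role) == 0)
                            n++;
            return n;
    }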
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index ec8dba79..16564d41 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -87,6 +87,7 @@ extern struct rte_logs rte_logs;
#define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */
#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */
+#define RTE_LOGTYPE_GSO 20 /**< Log related to GSO. */
/* these log types can be used in an application */
#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */
@@ -138,12 +139,6 @@ int rte_openlog_stream(FILE *f);
void rte_log_set_global_level(uint32_t level);
/**
- * Deprecated, replaced by rte_log_set_global_level().
- */
-__rte_deprecated
-void rte_set_log_level(uint32_t level);
-
-/**
* Get the global log level.
*
* @return
@@ -152,29 +147,6 @@ void rte_set_log_level(uint32_t level);
uint32_t rte_log_get_global_level(void);
/**
- * Deprecated, replaced by rte_log_get_global_level().
- */
-__rte_deprecated
-uint32_t rte_get_log_level(void);
-
-/**
- * Enable or disable the log type.
- *
- * @param type
- * Log type, for example, RTE_LOGTYPE_EAL.
- * @param enable
- * True for enable; false for disable.
- */
-__rte_deprecated
-void rte_set_log_type(uint32_t type, int enable);
-
-/**
- * Get the global log type.
- */
-__rte_deprecated
-uint32_t rte_get_log_type(void);
-
-/**
* Get the log level for a given type.
*
* @param logtype
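Callers of the removed deprecated functions migrate to the retained global-level API; a minimal sketch:

    #include <rte_log.h>

    static void
    raise_log_verbosity(void)
    {
            /* Replaces the removed rte_get_log_level()/rte_set_log_level(). */
            if (rte_log_get_global_level() < RTE_LOG_DEBUG)
                    rte_log_set_global_level(RTE_LOG_DEBUG);
    }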
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 3d37f79b..5d4c11a7 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -323,17 +323,24 @@ int
rte_malloc_set_limit(const char *type, size_t max);
/**
- * Return the physical address of a virtual address obtained through
+ * Return the IO address of a virtual address obtained through
* rte_malloc
*
* @param addr
* Address obtained from a previous rte_malloc call
* @return
- * RTE_BAD_PHYS_ADDR on error
- * otherwise return physical address of the buffer
+ * RTE_BAD_IOVA on error
+ * otherwise return an address suitable for IO
*/
-phys_addr_t
-rte_malloc_virt2phy(const void *addr);
+rte_iova_t
+rte_malloc_virt2iova(const void *addr);
+
+__rte_deprecated
+static inline phys_addr_t
+rte_malloc_virt2phy(const void *addr)
+{
+ return rte_malloc_virt2iova(addr);
+}
#ifdef __cplusplus
}
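A sketch of the renamed lookup: allocate from the DPDK heap and fetch the IO address usable for DMA (illustrative; the buffer type string is arbitrary):

    #include <rte_malloc.h>

    static rte_iova_t
    alloc_dma_buffer(void **vaddr, size_t len)
    {
            *vaddr = rte_malloc("dma_buf", len, 0);
            if (*vaddr == NULL)
                    return RTE_BAD_IOVA;
            /* Replaces the deprecated rte_malloc_virt2phy(). */
            return rte_malloc_virt2iova(*vaddr);
    }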
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 4aa5d1f7..14aacea5 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -44,12 +44,6 @@
#include <stddef.h>
#include <stdio.h>
-#include <rte_config.h>
-
-#ifdef RTE_EXEC_ENV_LINUXAPP
-#include <exec-env/rte_dom0_common.h>
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -98,14 +92,27 @@ enum rte_page_sizes {
*/
#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
-typedef uint64_t phys_addr_t; /**< Physical address definition. */
+typedef uint64_t phys_addr_t; /**< Physical address. */
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
+/**
+ * IO virtual address type.
+ * When the physical addressing mode (IOVA as PA) is in use,
+ * the translation from an IO virtual address (IOVA) to a physical address
+ * is a direct mapping, i.e. the same value.
+ * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation.
+ */
+typedef uint64_t rte_iova_t;
+#define RTE_BAD_IOVA ((rte_iova_t)-1)
/**
* Physical memory segment descriptor.
*/
struct rte_memseg {
- phys_addr_t phys_addr; /**< Start physical address. */
+ RTE_STD_C11
+ union {
+ phys_addr_t phys_addr; /**< deprecated - Start physical address. */
+ rte_iova_t iova; /**< Start IO address. */
+ };
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
@@ -116,10 +123,6 @@ struct rte_memseg {
int32_t socket_id; /**< NUMA socket ID. */
uint32_t nchannel; /**< Number of channels. */
uint32_t nrank; /**< Number of ranks. */
-#ifdef RTE_LIBRTE_XEN_DOM0
- /**< store segment MFNs */
- uint64_t mfn[DOM0_NUM_MEMBLOCK];
-#endif
} __rte_packed;
/**
@@ -140,11 +143,21 @@ int rte_mem_lock_page(const void *virt);
* @param virt
* The virtual address.
* @return
- * The physical address or RTE_BAD_PHYS_ADDR on error.
+ * The physical address or RTE_BAD_IOVA on error.
*/
phys_addr_t rte_mem_virt2phy(const void *virt);
/**
+ * Get IO virtual address of any mapped virtual address in the current process.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * The IO address or RTE_BAD_IOVA on error.
+ */
+rte_iova_t rte_mem_virt2iova(const void *virt);
+
+/**
* Get the layout of the available physical memory.
*
* It can be useful for an application to have the full physical
@@ -195,68 +208,16 @@ unsigned rte_memory_get_nchannel(void);
*/
unsigned rte_memory_get_nrank(void);
-#ifdef RTE_LIBRTE_XEN_DOM0
-
-/**< Internal use only - should DOM0 memory mapping be used */
-int rte_xen_dom0_supported(void);
-
-/**< Internal use only - phys to virt mapping for xen */
-phys_addr_t rte_xen_mem_phy2mch(int32_t, const phys_addr_t);
-
/**
- * Return the physical address of elt, which is an element of the pool mp.
- *
- * @param memseg_id
- * Identifier of the memory segment owning the physical address. If
- * set to -1, find it automatically.
- * @param phy_addr
- * physical address of elt.
- *
- * @return
- * The physical address or RTE_BAD_PHYS_ADDR on error.
- */
-static inline phys_addr_t
-rte_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
-{
- if (rte_xen_dom0_supported())
- return rte_xen_mem_phy2mch(memseg_id, phy_addr);
- else
- return phy_addr;
-}
-
-/**
- * Memory init for supporting application running on Xen domain0.
- *
- * @param void
+ * Drivers based on uio will not load unless physical
+ * addresses are obtainable. It is only possible to get
+ * physical addresses when running as a privileged user.
*
* @return
- * 0: successfully
- * negative: error
+ * 1 if the system is able to obtain physical addresses.
+ * 0 if using DMA addresses through an IOMMU.
*/
-int rte_xen_dom0_memory_init(void);
-
-/**
- * Attach to memory setments of primary process on Xen domain0.
- *
- * @param void
- *
- * @return
- * 0: successfully
- * negative: error
- */
-int rte_xen_dom0_memory_attach(void);
-#else
-static inline int rte_xen_dom0_supported(void)
-{
- return 0;
-}
-
-static inline phys_addr_t
-rte_mem_phy2mch(int32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
-{
- return phy_addr;
-}
-#endif
+int rte_eal_using_phys_addrs(void);
#ifdef __cplusplus
}
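A sketch contrasting the two lookups: under IOVA-as-PA they agree, while under IOVA-as-VA only the IOVA value is meaningful for DMA (illustrative only):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_memory.h>

    static void
    show_addresses(const void *virt)
    {
            if (rte_eal_using_phys_addrs())
                    printf("phys: 0x%" PRIx64 "\n", rte_mem_virt2phy(virt));
            printf("iova: 0x%" PRIx64 "\n", rte_mem_virt2iova(virt));
    }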
diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h
index 1d0827f4..6f0ba182 100644
--- a/lib/librte_eal/common/include/rte_memzone.h
+++ b/lib/librte_eal/common/include/rte_memzone.h
@@ -78,7 +78,11 @@ struct rte_memzone {
#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */
- phys_addr_t phys_addr; /**< Start physical address. */
+ RTE_STD_C11
+ union {
+ phys_addr_t phys_addr; /**< deprecated - Start physical address. */
+ rte_iova_t iova; /**< Start IO address. */
+ };
RTE_STD_C11
union {
void *addr; /**< Start virtual address. */
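The anonymous union keeps existing readers of phys_addr compiling while new code reads the same bytes as iova; a sketch (memzone name and size are arbitrary):

    #include <rte_memzone.h>

    static rte_iova_t
    reserve_and_get_iova(void)
    {
            const struct rte_memzone *mz;

            mz = rte_memzone_reserve("example_mz", 4096, SOCKET_ID_ANY, 0);
            if (mz == NULL)
                    return RTE_BAD_IOVA;
            return mz->iova; /* same storage as the deprecated mz->phys_addr */
    }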
diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h
deleted file mode 100644
index 8b123391..00000000
--- a/lib/librte_eal/common/include/rte_pci.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright 2013-2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_PCI_H_
-#define _RTE_PCI_H_
-
-/**
- * @file
- *
- * RTE PCI Interface
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <limits.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <rte_debug.h>
-#include <rte_interrupts.h>
-#include <rte_dev.h>
-#include <rte_bus.h>
-
-/** Pathname of PCI devices directory. */
-const char *pci_get_sysfs_path(void);
-
-/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
-#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
-#define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X")
-
-/** Short formatting string, without domain, for PCI device: Ex: 00:01.0 */
-#define PCI_SHORT_PRI_FMT "%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
-
-/** Nb. of values in PCI device identifier format string. */
-#define PCI_FMT_NVAL 4
-
-/** Nb. of values in PCI resource format. */
-#define PCI_RESOURCE_FMT_NVAL 3
-
-/** Maximum number of PCI resources. */
-#define PCI_MAX_RESOURCE 6
-
-/* Forward declarations */
-struct rte_pci_device;
-struct rte_pci_driver;
-
-/** List of PCI devices */
-TAILQ_HEAD(rte_pci_device_list, rte_pci_device);
-/** List of PCI drivers */
-TAILQ_HEAD(rte_pci_driver_list, rte_pci_driver);
-
-/* PCI Bus iterators */
-#define FOREACH_DEVICE_ON_PCIBUS(p) \
- TAILQ_FOREACH(p, &(rte_pci_bus.device_list), next)
-
-#define FOREACH_DRIVER_ON_PCIBUS(p) \
- TAILQ_FOREACH(p, &(rte_pci_bus.driver_list), next)
-
-/**
- * A structure describing an ID for a PCI driver. Each driver provides a
- * table of these IDs for each device that it supports.
- */
-struct rte_pci_id {
- uint32_t class_id; /**< Class ID (class, subclass, pi) or RTE_CLASS_ANY_ID. */
- uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */
- uint16_t device_id; /**< Device ID or PCI_ANY_ID. */
- uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
- uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */
-};
-
-/**
- * A structure describing the location of a PCI device.
- */
-struct rte_pci_addr {
- uint32_t domain; /**< Device domain */
- uint8_t bus; /**< Device bus */
- uint8_t devid; /**< Device ID */
- uint8_t function; /**< Device function. */
-};
-
-struct rte_devargs;
-
-/**
- * A structure describing a PCI device.
- */
-struct rte_pci_device {
- TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */
- struct rte_device device; /**< Inherit core device */
- struct rte_pci_addr addr; /**< PCI location. */
- struct rte_pci_id id; /**< PCI ID. */
- struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
- /**< PCI Memory Resource */
- struct rte_intr_handle intr_handle; /**< Interrupt handle */
- struct rte_pci_driver *driver; /**< Associated driver */
- uint16_t max_vfs; /**< sriov enable if not zero */
- enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
- char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
-};
-
-/**
- * @internal
- * Helper macro for drivers that need to convert to struct rte_pci_device.
- */
-#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
-
-/** Any PCI device identifier (vendor, device, ...) */
-#define PCI_ANY_ID (0xffff)
-#define RTE_CLASS_ANY_ID (0xffffff)
-
-#ifdef __cplusplus
-/** C++ macro used to help building up tables of device IDs */
-#define RTE_PCI_DEVICE(vend, dev) \
- RTE_CLASS_ANY_ID, \
- (vend), \
- (dev), \
- PCI_ANY_ID, \
- PCI_ANY_ID
-#else
-/** Macro used to help building up tables of device IDs */
-#define RTE_PCI_DEVICE(vend, dev) \
- .class_id = RTE_CLASS_ANY_ID, \
- .vendor_id = (vend), \
- .device_id = (dev), \
- .subsystem_vendor_id = PCI_ANY_ID, \
- .subsystem_device_id = PCI_ANY_ID
-#endif
-
-/**
- * Initialisation function for the driver called during PCI probing.
- */
-typedef int (pci_probe_t)(struct rte_pci_driver *, struct rte_pci_device *);
-
-/**
- * Uninitialisation function for the driver called during hotplugging.
- */
-typedef int (pci_remove_t)(struct rte_pci_device *);
-
-/**
- * A structure describing a PCI driver.
- */
-struct rte_pci_driver {
- TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
- struct rte_driver driver; /**< Inherit core driver. */
- struct rte_pci_bus *bus; /**< PCI bus reference. */
- pci_probe_t *probe; /**< Device Probe function. */
- pci_remove_t *remove; /**< Device Remove function. */
- const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
- uint32_t drv_flags; /**< Flags contolling handling of device. */
-};
-
-/**
- * Structure describing the PCI bus
- */
-struct rte_pci_bus {
- struct rte_bus bus; /**< Inherit the generic class */
- struct rte_pci_device_list device_list; /**< List of PCI devices */
- struct rte_pci_driver_list driver_list; /**< List of PCI drivers */
-};
-
-/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
-#define RTE_PCI_DRV_NEED_MAPPING 0x0001
-/** Device driver supports link state interrupt */
-#define RTE_PCI_DRV_INTR_LSC 0x0008
-/** Device driver supports device removal interrupt */
-#define RTE_PCI_DRV_INTR_RMV 0x0010
-/** Device driver needs to keep mapped resources if unsupported dev detected */
-#define RTE_PCI_DRV_KEEP_MAPPED_RES 0x0020
-
-/**
- * A structure describing a PCI mapping.
- */
-struct pci_map {
- void *addr;
- char *path;
- uint64_t offset;
- uint64_t size;
- uint64_t phaddr;
-};
-
-/**
- * A structure describing a mapped PCI resource.
- * For multi-process we need to reproduce all PCI mappings in secondary
- * processes, so save them in a tailq.
- */
-struct mapped_pci_resource {
- TAILQ_ENTRY(mapped_pci_resource) next;
-
- struct rte_pci_addr pci_addr;
- char path[PATH_MAX];
- int nb_maps;
- struct pci_map maps[PCI_MAX_RESOURCE];
-};
-
-/** mapped pci device list */
-TAILQ_HEAD(mapped_pci_res_list, mapped_pci_resource);
-
-/**< Internal use only - Macro used by pci addr parsing functions **/
-#define GET_PCIADDR_FIELD(in, fd, lim, dlm) \
-do { \
- unsigned long val; \
- char *end; \
- errno = 0; \
- val = strtoul((in), &end, 16); \
- if (errno != 0 || end[0] != (dlm) || val > (lim)) \
- return -EINVAL; \
- (fd) = (typeof (fd))val; \
- (in) = end + 1; \
-} while(0)
-
-/**
- * Utility function to produce a PCI Bus-Device-Function value
- * given a string representation. Assumes that the BDF is provided without
- * a domain prefix (i.e. domain returned is always 0)
- *
- * @param input
- * The input string to be parsed. Should have the format XX:XX.X
- * @param dev_addr
- * The PCI Bus-Device-Function address to be returned. Domain will always be
- * returned as 0
- * @return
- * 0 on success, negative on error.
- */
-static inline int
-eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
-{
- dev_addr->domain = 0;
- GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
- GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
- GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return 0;
-}
-
-/**
- * Utility function to produce a PCI Bus-Device-Function value
- * given a string representation. Assumes that the BDF is provided including
- * a domain prefix.
- *
- * @param input
- * The input string to be parsed. Should have the format XXXX:XX:XX.X
- * @param dev_addr
- * The PCI Bus-Device-Function address to be returned
- * @return
- * 0 on success, negative on error.
- */
-static inline int
-eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
-{
- GET_PCIADDR_FIELD(input, dev_addr->domain, UINT16_MAX, ':');
- GET_PCIADDR_FIELD(input, dev_addr->bus, UINT8_MAX, ':');
- GET_PCIADDR_FIELD(input, dev_addr->devid, UINT8_MAX, '.');
- GET_PCIADDR_FIELD(input, dev_addr->function, UINT8_MAX, 0);
- return 0;
-}
-#undef GET_PCIADDR_FIELD
-
-/**
- * Utility function to write a pci device name, this device name can later be
- * used to retrieve the corresponding rte_pci_addr using eal_parse_pci_*
- * BDF helpers.
- *
- * @param addr
- * The PCI Bus-Device-Function address
- * @param output
- * The output buffer string
- * @param size
- * The output buffer size
- */
-static inline void
-rte_pci_device_name(const struct rte_pci_addr *addr,
- char *output, size_t size)
-{
- RTE_VERIFY(size >= PCI_PRI_STR_SIZE);
- RTE_VERIFY(snprintf(output, size, PCI_PRI_FMT,
- addr->domain, addr->bus,
- addr->devid, addr->function) >= 0);
-}
-
-/* Compare two PCI device addresses. */
-/**
- * Utility function to compare two PCI device addresses.
- *
- * @param addr
- * The PCI Bus-Device-Function address to compare
- * @param addr2
- * The PCI Bus-Device-Function address to compare
- * @return
- * 0 on equal PCI address.
- * Positive on addr is greater than addr2.
- * Negative on addr is less than addr2, or error.
- */
-static inline int
-rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
- const struct rte_pci_addr *addr2)
-{
- uint64_t dev_addr, dev_addr2;
-
- if ((addr == NULL) || (addr2 == NULL))
- return -1;
-
- dev_addr = ((uint64_t)addr->domain << 24) |
- (addr->bus << 16) | (addr->devid << 8) | addr->function;
- dev_addr2 = ((uint64_t)addr2->domain << 24) |
- (addr2->bus << 16) | (addr2->devid << 8) | addr2->function;
-
- if (dev_addr > dev_addr2)
- return 1;
- else if (dev_addr < dev_addr2)
- return -1;
- else
- return 0;
-}
-
-/**
- * Scan the content of the PCI bus, and the devices in the devices
- * list
- *
- * @return
- * 0 on success, negative on error
- */
-int rte_pci_scan(void);
-
-/**
- * Probe the PCI bus
- *
- * @return
- * - 0 on success.
- * - !0 on error.
- */
-int
-rte_pci_probe(void);
-
-/**
- * Map the PCI device resources in user space virtual memory address
- *
- * Note that driver should not call this function when flag
- * RTE_PCI_DRV_NEED_MAPPING is set, as EAL will do that for
- * you when it's on.
- *
- * @param dev
- * A pointer to a rte_pci_device structure describing the device
- * to use
- *
- * @return
- * 0 on success, negative on error and positive if no driver
- * is found for the device.
- */
-int rte_pci_map_device(struct rte_pci_device *dev);
-
-/**
- * Unmap this device
- *
- * @param dev
- * A pointer to a rte_pci_device structure describing the device
- * to use
- */
-void rte_pci_unmap_device(struct rte_pci_device *dev);
-
-/**
- * @internal
- * Map a particular resource from a file.
- *
- * @param requested_addr
- * The starting address for the new mapping range.
- * @param fd
- * The file descriptor.
- * @param offset
- * The offset for the mapping range.
- * @param size
- * The size for the mapping range.
- * @param additional_flags
- * The additional flags for the mapping range.
- * @return
- * - On success, the function returns a pointer to the mapped area.
- * - On error, the value MAP_FAILED is returned.
- */
-void *pci_map_resource(void *requested_addr, int fd, off_t offset,
- size_t size, int additional_flags);
-
-/**
- * @internal
- * Unmap a particular resource.
- *
- * @param requested_addr
- * The address for the unmapping range.
- * @param size
- * The size for the unmapping range.
- */
-void pci_unmap_resource(void *requested_addr, size_t size);
-
-/**
- * Probe the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the probe() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to probe.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_probe_one(const struct rte_pci_addr *addr);
-
-/**
- * Close the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the remove() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to close.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_detach(const struct rte_pci_addr *addr);
-
-/**
- * Dump the content of the PCI bus.
- *
- * @param f
- * A pointer to a file for output
- */
-void rte_pci_dump(FILE *f);
-
-/**
- * Register a PCI driver.
- *
- * @param driver
- * A pointer to a rte_pci_driver structure describing the driver
- * to be registered.
- */
-void rte_pci_register(struct rte_pci_driver *driver);
-
-/** Helper for PCI device registration from driver (eth, crypto) instance */
-#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
-RTE_INIT(pciinitfn_ ##nm); \
-static void pciinitfn_ ##nm(void) \
-{\
- (pci_drv).driver.name = RTE_STR(nm);\
- rte_pci_register(&pci_drv); \
-} \
-RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
-
-/**
- * Unregister a PCI driver.
- *
- * @param driver
- * A pointer to a rte_pci_driver structure describing the driver
- * to be unregistered.
- */
-void rte_pci_unregister(struct rte_pci_driver *driver);
-
-/**
- * Read PCI config space.
- *
- * @param device
- * A pointer to a rte_pci_device structure describing the device
- * to use
- * @param buf
- * A data buffer where the bytes should be read into
- * @param len
- * The length of the data buffer.
- * @param offset
- * The offset into PCI config space
- */
-int rte_pci_read_config(const struct rte_pci_device *device,
- void *buf, size_t len, off_t offset);
-
-/**
- * Write PCI config space.
- *
- * @param device
- * A pointer to a rte_pci_device structure describing the device
- * to use
- * @param buf
- * A data buffer containing the bytes should be written
- * @param len
- * The length of the data buffer.
- * @param offset
- * The offset into PCI config space
- */
-int rte_pci_write_config(const struct rte_pci_device *device,
- const void *buf, size_t len, off_t offset);
-
-/**
- * A structure used to access io resources for a pci device.
- * rte_pci_ioport is arch, os, driver specific, and should not be used outside
- * of pci ioport api.
- */
-struct rte_pci_ioport {
- struct rte_pci_device *dev;
- uint64_t base;
- uint64_t len; /* only filled for memory mapped ports */
-};
-
-/**
- * Initialize a rte_pci_ioport object for a pci device io resource.
- *
- * This object is then used to gain access to those io resources (see below).
- *
- * @param dev
- * A pointer to a rte_pci_device structure describing the device
- * to use.
- * @param bar
- * Index of the io pci resource we want to access.
- * @param p
- * The rte_pci_ioport object to be initialized.
- * @return
- * 0 on success, negative on error.
- */
-int rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
- struct rte_pci_ioport *p);
-
-/**
- * Release any resources used in a rte_pci_ioport object.
- *
- * @param p
- * The rte_pci_ioport object to be uninitialized.
- * @return
- * 0 on success, negative on error.
- */
-int rte_pci_ioport_unmap(struct rte_pci_ioport *p);
-
-/**
- * Read from a io pci resource.
- *
- * @param p
- * The rte_pci_ioport object from which we want to read.
- * @param data
- * A data buffer where the bytes should be read into
- * @param len
- * The length of the data buffer.
- * @param offset
- * The offset into the pci io resource.
- */
-void rte_pci_ioport_read(struct rte_pci_ioport *p,
- void *data, size_t len, off_t offset);
-
-/**
- * Write to a io pci resource.
- *
- * @param p
- * The rte_pci_ioport object to which we want to write.
- * @param data
- * A data buffer where the bytes should be read into
- * @param len
- * The length of the data buffer.
- * @param offset
- * The offset into the pci io resource.
- */
-void rte_pci_ioport_write(struct rte_pci_ioport *p,
- const void *data, size_t len, off_t offset);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PCI_H_ */
diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h
index 7c6f7383..92724406 100644
--- a/lib/librte_eal/common/include/rte_service.h
+++ b/lib/librte_eal/common/include/rte_service.h
@@ -61,9 +61,6 @@ extern "C" {
#include <rte_lcore.h>
-/* forward declaration only. Definition in rte_service_private.h */
-struct rte_service_spec;
-
#define RTE_SERVICE_NAME_MAX 32
/* Capabilities of a service.
@@ -89,40 +86,32 @@ struct rte_service_spec;
*/
uint32_t rte_service_get_count(void);
-
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Return the specification of a service by integer id.
+ * Return the id of a service by name.
*
- * This function provides the specification of a service. This can be used by
- * the application to understand what the service represents. The service
- * must not be modified by the application directly, only passed to the various
- * rte_service_* functions.
- *
- * @param id The integer id of the service to retrieve
- * @retval non-zero A valid pointer to the service_spec
- * @retval NULL Invalid *id* provided.
- */
-struct rte_service_spec *rte_service_get_by_id(uint32_t id);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
+ * This function provides the id of the service using the service name as
+ * lookup key. The service id is to be passed to other functions in the
+ * rte_service_* API.
*
- * Return the specification of a service by name.
- *
- * This function provides the specification of a service using the service name
- * as lookup key. This can be used by the application to understand what the
- * service represents. The service must not be modified by the application
- * directly, only passed to the various rte_service_* functions.
+ * Example usage:
+ * @code
+ * uint32_t service_id;
+ * int32_t ret = rte_service_get_by_name("service_X", &service_id);
+ * if (ret) {
+ * // handle error
+ * }
+ * @endcode
*
* @param name The name of the service to retrieve
- * @retval non-zero A valid pointer to the service_spec
- * @retval NULL Invalid *name* provided.
+ * @param[out] service_id A pointer to a uint32_t, to be filled in with the id.
+ * @retval 0 Success. The service id is provided in *service_id*.
+ * @retval -EINVAL Null *service_id* pointer provided
+ * @retval -ENODEV No such service registered
*/
-struct rte_service_spec *rte_service_get_by_name(const char *name);
+int32_t rte_service_get_by_name(const char *name, uint32_t *service_id);
/**
* @warning
@@ -133,7 +122,7 @@ struct rte_service_spec *rte_service_get_by_name(const char *name);
* @return A pointer to the name of the service. The returned pointer remains
* in ownership of the service, and the application must not free it.
*/
-const char *rte_service_get_name(const struct rte_service_spec *service);
+const char *rte_service_get_name(uint32_t id);
/**
* @warning
@@ -146,17 +135,16 @@ const char *rte_service_get_name(const struct rte_service_spec *service);
* @retval 1 Capability supported by this service instance
* @retval 0 Capability not supported by this service instance
*/
-int32_t rte_service_probe_capability(const struct rte_service_spec *service,
- uint32_t capability);
+int32_t rte_service_probe_capability(uint32_t id, uint32_t capability);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Enable a core to run a service.
+ * Map or unmap a lcore to a service.
*
- * Each core can be added or removed from running specific services. This
- * functions adds *lcore* to the set of cores that will run *service*.
+ * Each core can be added or removed from running a specific service. This
+ * function enables or disables *lcore* to run *service_id*.
*
* If multiple cores are enabled on a service, an atomic is used to ensure that
* only one cores runs the service at a time. The exception to this is when
@@ -164,82 +152,120 @@ int32_t rte_service_probe_capability(const struct rte_service_spec *service,
* called RTE_SERVICE_CAP_MT_SAFE. With the multi-thread safe capability set,
* the service function can be run on multiple threads at the same time.
*
- * @retval 0 lcore added successfully
+ * @param service_id the service to apply the lcore to
+ * @param lcore The lcore that will be mapped to service
+ * @param enable Zero to unmap or disable the core, non-zero to enable
+ *
+ * @retval 0 lcore map updated successfully
* @retval -EINVAL An invalid service or lcore was provided.
*/
-int32_t rte_service_enable_on_lcore(struct rte_service_spec *service,
- uint32_t lcore);
+int32_t rte_service_map_lcore_set(uint32_t service_id, uint32_t lcore,
+ uint32_t enable);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Disable a core to run a service.
+ * Retrieve the mapping of an lcore to a service.
*
- * Each core can be added or removed from running specific services. This
- * functions removes *lcore* to the set of cores that will run *service*.
+ * @param service_id the service to apply the lcore to
+ * @param lcore The lcore that will be mapped to service
*
- * @retval 0 Lcore removed successfully
+ * @retval 1 lcore is mapped to service
+ * @retval 0 lcore is not mapped to service
* @retval -EINVAL An invalid service or lcore was provided.
*/
-int32_t rte_service_disable_on_lcore(struct rte_service_spec *service,
- uint32_t lcore);
+int32_t rte_service_map_lcore_get(uint32_t service_id, uint32_t lcore);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Return if an lcore is enabled for the service.
+ * Set the runstate of the service.
*
- * This function allows the application to query if *lcore* is currently set to
- * run *service*.
+ * Each service is either running or stopped. Setting a non-zero runstate
+ * enables the service to run, while setting runstate zero disables it.
*
- * @retval 1 Lcore enabled on this lcore
- * @retval 0 Lcore disabled on this lcore
- * @retval -EINVAL An invalid service or lcore was provided.
+ * @param id The id of the service
+ * @param runstate The run state to apply to the service
+ *
+ * @retval 0 The service was successfully started
+ * @retval -EINVAL Invalid service id
*/
-int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
- uint32_t lcore);
-
+int32_t rte_service_runstate_set(uint32_t id, uint32_t runstate);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Enable *service* to run.
- *
- * This function switches on a service during runtime.
- * @retval 0 The service was successfully started
+ * Get the runstate for the service with *id*. See *rte_service_runstate_set*
+ * for details of runstates. A service can call this function to ensure that
+ * the application has indicated that it will receive CPU cycles. Either a
+ * service-core is mapped (default case), or the application has explicitly
+ * disabled the check that a service core is mapped to the service, and takes
+ * responsibility for running the service manually with
+ * *rte_service_run_iter_on_app_lcore*.
+ *
+ * @retval 1 Service is running
+ * @retval 0 Service is stopped
+ * @retval -EINVAL Invalid service id
*/
-int32_t rte_service_start(struct rte_service_spec *service);
+int32_t rte_service_runstate_get(uint32_t id);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Disable *service*.
+ * Enable or disable the check for a service-core being mapped to the service.
+ * An application can disable the check when it takes responsibility to run a
+ * service itself using *rte_service_run_iter_on_app_lcore*.
+ *
+ * @param id The id of the service to set the check on
+ * @param enable When zero, the check is disabled. Non-zero enables the check.
*
- * Switch off a service, so it is not run until it is *rte_service_start* is
- * called on it.
- * @retval 0 Service successfully switched off
+ * @retval 0 Success
+ * @retval -EINVAL Invalid service ID
*/
-int32_t rte_service_stop(struct rte_service_spec *service);
+int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enable);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Returns if *service* is currently running.
- *
- * This function returns true if the service has been started using
- * *rte_service_start*, AND a service core is mapped to the service. This
- * function can be used to ensure that the service will be run.
- *
- * @retval 1 Service is currently running, and has a service lcore mapped
- * @retval 0 Service is currently stopped, or no service lcore is mapped
- * @retval -EINVAL Invalid service pointer provided
+ * This function runs a service callback from a non-service lcore.
+ *
+ * This function is designed to enable gradual porting to service cores, and
+ * to enable unit tests to verify a service behaves as expected.
+ *
+ * When called, this function ensures that the service identified by *id* is
+ * safe to run on this lcore. Multi-thread safe services are invoked even if
+ * other cores are simultaneously running them as they are multi-thread safe.
+ *
+ * Multi-thread unsafe services are handled depending on the variable
+ * *serialize_multithread_unsafe*:
+ * - When set, the function will check if a service is already being invoked
+ * on another lcore, refusing to run it and returning -EBUSY.
+ * - When zero, the application takes responsibility to ensure that the service
+ * indicated by *id* is not going to be invoked by another lcore. This setting
+ * avoids atomic operations, so is likely to be more performant.
+ *
+ * @param id The ID of the service to run
+ * @param serialize_multithread_unsafe This parameter indicates to the service
+ * cores library if it is required to use atomics to serialize access
+ * to multi-thread unsafe services. As there is an overhead in using
+ * atomics, applications can choose to enable or disable this feature
+ *
+ * Note that any thread calling this function MUST be a DPDK EAL thread, as
+ * the *rte_lcore_id* function is used to access internal data structures.
+ *
+ * @retval 0 Service was run on the calling thread successfully
+ * @retval -EBUSY Another lcore is executing the service, and it is not a
+ * multi-thread safe service, so the service was not run on this lcore
+ * @retval -ENOEXEC Service is not in a run-able state
+ * @retval -EINVAL Invalid service id
*/
-int32_t rte_service_is_running(const struct rte_service_spec *service);
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
+ uint32_t serialize_multithread_unsafe);
/**
* @warning
@@ -341,13 +367,12 @@ int32_t rte_service_lcore_reset_all(void);
* Enable or disable statistics collection for *service*.
*
* This function enables per core, per-service cycle count collection.
- * @param service The service to enable statistics gathering on.
+ * @param id The service to enable statistics gathering on.
* @param enable Zero to disable statistics, non-zero to enable.
* @retval 0 Success
* @retval -EINVAL Invalid service pointer passed
*/
-int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
- int32_t enable);
+int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable);
/**
* @warning
@@ -374,10 +399,26 @@ int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Dumps any information available about the service. If service is NULL,
- * dumps info for all services.
+ * Get the number of services running on the supplied lcore.
+ *
+ * @param lcore Id of the service core.
+ * @retval >=0 Number of services registered to this core.
+ * @retval -EINVAL Invalid lcore provided
+ * @retval -ENOTSUP The provided lcore is not a service core.
+ */
+int32_t rte_service_lcore_count_services(uint32_t lcore);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Dumps any information available about the service. When id is UINT32_MAX,
+ * this function dumps info for all services.
+ *
+ * @retval 0 Statistics have been successfully dumped
+ * @retval -EINVAL Invalid service id provided
*/
-int32_t rte_service_dump(FILE *f, struct rte_service_spec *service);
+int32_t rte_service_dump(FILE *f, uint32_t id);
#ifdef __cplusplus
}
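Taken together, the id-based calls replace the removed rte_service_spec pointer flow. A minimal sketch: look the service up by name, map an lcore, then start it (illustrative only, using the declarations above):

    #include <rte_service.h>

    static int32_t
    start_service_on_lcore(const char *name, uint32_t lcore)
    {
            uint32_t id;
            int32_t ret = rte_service_get_by_name(name, &id);

            if (ret)
                    return ret;
            ret = rte_service_map_lcore_set(id, lcore, 1);
            if (ret)
                    return ret;
            return rte_service_runstate_set(id, 1);
    }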
diff --git a/lib/librte_eal/common/include/rte_service_component.h b/lib/librte_eal/common/include/rte_service_component.h
index 7a946a1e..ac965cb4 100644
--- a/lib/librte_eal/common/include/rte_service_component.h
+++ b/lib/librte_eal/common/include/rte_service_component.h
@@ -85,21 +85,30 @@ struct rte_service_spec {
*
* For example the eventdev SW PMD requires CPU cycles to perform its
* scheduling. This can be achieved by registering it as a service, and the
- * application can then assign CPU resources to it using
- * *rte_service_set_coremask*.
+ * application can then assign CPU resources to that service.
+ *
+ * Note that when a service component registers itself, it is not permitted to
+ * add or remove service-core threads, or modify lcore-to-service mappings. The
+ * only API that may be called by the service-component is
+ * *rte_service_component_runstate_set*, which indicates that the service
+ * component is ready to be executed.
*
* @param spec The specification of the service to register
+ * @param[out] service_id A pointer to a uint32_t, which will be filled in
+ * during registration of the service. It is set to the integer
+ * service id given to the service. This parameter may be NULL.
* @retval 0 Successfully registered the service.
* -EINVAL Attempted to register an invalid service (eg, no callback
* set)
*/
-int32_t rte_service_register(const struct rte_service_spec *spec);
+int32_t rte_service_component_register(const struct rte_service_spec *spec,
+ uint32_t *service_id);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Unregister a service.
+ * Unregister a service component.
*
* The service being removed must be stopped before calling this function.
*
@@ -107,7 +116,7 @@ int32_t rte_service_register(const struct rte_service_spec *spec);
* @retval -EBUSY The service is currently running, stop the service before
* calling unregister. No action has been taken.
*/
-int32_t rte_service_unregister(struct rte_service_spec *service);
+int32_t rte_service_component_unregister(uint32_t id);
/**
* @warning
@@ -131,6 +140,23 @@ int32_t rte_service_start_with_defaults(void);
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
+ * Set the backend runstate of a component.
+ *
+ * This function allows services to be registered at startup, but not yet
+ * enabled to run by default. When the service has been configured (via the
+ * usual method, e.g. rte_eventdev_configure), the service can mark itself as
+ * ready to run. The differentiation between backend runstate and
+ * service_runstate is that the backend runstate is set by the service
+ * component while the service runstate is reserved for application usage.
+ *
+ * @retval 0 Success
+ */
+int32_t rte_service_component_runstate_set(uint32_t id, uint32_t runstate);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
* Initialize the service library.
*
* In order to use the service library, it must be initialized. EAL initializes
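A sketch of the new registration flow for a service component; the rte_service_spec field names used here (name, callback) are assumptions from the surrounding tree, not shown in this hunk:

    #include <rte_service_component.h>

    static int32_t
    my_service_cb(void *userdata)
    {
            (void)userdata;
            /* one iteration of the component's work */
            return 0;
    }

    static int32_t
    register_my_service(uint32_t *id)
    {
            struct rte_service_spec spec = {
                    .name = "my_service",      /* assumed field name */
                    .callback = my_service_cb, /* assumed field name */
            };
            int32_t ret = rte_service_component_register(&spec, id);

            if (ret)
                    return ret;
            /* mark the component ready to be executed */
            return rte_service_component_runstate_set(*id, 1);
    }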
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index a69a7075..d08cf48a 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -61,7 +61,7 @@ extern "C" {
/**
* Minor version/month number i.e. the mm in yy.mm.z
*/
-#define RTE_VER_MONTH 8
+#define RTE_VER_MONTH 11
/**
* Patch level number i.e. the z in yy.mm.z
@@ -71,14 +71,14 @@ extern "C" {
/**
* Extra string to be appended to version number
*/
-#define RTE_VER_SUFFIX ""
+#define RTE_VER_SUFFIX "-rc"
/**
* Patch release number
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 16
+#define RTE_VER_RELEASE 3
/**
* Macro to compute a version number usable for comparisons
diff --git a/lib/librte_eal/common/include/rte_vfio.h b/lib/librte_eal/common/include/rte_vfio.h
new file mode 100644
index 00000000..a69c4ff6
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_vfio.h
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 6WIND S.A. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_VFIO_H_
+#define _RTE_VFIO_H_
+
+/*
+ * determine if VFIO is present on the system
+ */
+#if !defined(VFIO_PRESENT) && defined(RTE_EAL_VFIO)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#define VFIO_PRESENT
+#endif /* kernel version >= 3.6.0 */
+#endif /* RTE_EAL_VFIO */
+
+#ifdef VFIO_PRESENT
+
+#include <linux/vfio.h>
+
+#define VFIO_DIR "/dev/vfio"
+#define VFIO_CONTAINER_PATH "/dev/vfio/vfio"
+#define VFIO_GROUP_FMT "/dev/vfio/%u"
+#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u"
+#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL)
+#define VFIO_GET_REGION_IDX(x) (x >> 40)
+#define VFIO_NOIOMMU_MODE \
+ "/sys/module/vfio/parameters/enable_unsafe_noiommu_mode"
+
+/**
+ * Set up vfio_cfg for the device identified by its address.
+ * It discovers the configured I/O MMU groups or sets a new one for the device.
+ * If a new group is assigned, the DMA mapping is performed.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ * sysfs path prefix.
+ *
+ * @param dev_addr
+ * device location.
+ *
+ * @param vfio_dev_fd
+ * VFIO fd.
+ *
+ * @param device_info
+ * Device information.
+ *
+ * @return
+ * 0 on success.
+ * <0 on failure.
+ * >0 if the device cannot be managed this way.
+ */
+int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+ int *vfio_dev_fd, struct vfio_device_info *device_info);
+
+/**
+ * Release a device mapped to a VFIO-managed I/O MMU group.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ * sysfs path prefix.
+ *
+ * @param dev_addr
+ * device location.
+ *
+ * @param fd
+ * VFIO fd.
+ *
+ * @return
+ * 0 on success.
+ * <0 on failure.
+ */
+int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
+
+/**
+ * Enable a VFIO-related kmod.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param modname
+ * kernel module name.
+ *
+ * @return
+ * 0 on success.
+ * <0 on failure.
+ */
+int rte_vfio_enable(const char *modname);
+
+/**
+ * Check whether a VFIO-related kmod is enabled.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param modname
+ * kernel module name.
+ *
+ * @return
+ * !0 if true.
+ * 0 otherwise.
+ */
+int rte_vfio_is_enabled(const char *modname);
+
+/**
+ * Whether VFIO NOIOMMU mode is enabled.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @return
+ * !0 if true.
+ * 0 otherwise.
+ */
+int rte_vfio_noiommu_is_enabled(void);
+
+#endif /* VFIO_PRESENT */
+
+#endif /* _RTE_VFIO_H_ */
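A short usage sketch for this new public header; note that rte_vfio_enable() is normally invoked by the EAL during initialization, so applications and drivers usually only query state:

#include <stdio.h>
#include <rte_vfio.h>

static void
report_vfio_status(void)
{
#ifdef VFIO_PRESENT
	if (rte_vfio_is_enabled("vfio")) {
		printf("VFIO is enabled\n");
		if (rte_vfio_noiommu_is_enabled() == 1)
			printf("... in unsafe no-IOMMU mode\n");
	} else {
		printf("VFIO is not available\n");
	}
#else
	printf("built without VFIO support\n");
#endif
}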
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 15076905..889dffd2 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -275,14 +275,14 @@ malloc_elem_free(struct malloc_elem *elem)
return -1;
rte_spinlock_lock(&(elem->heap->lock));
- size_t sz = elem->size - sizeof(*elem);
+ size_t sz = elem->size - sizeof(*elem) - MALLOC_ELEM_TRAILER_LEN;
uint8_t *ptr = (uint8_t *)&elem[1];
struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
if (next->state == ELEM_FREE){
/* remove from free list, join to this one */
elem_free_list_remove(next);
join_elem(elem, next);
- sz += sizeof(*elem);
+ sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
}
/* check if previous element is free, if so join with it and return,
@@ -291,8 +291,8 @@ malloc_elem_free(struct malloc_elem *elem)
if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
elem_free_list_remove(elem->prev);
join_elem(elem->prev, elem);
- sz += sizeof(*elem);
- ptr -= sizeof(*elem);
+ sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
+ ptr -= (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
elem = elem->prev;
}
malloc_elem_free_list_insert(elem);
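The reasoning behind this fix, as a sketch (helper name hypothetical): with debug cookies enabled each element is laid out as [header][data][trailer], and a merged free neighbour's data region was already cleared when that neighbour was freed, so only the boundary metadata newly becomes part of the zeroable data region.

#include <stddef.h>

/* illustrative only: bytes newly exposed as data when a free
 * neighbour is merged, i.e. one header plus one trailer, matching
 * the sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN increments above */
static size_t
newly_exposed_on_join(size_t header_len, size_t trailer_len)
{
	return header_len + trailer_len;
}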
diff --git a/lib/librte_eal/common/malloc_elem.h b/lib/librte_eal/common/malloc_elem.h
index f04b2d1e..ce39129d 100644
--- a/lib/librte_eal/common/malloc_elem.h
+++ b/lib/librte_eal/common/malloc_elem.h
@@ -53,13 +53,13 @@ struct malloc_elem {
volatile enum elem_state state;
uint32_t pad;
size_t size;
-#ifdef RTE_LIBRTE_MALLOC_DEBUG
+#ifdef RTE_MALLOC_DEBUG
uint64_t header_cookie; /* Cookie marking start of data */
/* trailer cookie at start + size */
#endif
} __rte_cache_aligned;
-#ifndef RTE_LIBRTE_MALLOC_DEBUG
+#ifndef RTE_MALLOC_DEBUG
static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
/* dummy function - just check if pointer is non-null */
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index 5c0627bf..fe2278bc 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -246,15 +246,22 @@ rte_malloc_set_limit(__rte_unused const char *type,
}
/*
- * Return the physical address of a virtual address obtained through rte_malloc
+ * Return the IO address of a virtual address obtained through rte_malloc
*/
-phys_addr_t
-rte_malloc_virt2phy(const void *addr)
+rte_iova_t
+rte_malloc_virt2iova(const void *addr)
{
+ rte_iova_t iova;
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
- return RTE_BAD_PHYS_ADDR;
- if (elem->ms->phys_addr == RTE_BAD_PHYS_ADDR)
- return RTE_BAD_PHYS_ADDR;
- return elem->ms->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->ms->addr);
+ return RTE_BAD_IOVA;
+ if (elem->ms->iova == RTE_BAD_IOVA)
+ return RTE_BAD_IOVA;
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ iova = (uintptr_t)addr;
+ else
+ iova = elem->ms->iova +
+ RTE_PTR_DIFF(addr, elem->ms->addr);
+ return iova;
}
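A usage sketch for the renamed translation, assuming the rte_zmalloc() and rte_iova_t declarations from rte_malloc.h and rte_memory.h:

#include <rte_malloc.h>
#include <rte_memory.h>

/* allocate a DMA-able buffer and fetch its bus (IO) address */
static rte_iova_t
alloc_dma_buf(void **va)
{
	*va = rte_zmalloc("dma_buf", 4096, 4096);
	if (*va == NULL)
		return RTE_BAD_IOVA;
	return rte_malloc_virt2iova(*va);
}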
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 7efb76dc..09b758c9 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -54,6 +54,7 @@
#define SERVICE_F_REGISTERED (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
+#define SERVICE_F_START_CHECK (1 << 2)
/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
@@ -71,11 +72,12 @@ struct rte_service_spec_impl {
rte_atomic32_t execute_lock;
/* API set/get-able variables */
- int32_t runstate;
+ int8_t app_runstate;
+ int8_t comp_runstate;
uint8_t internal_flags;
/* per service statistics */
- uint32_t num_mapped_cores;
+ rte_atomic32_t num_mapped_cores;
uint64_t calls;
uint64_t cycles_spent;
} __rte_cache_aligned;
@@ -144,6 +146,13 @@ service_valid(uint32_t id)
return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
+/* validate ID and retrieve service pointer, or return error value */
+#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do { \
+ if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id)) \
+ return retval; \
+ service = &rte_services[id]; \
+} while (0)
+
/* Returns 1 if statistics should be collected for the service
 * Returns 0 if statistics should not be collected for the service
*/
@@ -156,21 +165,31 @@ service_stats_enabled(struct rte_service_spec_impl *impl)
static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
- return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
+ return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}
-int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
- int32_t enabled)
+int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
- struct rte_service_spec_impl *impl =
- (struct rte_service_spec_impl *)service;
- if (!impl)
- return -EINVAL;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
if (enabled)
- impl->internal_flags |= SERVICE_F_STATS_ENABLED;
+ s->internal_flags |= SERVICE_F_STATS_ENABLED;
else
- impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
+ s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
+
+ return 0;
+}
+
+int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
+{
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
+
+ if (enabled)
+ s->internal_flags |= SERVICE_F_START_CHECK;
+ else
+ s->internal_flags &= ~(SERVICE_F_START_CHECK);
return 0;
}
@@ -181,58 +200,42 @@ rte_service_get_count(void)
return rte_service_count;
}
-struct rte_service_spec *
-rte_service_get_by_id(uint32_t id)
+int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
{
- struct rte_service_spec *service = NULL;
- if (id < rte_service_count)
- service = (struct rte_service_spec *)&rte_services[id];
-
- return service;
-}
+ if (!service_id)
+ return -EINVAL;
-struct rte_service_spec *rte_service_get_by_name(const char *name)
-{
- struct rte_service_spec *service = NULL;
int i;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
if (service_valid(i) &&
strcmp(name, rte_services[i].spec.name) == 0) {
- service = (struct rte_service_spec *)&rte_services[i];
- break;
+ *service_id = i;
+ return 0;
}
}
- return service;
+ return -ENODEV;
}
const char *
-rte_service_get_name(const struct rte_service_spec *service)
+rte_service_get_name(uint32_t id)
{
- return service->name;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
+ return s->spec.name;
}
int32_t
-rte_service_probe_capability(const struct rte_service_spec *service,
- uint32_t capability)
+rte_service_probe_capability(uint32_t id, uint32_t capability)
{
- return service->capabilities & capability;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ return !!(s->spec.capabilities & capability);
}
int32_t
-rte_service_is_running(const struct rte_service_spec *spec)
-{
- const struct rte_service_spec_impl *impl =
- (const struct rte_service_spec_impl *)spec;
- if (!impl)
- return -EINVAL;
-
- return (impl->runstate == RUNSTATE_RUNNING) &&
- (impl->num_mapped_cores > 0);
-}
-
-int32_t
-rte_service_register(const struct rte_service_spec *spec)
+rte_service_component_register(const struct rte_service_spec *spec,
+ uint32_t *id_ptr)
{
uint32_t i;
int32_t free_slot = -1;
@@ -252,68 +255,161 @@ rte_service_register(const struct rte_service_spec *spec)
struct rte_service_spec_impl *s = &rte_services[free_slot];
s->spec = *spec;
- s->internal_flags |= SERVICE_F_REGISTERED;
+ s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
rte_smp_wmb();
rte_service_count++;
+ if (id_ptr)
+ *id_ptr = free_slot;
+
return 0;
}
int32_t
-rte_service_unregister(struct rte_service_spec *spec)
+rte_service_component_unregister(uint32_t id)
{
- struct rte_service_spec_impl *s = NULL;
- struct rte_service_spec_impl *spec_impl =
- (struct rte_service_spec_impl *)spec;
-
uint32_t i;
- uint32_t service_id;
- for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
- if (&rte_services[i] == spec_impl) {
- s = spec_impl;
- service_id = i;
- break;
- }
- }
-
- if (!s)
- return -EINVAL;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
rte_service_count--;
rte_smp_wmb();
s->internal_flags &= ~(SERVICE_F_REGISTERED);
+ /* clear the run-bit in all cores */
for (i = 0; i < RTE_MAX_LCORE; i++)
- lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);
+ lcore_states[i].service_mask &= ~(UINT64_C(1) << id);
- memset(&rte_services[service_id], 0,
- sizeof(struct rte_service_spec_impl));
+ memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
return 0;
}
int32_t
-rte_service_start(struct rte_service_spec *service)
+rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
- struct rte_service_spec_impl *s =
- (struct rte_service_spec_impl *)service;
- s->runstate = RUNSTATE_RUNNING;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ if (runstate)
+ s->comp_runstate = RUNSTATE_RUNNING;
+ else
+ s->comp_runstate = RUNSTATE_STOPPED;
+
rte_smp_wmb();
return 0;
}
int32_t
-rte_service_stop(struct rte_service_spec *service)
+rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
- struct rte_service_spec_impl *s =
- (struct rte_service_spec_impl *)service;
- s->runstate = RUNSTATE_STOPPED;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ if (runstate)
+ s->app_runstate = RUNSTATE_RUNNING;
+ else
+ s->app_runstate = RUNSTATE_STOPPED;
+
rte_smp_wmb();
return 0;
}
+int32_t
+rte_service_runstate_get(uint32_t id)
+{
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ rte_smp_rmb();
+
+ int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
+ int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
+
+ return (s->app_runstate == RUNSTATE_RUNNING) &&
+ (s->comp_runstate == RUNSTATE_RUNNING) &&
+ (check_disabled | lcore_mapped);
+}
+
+static inline void
+rte_service_runner_do_callback(struct rte_service_spec_impl *s,
+ struct core_state *cs, uint32_t service_idx)
+{
+ void *userdata = s->spec.callback_userdata;
+
+ if (service_stats_enabled(s)) {
+ uint64_t start = rte_rdtsc();
+ s->spec.callback(userdata);
+ uint64_t end = rte_rdtsc();
+ s->cycles_spent += end - start;
+ cs->calls_per_service[service_idx]++;
+ s->calls++;
+ } else
+ s->spec.callback(userdata);
+}
+
+
+static inline int32_t
+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
+{
+ if (!service_valid(i))
+ return -EINVAL;
+ struct rte_service_spec_impl *s = &rte_services[i];
+ if (s->comp_runstate != RUNSTATE_RUNNING ||
+ s->app_runstate != RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i)))
+ return -ENOEXEC;
+
+ /* Check whether cmpset is needed: if the service is MT safe or
+ * at most one core is mapped, atomic ops are not required.
+ */
+ const int use_atomics = (service_mt_safe(s) == 0) &&
+ (rte_atomic32_read(&s->num_mapped_cores) > 1);
+ if (use_atomics) {
+ if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+ return -EBUSY;
+
+ rte_service_runner_do_callback(s, cs, i);
+ rte_atomic32_clear(&s->execute_lock);
+ } else
+ rte_service_runner_do_callback(s, cs, i);
+
+ return 0;
+}
+
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
+ uint32_t serialize_mt_unsafe)
+{
+ /* run service on calling core, using all-ones as the service mask */
+ if (!service_valid(id))
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[rte_lcore_id()];
+ struct rte_service_spec_impl *s = &rte_services[id];
+
+ /* Atomically add this core to the mapped cores first, then check
+ * whether we can run the service. This avoids a race between
+ * checking the value and incrementing the mapped count.
+ */
+ if (serialize_mt_unsafe)
+ rte_atomic32_inc(&s->num_mapped_cores);
+
+ if (service_mt_safe(s) == 0 &&
+ rte_atomic32_read(&s->num_mapped_cores) > 1) {
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+ return -EBUSY;
+ }
+
+ int ret = service_run(id, cs, UINT64_MAX);
+
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+
+ return ret;
+}
+
static int32_t
rte_service_runner_func(void *arg)
{
@@ -324,35 +420,10 @@ rte_service_runner_func(void *arg)
while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
const uint64_t service_mask = cs->service_mask;
- for (i = 0; i < rte_service_count; i++) {
- struct rte_service_spec_impl *s = &rte_services[i];
- if (s->runstate != RUNSTATE_RUNNING ||
- !(service_mask & (UINT64_C(1) << i)))
- continue;
- /* check do we need cmpset, if MT safe or <= 1 core
- * mapped, atomic ops are not required.
- */
- const int need_cmpset = !((service_mt_safe(s) == 0) &&
- (s->num_mapped_cores > 1));
- uint32_t *lock = (uint32_t *)&s->execute_lock;
-
- if (need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
- void *userdata = s->spec.callback_userdata;
-
- if (service_stats_enabled(s)) {
- uint64_t start = rte_rdtsc();
- s->spec.callback(userdata);
- uint64_t end = rte_rdtsc();
- s->cycles_spent += end - start;
- cs->calls_per_service[i]++;
- s->calls++;
- } else
- s->spec.callback(userdata);
-
- if (need_cmpset)
- rte_atomic32_clear(&s->execute_lock);
- }
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ /* return value ignored, as it does not alter control flow */
+ service_run(i, cs, service_mask);
}
rte_smp_rmb();
@@ -397,6 +468,19 @@ rte_service_lcore_list(uint32_t array[], uint32_t n)
}
int32_t
+rte_service_lcore_count_services(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -ENOTSUP;
+
+ return __builtin_popcountll(cs->service_mask);
+}
+
+int32_t
rte_service_start_with_defaults(void)
{
/* create a default mapping from cores to services, then start the
@@ -407,7 +491,7 @@ rte_service_start_with_defaults(void)
uint32_t count = rte_service_get_count();
int32_t lcore_iter = 0;
- uint32_t ids[RTE_MAX_LCORE];
+ uint32_t ids[RTE_MAX_LCORE] = {0};
int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
if (lcore_count == 0)
@@ -417,16 +501,12 @@ rte_service_start_with_defaults(void)
rte_service_lcore_start(ids[i]);
for (i = 0; i < count; i++) {
- struct rte_service_spec *s = rte_service_get_by_id(i);
- if (!s)
- return -EINVAL;
-
/* do 1:1 core mapping here, with each service getting
* assigned a single core by default. Adding multiple services
* should multiplex to a single core, or 1:1 if there are the
 * same number of services as service-cores
*/
- ret = rte_service_enable_on_lcore(s, ids[lcore_iter]);
+ ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
if (ret)
return -ENODEV;
@@ -434,7 +514,7 @@ rte_service_start_with_defaults(void)
if (lcore_iter >= lcore_count)
lcore_iter = 0;
- ret = rte_service_start(s);
+ ret = rte_service_runstate_set(i, 1);
if (ret)
return -ENOEXEC;
}
@@ -467,43 +547,40 @@ service_update(struct rte_service_spec *service, uint32_t lcore,
if (set) {
if (*set) {
lcore_states[lcore].service_mask |= sid_mask;
- rte_services[sid].num_mapped_cores++;
+ rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
} else {
lcore_states[lcore].service_mask &= ~(sid_mask);
- rte_services[sid].num_mapped_cores--;
+ rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
}
}
if (enabled)
- *enabled = (lcore_states[lcore].service_mask & (sid_mask));
+ *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
rte_smp_wmb();
return 0;
}
-int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
- uint32_t lcore)
-{
- uint32_t enabled;
- int ret = service_update(service, lcore, 0, &enabled);
- if (ret == 0)
- return enabled;
- return -EINVAL;
-}
-
int32_t
-rte_service_enable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
- uint32_t on = 1;
- return service_update(service, lcore, &on, 0);
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ uint32_t on = enabled > 0;
+ return service_update(&s->spec, lcore, &on, 0);
}
int32_t
-rte_service_disable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
- uint32_t off = 0;
- return service_update(service, lcore, &off, 0);
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ uint32_t enabled;
+ int ret = service_update(&s->spec, lcore, 0, &enabled);
+ if (ret == 0)
+ return enabled;
+ return ret;
}
int32_t rte_service_lcore_reset_all(void)
@@ -516,7 +593,7 @@ int32_t rte_service_lcore_reset_all(void)
lcore_states[i].runstate = RUNSTATE_STOPPED;
}
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
- rte_services[i].num_mapped_cores = 0;
+ rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
rte_smp_wmb();
@@ -552,7 +629,8 @@ rte_service_lcore_add(uint32_t lcore)
lcore_states[lcore].runstate = RUNSTATE_STOPPED;
rte_smp_wmb();
- return 0;
+
+ return rte_eal_wait_lcore(lcore);
}
int32_t
@@ -607,12 +685,12 @@ rte_service_lcore_stop(uint32_t lcore)
return -EALREADY;
uint32_t i;
+ uint64_t service_mask = lcore_states[lcore].service_mask;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
- int32_t enabled =
- lcore_states[i].service_mask & (UINT64_C(1) << i);
- int32_t service_running = rte_services[i].runstate !=
- RUNSTATE_STOPPED;
- int32_t only_core = rte_services[i].num_mapped_cores == 1;
+ int32_t enabled = service_mask & (UINT64_C(1) << i);
+ int32_t service_running = rte_service_runstate_get(i);
+ int32_t only_core = (1 ==
+ rte_atomic32_read(&rte_services[i].num_mapped_cores));
/* if the core is mapped, and the service is running, and this
* is the only core that is mapped, the service would cease to
@@ -667,28 +745,34 @@ service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
fprintf(f, "\n");
}
-int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
+int32_t rte_service_dump(FILE *f, uint32_t id)
{
uint32_t i;
+ int print_one = (id != UINT32_MAX);
uint64_t total_cycles = 0;
- for (i = 0; i < rte_service_count; i++) {
+
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
if (!service_valid(i))
continue;
total_cycles += rte_services[i].cycles_spent;
}
- if (service) {
- struct rte_service_spec_impl *s =
- (struct rte_service_spec_impl *)service;
+ /* print only the specified service */
+ if (print_one) {
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
fprintf(f, "Service %s Summary\n", s->spec.name);
uint32_t reset = 0;
rte_service_dump_one(f, s, total_cycles, reset);
return 0;
}
+ /* print all services, as UINT32_MAX was passed as id */
fprintf(f, "Services Summary\n");
- for (i = 0; i < rte_service_count; i++) {
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+ if (!service_valid(i))
+ continue;
uint32_t reset = 1;
rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
}
@@ -698,7 +782,7 @@ int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
if (lcore_config[i].core_role != ROLE_SERVICE)
continue;
- uint32_t reset = 0;
+ uint32_t reset = 1;
service_dump_calls_per_lcore(f, i, reset);
}
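An application-side sketch of the id-based API after this refactor (service name and wrapper hypothetical): look the service up, enable its application runstate, then either map it to a service lcore or run an iteration inline.

#include <rte_service.h>

static int
enable_and_run(const char *name, uint32_t lcore)
{
	uint32_t id;
	int ret;

	ret = rte_service_get_by_name(name, &id);
	if (ret)
		return ret; /* -EINVAL or -ENODEV */

	/* application runstate: allow the service to run */
	ret = rte_service_runstate_set(id, 1);
	if (ret)
		return ret;

	/* either map it to a service lcore ... */
	ret = rte_service_map_lcore_set(id, lcore, 1);
	if (ret)
		return ret;

	/* ... or run one iteration on the calling application lcore,
	 * serializing if the service is not multi-thread safe */
	return rte_service_run_iter_on_app_lcore(id, 1);
}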
diff --git a/lib/librte_eal/linuxapp/Makefile b/lib/librte_eal/linuxapp/Makefile
index 4794696b..2ebdf313 100644
--- a/lib/librte_eal/linuxapp/Makefile
+++ b/lib/librte_eal/linuxapp/Makefile
@@ -35,7 +35,5 @@ DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal
DIRS-$(CONFIG_RTE_EAL_IGB_UIO) += igb_uio
DIRS-$(CONFIG_RTE_KNI_KMOD) += kni
DEPDIRS-kni := eal
-DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += xen_dom0
-DEPDIRS-xen_dom0 := eal
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 90bca4d6..5a7b8b2a 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -34,10 +34,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eal.a
ARCH_DIR ?= $(RTE_ARCH)
-EXPORT_MAP := rte_eal_version.map
+EXPORT_MAP := ../../rte_eal_version.map
VPATH += $(RTE_SDK)/lib/librte_eal/common/arch/$(ARCH_DIR)
-LIBABIVER := 5
+LIBABIVER := 6
VPATH += $(RTE_SDK)/lib/librte_eal/common
@@ -58,16 +58,10 @@ endif
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_hugepage_info.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_memory.c
-ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_xen_memory.c
-endif
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_mp_sync.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci_uio.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci_vfio.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_debug.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c
@@ -80,9 +74,6 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_launch.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_vdev.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_pci.c
-SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_pci_uio.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_tailqs.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_errno.c
@@ -104,6 +95,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_service.c
# from arch dir
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_cpuflags.c
SRCS-$(CONFIG_RTE_ARCH_X86) += rte_spinlock.c
+SRCS-y += rte_cycles.c
CFLAGS_eal_common_cpuflags.o := $(CPUFLAGS_LIST)
@@ -116,13 +108,11 @@ CFLAGS_eal_thread.o := -D_GNU_SOURCE
CFLAGS_eal_log.o := -D_GNU_SOURCE
CFLAGS_eal_common_log.o := -D_GNU_SOURCE
CFLAGS_eal_hugepage_info.o := -D_GNU_SOURCE
-CFLAGS_eal_pci.o := -D_GNU_SOURCE
-CFLAGS_eal_pci_uio.o := -D_GNU_SOURCE
-CFLAGS_eal_pci_vfio.o := -D_GNU_SOURCE
CFLAGS_eal_common_whitelist.o := -D_GNU_SOURCE
CFLAGS_eal_common_options.o := -D_GNU_SOURCE
CFLAGS_eal_common_thread.o := -D_GNU_SOURCE
CFLAGS_eal_common_lcore.o := -D_GNU_SOURCE
+CFLAGS_rte_cycles.o := -D_GNU_SOURCE
# workaround for a gcc bug with noreturn attribute
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
@@ -130,7 +120,7 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_eal_thread.o += -Wno-return-type
endif
-INC := rte_interrupts.h rte_kni_common.h rte_dom0_common.h
+INC := rte_kni_common.h
SYMLINK-$(CONFIG_RTE_EXEC_ENV_LINUXAPP)-include/exec-env := \
$(addprefix include/exec-env/,$(INC))
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 48f12f44..229eec9f 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -56,7 +56,6 @@
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
@@ -71,12 +70,12 @@
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
#include <rte_bus.h>
-#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
+#include <rte_vfio.h>
#include "eal_private.h"
#include "eal_thread.h"
@@ -121,6 +120,13 @@ struct internal_config internal_config;
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
+/* Return mbuf pool ops name */
+const char *
+rte_eal_mbuf_default_mempool_ops(void)
+{
+ return internal_config.mbuf_pool_ops_name;
+}
+
/* Return a pointer to the configuration structure */
struct rte_config *
rte_eal_get_configuration(void)
@@ -128,6 +134,12 @@ rte_eal_get_configuration(void)
return &rte_config;
}
+enum rte_iova_mode
+rte_eal_iova_mode(void)
+{
+ return rte_eal_get_configuration()->iova_mode;
+}
+
/* parse a sysfs (or other) file containing one integer value */
int
eal_parse_sysfs_value(const char *filename, unsigned long *val)
@@ -354,7 +366,6 @@ eal_usage(const char *prgname)
" --"OPT_BASE_VIRTADDR" Base virtual address\n"
" --"OPT_CREATE_UIO_DEV" Create /dev/uioX (usually done by hotplug)\n"
" --"OPT_VFIO_INTR" Interrupt mode for VFIO (legacy|msi|msix)\n"
- " --"OPT_XEN_DOM0" Support running on Xen dom0 without hugetlbfs\n"
"\n");
/* Allow the application to print its usage message too if hook is set */
if ( rte_application_usage_hook ) {
@@ -555,25 +566,12 @@ eal_parse_args(int argc, char **argv)
eal_usage(prgname);
exit(EXIT_SUCCESS);
- /* long options */
- case OPT_XEN_DOM0_NUM:
-#ifdef RTE_LIBRTE_XEN_DOM0
- internal_config.xen_dom0_support = 1;
-#else
- RTE_LOG(ERR, EAL, "Can't support DPDK app "
- "running on Dom0, please configure"
- " RTE_LIBRTE_XEN_DOM0=y\n");
- ret = -1;
- goto out;
-#endif
- break;
-
case OPT_HUGE_DIR_NUM:
- internal_config.hugepage_dir = optarg;
+ internal_config.hugepage_dir = strdup(optarg);
break;
case OPT_FILE_PREFIX_NUM:
- internal_config.hugefile_prefix = optarg;
+ internal_config.hugefile_prefix = strdup(optarg);
break;
case OPT_SOCKET_MEM_NUM:
@@ -610,6 +608,10 @@ eal_parse_args(int argc, char **argv)
internal_config.create_uio_dev = 1;
break;
+ case OPT_MBUF_POOL_OPS_NAME_NUM:
+ internal_config.mbuf_pool_ops_name = optarg;
+ break;
+
default:
if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
RTE_LOG(ERR, EAL, "Option %c is not supported "
@@ -641,15 +643,6 @@ eal_parse_args(int argc, char **argv)
goto out;
}
- /* --xen-dom0 doesn't make sense with --socket-mem */
- if (internal_config.xen_dom0_support && internal_config.force_sockets == 1) {
- RTE_LOG(ERR, EAL, "Options --"OPT_SOCKET_MEM" cannot be specified "
- "together with --"OPT_XEN_DOM0"\n");
- eal_usage(prgname);
- ret = -1;
- goto out;
- }
-
if (optind >= 0)
argv[optind-1] = prgname;
ret = optind-1;
@@ -716,10 +709,9 @@ static int rte_eal_vfio_setup(void)
{
int vfio_enabled = 0;
- if (!internal_config.no_pci) {
- pci_vfio_enable();
- vfio_enabled |= pci_vfio_is_enabled();
- }
+ if (rte_vfio_enable("vfio"))
+ return -1;
+ vfio_enabled = rte_vfio_is_enabled("vfio");
if (vfio_enabled) {
@@ -792,9 +784,40 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ if (eal_plugins_init() < 0) {
+ rte_eal_init_alert("Cannot init plugins\n");
+ rte_errno = EINVAL;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ if (eal_option_device_parse()) {
+ rte_errno = ENODEV;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ if (rte_bus_scan()) {
+ rte_eal_init_alert("Cannot scan the buses for devices\n");
+ rte_errno = ENODEV;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
+
+ /* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */
+ rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
+
+ /* Workaround for KNI which requires physical address to work */
+ if (rte_eal_get_configuration()->iova_mode == RTE_IOVA_VA &&
+ rte_eal_check_module("rte_kni") == 1) {
+ rte_eal_get_configuration()->iova_mode = RTE_IOVA_PA;
+ RTE_LOG(WARNING, EAL,
+ "Some devices want IOVA as VA but PA will be used because.. "
+ "KNI module inserted\n");
+ }
+
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
- internal_config.xen_dom0_support == 0 &&
eal_hugepage_info_init() < 0) {
rte_eal_init_alert("Cannot get hugepage information.");
rte_errno = EACCES;
@@ -873,9 +896,6 @@ rte_eal_init(int argc, char **argv)
eal_check_mem_on_local_socket();
- if (eal_plugins_init() < 0)
- rte_eal_init_alert("Cannot init plugins\n");
-
eal_thread_init_master(rte_config.master_lcore);
ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
@@ -889,17 +909,6 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (eal_option_device_parse()) {
- rte_errno = ENODEV;
- return -1;
- }
-
- if (rte_bus_scan()) {
- rte_eal_init_alert("Cannot scan the buses for devices\n");
- rte_errno = ENODEV;
- return -1;
- }
-
RTE_LCORE_FOREACH_SLAVE(i) {
/*
@@ -983,6 +992,22 @@ int rte_eal_has_hugepages(void)
return ! internal_config.no_hugetlbfs;
}
+int rte_eal_has_pci(void)
+{
+ return !internal_config.no_pci;
+}
+
+int rte_eal_create_uio_dev(void)
+{
+ return internal_config.create_uio_dev;
+}
+
+enum rte_intr_mode
+rte_eal_vfio_intr_mode(void)
+{
+ return internal_config.vfio_intr_mode;
+}
+
int
rte_eal_check_module(const char *module_name)
{
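A sketch of branching on the detected mode through the new rte_eal_iova_mode() accessor, assuming the rte_iova_mode enum declared alongside it:

#include <rte_eal.h>

static const char *
iova_mode_str(void)
{
	switch (rte_eal_iova_mode()) {
	case RTE_IOVA_VA:
		return "VA"; /* DMA uses virtual addresses */
	case RTE_IOVA_PA:
		return "PA"; /* DMA uses physical addresses */
	default:
		return "DC"; /* not decided / don't care */
	}
}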
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index fbae4613..8e4a775b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -40,7 +40,6 @@
#include <sys/timerfd.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_common.h>
diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 7a21e8f6..86e174fc 100644
--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -46,7 +46,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 3e9ac41e..1c20693d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -51,7 +51,6 @@
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -60,7 +59,6 @@
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
-#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
@@ -914,7 +912,7 @@ static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
union rte_intr_read_buffer buf;
- int bytes_read = 1;
+ int bytes_read = 0;
int nbytes;
switch (intr_handle->type) {
@@ -930,11 +928,9 @@ eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
break;
#endif
case RTE_INTR_HANDLE_VDEV:
- /* for vdev, fd points to:
- * a. eventfd which does not need to read out;
- * b. datapath fd which needs PMD to read out.
- */
- return;
+ bytes_read = intr_handle->efd_counter_size;
+ /* For vdev, number of bytes to read is set by driver */
+ break;
case RTE_INTR_HANDLE_EXT:
return;
default:
@@ -947,6 +943,8 @@ eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
* read out to clear the ready-to-be-read flag
* for epoll_wait.
*/
+ if (bytes_read == 0)
+ return;
do {
nbytes = read(fd, &buf, bytes_read);
if (nbytes < 0) {
@@ -1206,7 +1204,12 @@ rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
intr_handle->nb_efd = n;
intr_handle->max_intr = NB_OTHER_INTR + n;
} else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
- /* do nothing, and let vdev driver to initialize this struct */
+ /* only check here; initialization is done in the vdev driver */
+ if (intr_handle->efd_counter_size >
+ sizeof(union rte_intr_read_buffer)) {
+ RTE_LOG(ERR, EAL, "the efd_counter_size is oversized");
+ return -EINVAL;
+ }
} else {
intr_handle->efds[0] = intr_handle->fd;
intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
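A hypothetical vdev PMD sketch showing the contract enforced above: the driver declares how many bytes each interrupt read consumes, and the value must fit in the EAL's read buffer.

#include <stdint.h>
#include <rte_interrupts.h>

/* hypothetical vdev PMD interrupt setup */
static int
vdev_intr_setup(struct rte_intr_handle *ih, int eventfd_fd, uint32_t nb_rxq)
{
	ih->type = RTE_INTR_HANDLE_VDEV;
	ih->fd = eventfd_fd;
	/* each read() consumes an 8-byte eventfd counter; anything
	 * larger than union rte_intr_read_buffer is now rejected */
	ih->efd_counter_size = sizeof(uint64_t);
	return rte_intr_efd_enable(ih, nb_rxq);
}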
diff --git a/lib/librte_eal/linuxapp/eal/eal_log.c b/lib/librte_eal/linuxapp/eal/eal_log.c
index e3a50aa3..c088bd9b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_log.c
+++ b/lib/librte_eal/linuxapp/eal/eal_log.c
@@ -39,7 +39,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 52791282..a54b822a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -59,7 +59,6 @@
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
@@ -75,13 +74,6 @@
#define PFN_MASK_SIZE 8
-#ifdef RTE_LIBRTE_XEN_DOM0
-int rte_xen_dom0_supported(void)
-{
- return internal_config.xen_dom0_support;
-}
-#endif
-
/**
* @file
* Huge page mapping under linux
@@ -106,10 +98,6 @@ test_phys_addrs_available(void)
uint64_t tmp;
phys_addr_t physaddr;
- /* For dom0, phys addresses can always be available */
- if (rte_xen_dom0_supported())
- return;
-
if (!rte_eal_has_hugepages()) {
RTE_LOG(ERR, EAL,
"Started without hugepages support, physical addresses not available\n");
@@ -119,10 +107,11 @@ test_phys_addrs_available(void)
physaddr = rte_mem_virt2phy(&tmp);
if (physaddr == RTE_BAD_PHYS_ADDR) {
- RTE_LOG(ERR, EAL,
- "Cannot obtain physical addresses: %s. "
- "Only vfio will function.\n",
- strerror(errno));
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ RTE_LOG(ERR, EAL,
+ "Cannot obtain physical addresses: %s. "
+ "Only vfio will function.\n",
+ strerror(errno));
phys_addrs_available = false;
}
}
@@ -139,32 +128,9 @@ rte_mem_virt2phy(const void *virtaddr)
int page_size;
off_t offset;
- /* when using dom0, /proc/self/pagemap always returns 0, check in
- * dpdk memory by browsing the memsegs */
- if (rte_xen_dom0_supported()) {
- struct rte_mem_config *mcfg;
- struct rte_memseg *memseg;
- unsigned i;
-
- mcfg = rte_eal_get_configuration()->mem_config;
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- memseg = &mcfg->memseg[i];
- if (memseg->addr == NULL)
- break;
- if (virtaddr > memseg->addr &&
- virtaddr < RTE_PTR_ADD(memseg->addr,
- memseg->len)) {
- return memseg->phys_addr +
- RTE_PTR_DIFF(virtaddr, memseg->addr);
- }
- }
-
- return RTE_BAD_PHYS_ADDR;
- }
-
/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
if (!phys_addrs_available)
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
/* standard page size */
page_size = getpagesize();
@@ -173,7 +139,7 @@ rte_mem_virt2phy(const void *virtaddr)
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
__func__, strerror(errno));
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
}
virt_pfn = (unsigned long)virtaddr / page_size;
@@ -182,7 +148,7 @@ rte_mem_virt2phy(const void *virtaddr)
RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
__func__, strerror(errno));
close(fd);
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
}
retval = read(fd, &page, PFN_MASK_SIZE);
@@ -190,12 +156,12 @@ rte_mem_virt2phy(const void *virtaddr)
if (retval < 0) {
RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
__func__, strerror(errno));
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
} else if (retval != PFN_MASK_SIZE) {
RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
"but expected %d:\n",
__func__, retval, PFN_MASK_SIZE);
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
}
/*
@@ -203,7 +169,7 @@ rte_mem_virt2phy(const void *virtaddr)
* pagemap.txt in linux Documentation)
*/
if ((page & 0x7fffffffffffffULL) == 0)
- return RTE_BAD_PHYS_ADDR;
+ return RTE_BAD_IOVA;
physaddr = ((page & 0x7fffffffffffffULL) * page_size)
+ ((unsigned long)virtaddr % page_size);
@@ -211,6 +177,14 @@ rte_mem_virt2phy(const void *virtaddr)
return physaddr;
}
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ return (uintptr_t)virtaddr;
+ return rte_mem_virt2phy(virtaddr);
+}
+
/*
* For each hugepage in hugepg_tbl, fill the physaddr value. We find
* it by browsing the /proc/self/pagemap special file.
@@ -716,6 +690,8 @@ create_shared_memory(const char *filename, const size_t mem_size)
}
retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
+ if (retval == MAP_FAILED)
+ return NULL;
return retval;
}
@@ -1059,7 +1035,10 @@ rte_eal_hugepage_init(void)
strerror(errno));
return -1;
}
- mcfg->memseg[0].phys_addr = RTE_BAD_PHYS_ADDR;
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ mcfg->memseg[0].iova = (uintptr_t)addr;
+ else
+ mcfg->memseg[0].iova = RTE_BAD_IOVA;
mcfg->memseg[0].addr = addr;
mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
mcfg->memseg[0].len = internal_config.memory;
@@ -1067,17 +1046,6 @@ rte_eal_hugepage_init(void)
return 0;
}
-/* check if app runs on Xen Dom0 */
- if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
- /* use dom0_mm kernel driver to init memory */
- if (rte_xen_dom0_memory_init() < 0)
- return -1;
- else
- return 0;
-#endif
- }
-
/* calculate total number of hugepages available. at this point we haven't
* yet started sorting them so they all are on socket 0 */
for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -1319,7 +1287,7 @@ rte_eal_hugepage_init(void)
if (j == RTE_MAX_MEMSEG)
break;
- mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+ mcfg->memseg[j].iova = hugepage[i].physaddr;
mcfg->memseg[j].addr = hugepage[i].final_va;
mcfg->memseg[j].len = hugepage[i].size;
mcfg->memseg[j].socket_id = hugepage[i].socket_id;
@@ -1330,7 +1298,7 @@ rte_eal_hugepage_init(void)
#ifdef RTE_ARCH_PPC_64
/* Use the phy and virt address of the last page as segment
* address for IBM Power architecture */
- mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+ mcfg->memseg[j].iova = hugepage[i].physaddr;
mcfg->memseg[j].addr = hugepage[i].final_va;
#endif
mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
@@ -1400,17 +1368,6 @@ rte_eal_hugepage_attach(void)
test_phys_addrs_available();
- if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
- if (rte_xen_dom0_memory_attach() < 0) {
- RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
- "process\n");
- return -1;
- }
- return 0;
-#endif
- }
-
fd_zero = open("/dev/zero", O_RDONLY);
if (fd_zero < 0) {
RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
@@ -1542,7 +1499,7 @@ error:
return -1;
}
-bool
+int
rte_eal_using_phys_addrs(void)
{
return phys_addrs_available;
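The new helper in one line: in RTE_IOVA_VA mode the virtual address itself is the IOVA, otherwise the pagemap-based physical lookup applies. A trivial caller sketch:

#include <rte_memory.h>

/* returns 0 if the buffer has no usable IOVA (DMA not possible) */
static int
buf_has_iova(const void *addr)
{
	return rte_mem_virt2iova(addr) != RTE_BAD_IOVA;
}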
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 6481eeea..e9a579e4 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -46,7 +46,6 @@
#include <rte_launch.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_eal.h>
#include <rte_lcore.h>
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index afa32f5c..24349dab 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_debug.h>
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 946df7e3..58f0123e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -39,6 +39,7 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
+#include <rte_vfio.h>
#include "eal_filesystem.h"
#include "eal_vfio.h"
@@ -68,8 +69,8 @@ vfio_get_group_fd(int iommu_group_no)
{
int i;
int vfio_group_fd;
- int group_idx = -1;
char filename[PATH_MAX];
+ struct vfio_group *cur_grp;
/* check if we already have the group descriptor open */
for (i = 0; i < VFIO_MAX_GROUPS; i++)
@@ -85,12 +86,12 @@ vfio_get_group_fd(int iommu_group_no)
/* Now lets get an index for the new group */
for (i = 0; i < VFIO_MAX_GROUPS; i++)
if (vfio_cfg.vfio_groups[i].group_no == -1) {
- group_idx = i;
+ cur_grp = &vfio_cfg.vfio_groups[i];
break;
}
/* This should not happen */
- if (group_idx == -1) {
+ if (i == VFIO_MAX_GROUPS) {
RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
return -1;
}
@@ -123,8 +124,8 @@ vfio_get_group_fd(int iommu_group_no)
/* noiommu group found */
}
- vfio_cfg.vfio_groups[group_idx].group_no = iommu_group_no;
- vfio_cfg.vfio_groups[group_idx].fd = vfio_group_fd;
+ cur_grp->group_no = iommu_group_no;
+ cur_grp->fd = vfio_group_fd;
vfio_cfg.vfio_active_groups++;
return vfio_group_fd;
}
@@ -157,9 +158,12 @@ vfio_get_group_fd(int iommu_group_no)
return 0;
case SOCKET_OK:
vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
- /* if we got the fd, return it */
+ /* if we got the fd, store it and return it */
if (vfio_group_fd > 0) {
close(socket_fd);
+ cur_grp->group_no = iommu_group_no;
+ cur_grp->fd = vfio_group_fd;
+ vfio_cfg.vfio_active_groups++;
return vfio_group_fd;
}
/* fall-through on error */
@@ -280,7 +284,7 @@ clear_group(int vfio_group_fd)
}
int
-vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
int *vfio_dev_fd, struct vfio_device_info *device_info)
{
struct vfio_group_status group_status = {
@@ -412,7 +416,7 @@ vfio_setup_device(const char *sysfs_base, const char *dev_addr,
}
int
-vfio_release_device(const char *sysfs_base, const char *dev_addr,
+rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
int vfio_dev_fd)
{
struct vfio_group_status group_status = {
@@ -474,7 +478,7 @@ vfio_release_device(const char *sysfs_base, const char *dev_addr,
}
int
-vfio_enable(const char *modname)
+rte_vfio_enable(const char *modname)
{
/* initialize group list */
int i;
@@ -489,7 +493,7 @@ vfio_enable(const char *modname)
/* inform the user that we are probing for VFIO */
RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
- /* check if vfio-pci module is loaded */
+ /* check if vfio module is loaded */
vfio_available = rte_eal_check_module(modname);
/* return error directly */
@@ -519,7 +523,7 @@ vfio_enable(const char *modname)
}
int
-vfio_is_enabled(const char *modname)
+rte_vfio_is_enabled(const char *modname)
{
const int mod_available = rte_eal_check_module(modname);
return vfio_cfg.vfio_enabled && mod_available;
@@ -706,7 +710,10 @@ vfio_type1_dma_map(int vfio_container_fd)
dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
dma_map.vaddr = ms[i].addr_64;
dma_map.size = ms[i].len;
- dma_map.iova = ms[i].phys_addr;
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dma_map.iova = dma_map.vaddr;
+ else
+ dma_map.iova = ms[i].iova;
dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
@@ -759,10 +766,19 @@ vfio_spapr_dma_map(int vfio_container_fd)
return -1;
}
- /* calculate window size based on number of hugepages configured */
- create.window_size = rte_eal_get_physmem_size();
+ /* create DMA window from 0 to max(iova + len) */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (ms[i].addr == NULL)
+ break;
+
+ create.window_size = RTE_MAX(create.window_size,
+ ms[i].iova + ms[i].len);
+ }
+
+ /* sPAPR requires window size to be a power of 2 */
+ create.window_size = rte_align64pow2(create.window_size);
create.page_shift = __builtin_ctzll(ms->hugepage_sz);
- create.levels = 2;
+ create.levels = 1;
ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
if (ret) {
@@ -771,6 +787,11 @@ vfio_spapr_dma_map(int vfio_container_fd)
return -1;
}
+ if (create.start_addr != 0) {
+ RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
+ return -1;
+ }
+
/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
struct vfio_iommu_type1_dma_map dma_map;
@@ -792,7 +813,10 @@ vfio_spapr_dma_map(int vfio_container_fd)
dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
dma_map.vaddr = ms[i].addr_64;
dma_map.size = ms[i].len;
- dma_map.iova = ms[i].phys_addr;
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dma_map.iova = dma_map.vaddr;
+ else
+ dma_map.iova = ms[i].iova;
dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
VFIO_DMA_MAP_FLAG_WRITE;
@@ -816,4 +840,23 @@ vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
return 0;
}
+int
+rte_vfio_noiommu_is_enabled(void)
+{
+ int fd, ret, cnt __rte_unused;
+ char c;
+
+ ret = -1;
+ fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ cnt = read(fd, &c, 1);
+ if (c == 'Y')
+ ret = 1;
+
+ close(fd);
+ return ret;
+}
+
#endif
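The sPAPR window sizing above, in isolation: the DMA window must cover the highest IOVA in use and be a power of two, so segments ending at, say, 5 GiB yield an 8 GiB window. A sketch using the same helper:

#include <stdint.h>
#include <rte_common.h> /* rte_align64pow2() */

/* window size for a given highest segment end address;
 * e.g. 5 GiB in yields 8 GiB out */
static uint64_t
spapr_window_size(uint64_t max_iova_end)
{
	return rte_align64pow2(max_iova_end);
}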
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h
index 5ff63e5d..ba7892b7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.h
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h
@@ -37,20 +37,18 @@
/*
* determine if VFIO is present on the system
*/
-#ifdef RTE_EAL_VFIO
+#if !defined(VFIO_PRESENT) && defined(RTE_EAL_VFIO)
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
-#include <linux/vfio.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
-#define RTE_PCI_MSIX_TABLE_BIR 0x7
-#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
-#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
+#define VFIO_PRESENT
#else
-#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
-#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
-#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
-#endif
+#pragma message("VFIO configured but not supported by this kernel, disabling.")
+#endif /* kernel version >= 3.6.0 */
+#endif /* RTE_EAL_VFIO */
+
+#ifdef VFIO_PRESENT
+
+#include <linux/vfio.h>
#define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
@@ -144,13 +142,6 @@ struct vfio_config {
struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
};
-#define VFIO_DIR "/dev/vfio"
-#define VFIO_CONTAINER_PATH "/dev/vfio/vfio"
-#define VFIO_GROUP_FMT "/dev/vfio/%u"
-#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u"
-#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL)
-#define VFIO_GET_REGION_IDX(x) (x >> 40)
-
/* DMA mapping function prototype.
* Takes VFIO container fd as a parameter.
* Returns 0 on success, -1 on error.
@@ -190,24 +181,6 @@ vfio_get_group_fd(int iommu_group_no);
int
clear_group(int vfio_group_fd);
-/**
- * Setup vfio_cfg for the device identified by its address. It discovers
- * the configured I/O MMU groups or sets a new one for the device. If a new
- * groups is assigned, the DMA mapping is performed.
- * Returns 0 on success, a negative value on failure and a positive value in
- * case the given device cannot be managed this way.
- */
-int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
- int *vfio_dev_fd, struct vfio_device_info *device_info);
-
-int vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
-
-int vfio_enable(const char *modname);
-int vfio_is_enabled(const char *modname);
-
-int pci_vfio_enable(void);
-int pci_vfio_is_enabled(void);
-
int vfio_mp_sync_setup(void);
#define SOCKET_REQ_CONTAINER 0x100
@@ -217,8 +190,6 @@ int vfio_mp_sync_setup(void);
#define SOCKET_NO_FD 0x1
#define SOCKET_ERR 0xFF
-#define VFIO_PRESENT
-#endif /* kernel version */
-#endif /* RTE_EAL_VFIO */
+#endif /* VFIO_PRESENT */
#endif /* EAL_VFIO_H_ */
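With the guard restructured, VFIO_PRESENT is defined before any VFIO type is referenced, and rte_vfio.h follows the same pattern; a consumer sketch (wrapper hypothetical):

#include <rte_vfio.h>

/* compiles whether or not VFIO support is available */
static int
try_setup_device(const char *sysfs_base, const char *dev_addr, int *dev_fd)
{
#ifdef VFIO_PRESENT
	struct vfio_device_info info = { .argsz = sizeof(info) };

	return rte_vfio_setup_device(sysfs_base, dev_addr, dev_fd, &info);
#else
	(void)sysfs_base;
	(void)dev_addr;
	(void)dev_fd;
	return -1;
#endif
}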
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
index 7e8095cb..b53ed7eb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
@@ -49,12 +49,12 @@
#endif
#include <rte_log.h>
-#include <rte_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
+#include <rte_vfio.h>
#include "eal_filesystem.h"
-#include "eal_pci_init.h"
+#include "eal_vfio.h"
#include "eal_thread.h"
/**
@@ -301,7 +301,8 @@ vfio_mp_sync_thread(void __rte_unused * arg)
vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
else
vfio_mp_sync_send_fd(conn_sock, fd);
- close(fd);
+ if (fd >= 0)
+ close(fd);
break;
case SOCKET_REQ_GROUP:
/* wait for group number */
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
deleted file mode 100644
index 19db1cb5..00000000
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ /dev/null
@@ -1,381 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <errno.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/queue.h>
-#include <sys/file.h>
-#include <unistd.h>
-#include <limits.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_common.h>
-#include <rte_string_fns.h>
-
-#include "eal_private.h"
-#include "eal_internal_cfg.h"
-#include "eal_filesystem.h"
-#include <exec-env/rte_dom0_common.h>
-
-#define PAGE_SIZE RTE_PGSIZE_4K
-#define DEFAUL_DOM0_NAME "dom0-mem"
-
-static int xen_fd = -1;
-static const char sys_dir_path[] = "/sys/kernel/mm/dom0-mm/memsize-mB";
-
-/*
- * Try to mmap *size bytes in /dev/zero. If it is successful, return the
- * pointer to the mmap'd area and keep *size unmodified. Else, retry
- * with a smaller zone: decrease *size by mem_size until it reaches
- * 0. In this case, return NULL. Note: this function returns an address
- * which is a multiple of mem_size size.
- */
-static void *
-xen_get_virtual_area(size_t *size, size_t mem_size)
-{
- void *addr;
- int fd;
- long aligned_addr;
-
- RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zu bytes\n", *size);
-
- fd = open("/dev/zero", O_RDONLY);
- if (fd < 0){
- RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
- return NULL;
- }
- do {
- addr = mmap(NULL, (*size) + mem_size, PROT_READ,
- MAP_PRIVATE, fd, 0);
- if (addr == MAP_FAILED)
- *size -= mem_size;
- } while (addr == MAP_FAILED && *size > 0);
-
- if (addr == MAP_FAILED) {
- close(fd);
- RTE_LOG(ERR, EAL, "Cannot get a virtual area\n");
- return NULL;
- }
-
- munmap(addr, (*size) + mem_size);
- close(fd);
-
- /* align addr to a mem_size boundary */
- aligned_addr = (uintptr_t)addr;
- aligned_addr = RTE_ALIGN_CEIL(aligned_addr, mem_size);
- addr = (void *)(aligned_addr);
-
- RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
- addr, *size);
-
- return addr;
-}
-
-/**
- * Get memory size configuration from /sys/devices/virtual/misc/dom0_mm
- * /memsize-mB/memsize file, and the size unit is mB.
- */
-static int
-get_xen_memory_size(void)
-{
- char path[PATH_MAX];
- unsigned long mem_size = 0;
- static const char *file_name;
-
- file_name = "memsize";
- snprintf(path, sizeof(path), "%s/%s",
- sys_dir_path, file_name);
-
- if (eal_parse_sysfs_value(path, &mem_size) < 0)
- return -1;
-
- if (mem_size == 0)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s was not"
- " configured.\n",sys_dir_path, file_name);
- if (mem_size % 2)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s must be"
- " even number.\n",sys_dir_path, file_name);
-
- if (mem_size > DOM0_CONFIG_MEMSIZE)
- rte_exit(EXIT_FAILURE,"XEN-DOM0:the %s/%s should not be larger"
- " than %d mB\n",sys_dir_path, file_name, DOM0_CONFIG_MEMSIZE);
-
- return mem_size;
-}
-
-/**
- * Calculate the MFN in Xen Dom0 based on the physical address.
- */
-phys_addr_t
-rte_xen_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
-{
- int mfn_id, i;
- uint64_t mfn, mfn_offset;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- struct rte_memseg *memseg = mcfg->memseg;
-
- /* find the memory segment owning the physical address */
- if (memseg_id == -1) {
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if ((phy_addr >= memseg[i].phys_addr) &&
- (phy_addr < memseg[i].phys_addr +
- memseg[i].len)) {
- memseg_id = i;
- break;
- }
- }
- if (memseg_id == -1)
- return RTE_BAD_PHYS_ADDR;
- }
-
- mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;
-
- /*the MFN is contiguous in 2M */
- mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
- RTE_PGSIZE_2M / PAGE_SIZE;
- mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];
-
- /** return machine address */
- return mfn * PAGE_SIZE + phy_addr % PAGE_SIZE;
-}
-
-int
-rte_xen_dom0_memory_init(void)
-{
- void *vir_addr, *vma_addr = NULL;
- int err, ret = 0;
- uint32_t i, requested, mem_size, memseg_idx, num_memseg = 0;
- size_t vma_len = 0;
- struct memory_info meminfo;
- struct memseg_info seginfo[RTE_MAX_MEMSEG];
- int flags, page_size = getpagesize();
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- struct rte_memseg *memseg = mcfg->memseg;
- uint64_t total_mem = internal_config.memory;
-
- memset(seginfo, 0, sizeof(seginfo));
- memset(&meminfo, 0, sizeof(struct memory_info));
-
- mem_size = get_xen_memory_size();
- requested = (unsigned) (total_mem / 0x100000);
- if (requested > mem_size)
- /* if we didn't satisfy total memory requirements */
- rte_exit(EXIT_FAILURE,"Not enough memory available! Requested: %uMB,"
- " available: %uMB\n", requested, mem_size);
- else if (total_mem != 0)
- mem_size = requested;
-
- /* Check FD and open once */
- if (xen_fd < 0) {
- xen_fd = open(DOM0_MM_DEV, O_RDWR);
- if (xen_fd < 0) {
-			RTE_LOG(ERR, EAL, "Cannot open %s\n", DOM0_MM_DEV);
- return -1;
- }
- }
-
- meminfo.size = mem_size;
-
-	/* construct the memory management name for Dom0 */
- snprintf(meminfo.name, DOM0_NAME_MAX, "%s-%s",
- internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
-
- /* Notify kernel driver to allocate memory */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memory\n");
- err = -EIO;
- goto fail;
- }
-
- /* Get number of memory segment from driver */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_memseg);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg count.\n");
- err = -EIO;
- goto fail;
- }
-
-	if (num_memseg > RTE_MAX_MEMSEG) {
-		RTE_LOG(ERR, EAL, "XEN DOM0: the memseg count %d is greater"
-			" than the maximum of %d.\n", num_memseg, RTE_MAX_MEMSEG);
- err = -EIO;
- goto fail;
- }
-
-	/* get information about all memory segments */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seginfo);
- if (ret < 0) {
- RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg info.\n");
- err = -EIO;
- goto fail;
- }
-
- /* map all memory segments to contiguous user space */
- for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++)
- {
- vma_len = seginfo[memseg_idx].size;
-
- /**
- * get the biggest virtual memory area up to vma_len. If it fails,
- * vma_addr is NULL, so let the kernel provide the address.
- */
- vma_addr = xen_get_virtual_area(&vma_len, RTE_PGSIZE_2M);
- if (vma_addr == NULL) {
- flags = MAP_SHARED;
- vma_len = RTE_PGSIZE_2M;
- } else
- flags = MAP_SHARED | MAP_FIXED;
-
- seginfo[memseg_idx].size = vma_len;
- vir_addr = mmap(vma_addr, seginfo[memseg_idx].size,
- PROT_READ|PROT_WRITE, flags, xen_fd,
- memseg_idx * page_size);
- if (vir_addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "XEN DOM0:Could not mmap %s\n",
- DOM0_MM_DEV);
- err = -EIO;
- goto fail;
- }
-
- memseg[memseg_idx].addr = vir_addr;
- memseg[memseg_idx].phys_addr = page_size *
- seginfo[memseg_idx].pfn ;
- memseg[memseg_idx].len = seginfo[memseg_idx].size;
- for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
- memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];
-
-		/* MFNs are contiguous within 2M, so assume a 2M page size */
- memseg[memseg_idx].hugepage_sz = RTE_PGSIZE_2M;
-
- memseg[memseg_idx].nchannel = mcfg->nchannel;
- memseg[memseg_idx].nrank = mcfg->nrank;
-
-		/* NUMA is not supported in Xen Dom0, so only set socket 0 */
- memseg[memseg_idx].socket_id = 0;
- }
-
- return 0;
-fail:
- if (xen_fd > 0) {
- close(xen_fd);
- xen_fd = -1;
- }
- return err;
-}
-
-/*
- * This creates the memory mappings in the secondary process to match those of
- * the primary process. It goes through each memory segment in the DPDK runtime
- * configuration and maps it in order to form a contiguous block in the
- * virtual memory space.
- */
-int
-rte_xen_dom0_memory_attach(void)
-{
- const struct rte_mem_config *mcfg;
- unsigned s = 0; /* s used to track the segment number */
- int xen_fd = -1;
- int ret = -1;
- void *vir_addr;
- char name[DOM0_NAME_MAX] = {0};
- int page_size = getpagesize();
-
- mcfg = rte_eal_get_configuration()->mem_config;
-
- /* Check FD and open once */
- if (xen_fd < 0) {
- xen_fd = open(DOM0_MM_DEV, O_RDWR);
- if (xen_fd < 0) {
-			RTE_LOG(ERR, EAL, "Cannot open %s\n", DOM0_MM_DEV);
- goto error;
- }
- }
-
-	/* construct the memory management name for Dom0 */
- snprintf(name, DOM0_NAME_MAX, "%s-%s",
- internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
- /* attach to memory segments of primary process */
- ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
- if (ret) {
-		RTE_LOG(ERR, EAL, "failed to attach memory segments.\n");
- goto error;
- }
-
- /* map all segments into memory to make sure we get the addrs */
- for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
-
- /*
- * the first memory segment with len==0 is the one that
- * follows the last valid segment.
- */
- if (mcfg->memseg[s].len == 0)
- break;
-
- vir_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
- PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, xen_fd,
- s * page_size);
- if (vir_addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
- "in %s to requested address [%p]\n",
- (unsigned long long)mcfg->memseg[s].len, DOM0_MM_DEV,
- mcfg->memseg[s].addr);
- goto error;
- }
- }
- return 0;
-
-error:
- if (xen_fd >= 0) {
- close(xen_fd);
- xen_fd = -1;
- }
- return -1;
-}
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
deleted file mode 100644
index d9707780..00000000
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2007-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Contact Information:
- * Intel Corporation
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _RTE_DOM0_COMMON_H_
-#define _RTE_DOM0_COMMON_H_
-
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
-
-#define DOM0_NAME_MAX 256
-#define DOM0_MM_DEV "/dev/dom0_mm"
-
-#define DOM0_CONTIG_NUM_ORDER 9 /**< order of 2M */
-#define DOM0_NUM_MEMSEG 512 /**< Maximum nb. of memory segments. */
-#define DOM0_MEMBLOCK_SIZE 0x200000 /**< Size of a memory block (2M). */
-#define DOM0_CONFIG_MEMSIZE 4096 /**< Maximum config memory size (4G). */
-#define DOM0_NUM_MEMBLOCK (DOM0_CONFIG_MEMSIZE / 2) /**< Maximum nb. of 2M memory blocks. */
-
-#define RTE_DOM0_IOCTL_PREPARE_MEMSEG _IOWR(0, 1, struct memory_info)
-#define RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG _IOWR(0, 2, char *)
-#define RTE_DOM0_IOCTL_GET_NUM_MEMSEG _IOWR(0, 3, int)
-#define RTE_DOM0_IOCTL_GET_MEMSEG_INFO _IOWR(0, 4, void *)
-
-/**
- * A structure used to store memory information.
- */
-struct memory_info {
- char name[DOM0_NAME_MAX];
- uint64_t size;
-};
-
-/**
- * A structure used to store memory segment information.
- */
-struct memseg_info {
- uint32_t idx;
- uint64_t pfn;
- uint64_t size;
- uint64_t mfn[DOM0_NUM_MEMBLOCK];
-};
-
-/**
- * A structure used to store memory block information.
- */
-struct memblock_info {
- uint8_t exchange_flag;
- uint8_t used;
- uint64_t vir_addr;
- uint64_t pfn;
- uint64_t mfn;
-};
-#endif /* _RTE_DOM0_COMMON_H_ */
diff --git a/lib/librte_eal/linuxapp/igb_uio/compat.h b/lib/librte_eal/linuxapp/igb_uio/compat.h
index b800a53c..ce456d4b 100644
--- a/lib/librte_eal/linuxapp/igb_uio/compat.h
+++ b/lib/librte_eal/linuxapp/igb_uio/compat.h
@@ -16,12 +16,9 @@
#endif
#ifndef PCI_MSIX_ENTRY_SIZE
-#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4
-#define PCI_MSIX_ENTRY_DATA 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
-#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#define PCI_MSIX_ENTRY_SIZE 16
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
#endif
/*
@@ -124,6 +121,14 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev)
#endif /* < 3.3.0 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
-#define HAVE_PCI_ENABLE_MSIX
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+#define HAVE_ALLOC_IRQ_VECTORS 1
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+#define HAVE_MSI_LIST_IN_GENERIC_DEVICE 1
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+#define HAVE_PCI_MSI_MASK_IRQ 1
#endif
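These feature macros let a single source file span several kernel APIs; further down, the driver consumes them roughly as in this condensed sketch (example_request_vector is an illustrative name, not the literal driver code):

#include <linux/pci.h>

/* condensed sketch of how the HAVE_* switches above are consumed */
static int example_request_vector(struct pci_dev *pdev)
{
#ifdef HAVE_ALLOC_IRQ_VECTORS
	/* kernels >= 4.8: unified MSI/MSI-X vector allocation */
	if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX) == 1)
		return pci_irq_vector(pdev, 0);
#else
	struct msix_entry entry = { .entry = 0 };

	/* older kernels: legacy per-type enable call */
	if (pci_enable_msix(pdev, &entry, 1) == 0)
		return entry.vector;
#endif
	return -1;
}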
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
index 07a19a31..a3a98c17 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -29,13 +29,11 @@
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>
-#ifdef CONFIG_XEN_DOM0
-#include <xen/xen.h>
-#endif
#include <rte_pci_dev_features.h>
#include "compat.h"
@@ -51,7 +49,6 @@ struct rte_uio_pci_dev {
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
-
/* sriov sysfs */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
@@ -91,14 +88,16 @@ static struct attribute *dev_attrs[] = {
static const struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
+
+#ifndef HAVE_PCI_MSI_MASK_IRQ
/*
 * Mask or unmask generation of MSI-X messages.
*/
static void
-igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
u32 mask_bits = desc->masked;
- unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
if (state != 0)
@@ -113,6 +112,52 @@ igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
}
}
+/*
+ * Mask or unmask generation of MSI messages.
+ */
+static void
+igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
+{
+ u32 mask_bits = desc->masked;
+ u32 offset = desc->irq - pdev->irq;
+ u32 mask = 1 << offset;
+
+ if (!desc->msi_attrib.maskbit)
+ return;
+
+ if (state != 0)
+ mask_bits &= ~mask;
+ else
+ mask_bits |= mask;
+
+ if (mask_bits != desc->masked) {
+ pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
+ desc->masked = mask_bits;
+ }
+}
+
+static void
+igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
+{
+ struct msi_desc *desc;
+ struct list_head *msi_list;
+
+#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
+ msi_list = &pdev->dev.msi_list;
+#else
+ msi_list = &pdev->msi_list;
+#endif
+
+ if (mode == RTE_INTR_MODE_MSIX) {
+ list_for_each_entry(desc, msi_list, list)
+ igbuio_msix_mask_irq(desc, irq_state);
+ } else if (mode == RTE_INTR_MODE_MSI) {
+ list_for_each_entry(desc, msi_list, list)
+ igbuio_msi_mask_irq(pdev, desc, irq_state);
+ }
+}
+#endif
+
/**
* This is the irqcontrol callback to be registered to uio_info.
* It can be used to disable/enable interrupt from user space processes.
@@ -132,21 +177,26 @@ igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
struct rte_uio_pci_dev *udev = info->priv;
struct pci_dev *pdev = udev->pdev;
- pci_cfg_access_lock(pdev);
- if (udev->mode == RTE_INTR_MODE_LEGACY)
- pci_intx(pdev, !!irq_state);
+#ifdef HAVE_PCI_MSI_MASK_IRQ
+ struct irq_data *irq = irq_get_irq_data(udev->info.irq);
+#endif
- else if (udev->mode == RTE_INTR_MODE_MSIX) {
- struct msi_desc *desc;
+ pci_cfg_access_lock(pdev);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
- list_for_each_entry(desc, &pdev->msi_list, list)
- igbuio_msix_mask_irq(desc, irq_state);
+ if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) {
+#ifdef HAVE_PCI_MSI_MASK_IRQ
+ if (irq_state == 1)
+ pci_msi_unmask_irq(irq);
+ else
+ pci_msi_mask_irq(irq);
#else
- list_for_each_entry(desc, &pdev->dev.msi_list, list)
- igbuio_msix_mask_irq(desc, irq_state);
+ igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
}
+
+ if (udev->mode == RTE_INTR_MODE_LEGACY)
+ pci_intx(pdev, !!irq_state);
+
pci_cfg_access_unlock(pdev);
return 0;
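From user space this callback is reached by writing a 32-bit value to the UIO device file; the UIO core passes that value through as irq_state. A minimal sketch, with uio_intr_set as a hypothetical helper name:

#include <stdint.h>
#include <unistd.h>

/*
 * Sketch: re-enable (1) or mask (0) the device interrupt via UIO.
 * The UIO core forwards the written 32-bit value to the driver's
 * irqcontrol callback, i.e. igbuio_pci_irqcontrol() above.
 */
static int
uio_intr_set(int uio_fd, int enable)
{
	uint32_t value = enable ? 1 : 0;

	if (write(uio_fd, &value, sizeof(value)) != sizeof(value))
		return -1;
	return 0;
}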
@@ -157,19 +207,125 @@ igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
* If yes, disable it here and will be enable later.
*/
static irqreturn_t
-igbuio_pci_irqhandler(int irq, struct uio_info *info)
+igbuio_pci_irqhandler(int irq, void *dev_id)
{
- struct rte_uio_pci_dev *udev = info->priv;
+ struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
+ struct uio_info *info = &udev->info;
	/* Legacy mode needs to mask in hardware */
if (udev->mode == RTE_INTR_MODE_LEGACY &&
!pci_check_and_mask_intx(udev->pdev))
return IRQ_NONE;
+ uio_event_notify(info);
+
	/* Message signaled mode: IRQ is not shared and is automasked */
return IRQ_HANDLED;
}
+static int
+igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
+{
+ int err = 0;
+#ifndef HAVE_ALLOC_IRQ_VECTORS
+ struct msix_entry msix_entry;
+#endif
+
+ switch (igbuio_intr_mode_preferred) {
+ case RTE_INTR_MODE_MSIX:
+ /* Only 1 msi-x vector needed */
+#ifndef HAVE_ALLOC_IRQ_VECTORS
+ msix_entry.entry = 0;
+ if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
+ dev_dbg(&udev->pdev->dev, "using MSI-X");
+ udev->info.irq_flags = IRQF_NO_THREAD;
+ udev->info.irq = msix_entry.vector;
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+#else
+ if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
+ dev_dbg(&udev->pdev->dev, "using MSI-X");
+ udev->info.irq_flags = IRQF_NO_THREAD;
+ udev->info.irq = pci_irq_vector(udev->pdev, 0);
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+#endif
+
+ /* fall back to MSI */
+ case RTE_INTR_MODE_MSI:
+#ifndef HAVE_ALLOC_IRQ_VECTORS
+ if (pci_enable_msi(udev->pdev) == 0) {
+ dev_dbg(&udev->pdev->dev, "using MSI");
+ udev->info.irq_flags = IRQF_NO_THREAD;
+ udev->info.irq = udev->pdev->irq;
+ udev->mode = RTE_INTR_MODE_MSI;
+ break;
+ }
+#else
+ if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
+ dev_dbg(&udev->pdev->dev, "using MSI");
+ udev->info.irq_flags = IRQF_NO_THREAD;
+ udev->info.irq = pci_irq_vector(udev->pdev, 0);
+ udev->mode = RTE_INTR_MODE_MSI;
+ break;
+ }
+#endif
+ /* fall back to INTX */
+ case RTE_INTR_MODE_LEGACY:
+ if (pci_intx_mask_supported(udev->pdev)) {
+ dev_dbg(&udev->pdev->dev, "using INTX");
+ udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
+ udev->info.irq = udev->pdev->irq;
+ udev->mode = RTE_INTR_MODE_LEGACY;
+ break;
+ }
+ dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
+ /* fall back to no IRQ */
+ case RTE_INTR_MODE_NONE:
+ udev->mode = RTE_INTR_MODE_NONE;
+ udev->info.irq = UIO_IRQ_NONE;
+ break;
+
+ default:
+ dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
+ igbuio_intr_mode_preferred);
+ udev->info.irq = UIO_IRQ_NONE;
+ err = -EINVAL;
+ }
+
+ if (udev->info.irq != UIO_IRQ_NONE)
+ err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
+ udev->info.irq_flags, udev->info.name,
+ udev);
+ dev_info(&udev->pdev->dev, "uio device registered with irq %lx\n",
+ udev->info.irq);
+
+ return err;
+}
+
+static void
+igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
+{
+ if (udev->info.irq) {
+ free_irq(udev->info.irq, udev);
+ udev->info.irq = 0;
+ }
+
+#ifndef HAVE_ALLOC_IRQ_VECTORS
+ if (udev->mode == RTE_INTR_MODE_MSIX)
+ pci_disable_msix(udev->pdev);
+ if (udev->mode == RTE_INTR_MODE_MSI)
+ pci_disable_msi(udev->pdev);
+#else
+ if (udev->mode == RTE_INTR_MODE_MSIX ||
+ udev->mode == RTE_INTR_MODE_MSI)
+ pci_free_irq_vectors(udev->pdev);
+#endif
+}
+
+
/**
* This gets called while opening uio device file.
*/
@@ -178,12 +334,17 @@ igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
struct rte_uio_pci_dev *udev = info->priv;
struct pci_dev *dev = udev->pdev;
-
- pci_reset_function(dev);
+ int err;
/* set bus master, which was cleared by the reset function */
pci_set_master(dev);
+ /* enable interrupts */
+ err = igbuio_pci_enable_interrupts(udev);
+ if (err) {
+		dev_err(&dev->dev, "Failed to enable interrupts\n");
+ return err;
+ }
return 0;
}
@@ -193,60 +354,15 @@ igbuio_pci_release(struct uio_info *info, struct inode *inode)
struct rte_uio_pci_dev *udev = info->priv;
struct pci_dev *dev = udev->pdev;
+ /* disable interrupts */
+ igbuio_pci_disable_interrupts(udev);
+
/* stop the device from further DMA */
pci_clear_master(dev);
- pci_reset_function(dev);
-
return 0;
}
-#ifdef CONFIG_XEN_DOM0
-static int
-igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
-{
- int idx;
-
- idx = (int)vma->vm_pgoff;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#ifdef HAVE_PTE_MASK_PAGE_IOMAP
- vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
-#endif
-
- return remap_pfn_range(vma,
- vma->vm_start,
- info->mem[idx].addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
-/**
- * This is uio device mmap method which will use igbuio mmap for Xen
- * Dom0 environment.
- */
-static int
-igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
-{
- int idx;
-
- if (vma->vm_pgoff >= MAX_UIO_MAPS)
- return -EINVAL;
-
- if (info->mem[vma->vm_pgoff].size == 0)
- return -EINVAL;
-
- idx = (int)vma->vm_pgoff;
- switch (info->mem[idx].memtype) {
- case UIO_MEM_PHYS:
- return igbuio_dom0_mmap_phys(info, vma);
- case UIO_MEM_LOGICAL:
- case UIO_MEM_VIRTUAL:
- default:
- return -EINVAL;
- }
-}
-#endif
-
/* Remap pci resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
@@ -356,9 +472,6 @@ static int
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct rte_uio_pci_dev *udev;
-#ifdef HAVE_PCI_ENABLE_MSIX
- struct msix_entry msix_entry;
-#endif
dma_addr_t map_dma_addr;
void *map_addr;
int err;
@@ -401,61 +514,12 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* fill uio infos */
udev->info.name = "igb_uio";
udev->info.version = "0.1";
- udev->info.handler = igbuio_pci_irqhandler;
udev->info.irqcontrol = igbuio_pci_irqcontrol;
udev->info.open = igbuio_pci_open;
udev->info.release = igbuio_pci_release;
-#ifdef CONFIG_XEN_DOM0
- /* check if the driver run on Xen Dom0 */
- if (xen_initial_domain())
- udev->info.mmap = igbuio_dom0_pci_mmap;
-#endif
udev->info.priv = udev;
udev->pdev = dev;
- switch (igbuio_intr_mode_preferred) {
- case RTE_INTR_MODE_MSIX:
- /* Only 1 msi-x vector needed */
-#ifdef HAVE_PCI_ENABLE_MSIX
- msix_entry.entry = 0;
- if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
- dev_dbg(&dev->dev, "using MSI-X");
- udev->info.irq_flags = IRQF_NO_THREAD;
- udev->info.irq = msix_entry.vector;
- udev->mode = RTE_INTR_MODE_MSIX;
- break;
- }
-#else
- if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
- dev_dbg(&dev->dev, "using MSI-X");
- udev->info.irq = pci_irq_vector(dev, 0);
- udev->mode = RTE_INTR_MODE_MSIX;
- break;
- }
-#endif
- /* fall back to INTX */
- case RTE_INTR_MODE_LEGACY:
- if (pci_intx_mask_supported(dev)) {
- dev_dbg(&dev->dev, "using INTX");
- udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
- udev->info.irq = dev->irq;
- udev->mode = RTE_INTR_MODE_LEGACY;
- break;
- }
- dev_notice(&dev->dev, "PCI INTX mask not supported\n");
- /* fall back to no IRQ */
- case RTE_INTR_MODE_NONE:
- udev->mode = RTE_INTR_MODE_NONE;
- udev->info.irq = 0;
- break;
-
- default:
- dev_err(&dev->dev, "invalid IRQ mode %u",
- igbuio_intr_mode_preferred);
- err = -EINVAL;
- goto fail_release_iomem;
- }
-
err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
if (err != 0)
goto fail_release_iomem;
@@ -467,9 +531,6 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_drvdata(dev, udev);
- dev_info(&dev->dev, "uio device registered with irq %lx\n",
- udev->info.irq);
-
/*
* Doing a harmless dma mapping for attaching the device to
* the iommu identity mapping if kernel boots with iommu=pt.
@@ -497,8 +558,6 @@ fail_remove_group:
sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
igbuio_pci_release_iomem(&udev->info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
- pci_disable_msix(udev->pdev);
pci_disable_device(dev);
fail_free:
kfree(udev);
@@ -514,8 +573,6 @@ igbuio_pci_remove(struct pci_dev *dev)
sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
uio_unregister_device(&udev->info);
igbuio_pci_release_iomem(&udev->info);
- if (udev->mode == RTE_INTR_MODE_MSIX)
- pci_disable_msix(dev);
pci_disable_device(dev);
pci_set_drvdata(dev, NULL);
kfree(udev);
@@ -532,6 +589,9 @@ igbuio_config_intr_mode(char *intr_str)
if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
pr_info("Use MSIX interrupt\n");
+ } else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
+ igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
+ pr_info("Use MSI interrupt\n");
} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
pr_info("Use legacy interrupt\n");
@@ -575,6 +635,7 @@ module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
" " RTE_INTR_MODE_MSIX_NAME " Use MSIX interrupt\n"
+" " RTE_INTR_MODE_MSI_NAME " Use MSI interrupt\n"
" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
"\n");
diff --git a/lib/librte_eal/linuxapp/kni/compat.h b/lib/librte_eal/linuxapp/kni/compat.h
index 6a1587b4..3f8c0bc8 100644
--- a/lib/librte_eal/linuxapp/kni/compat.h
+++ b/lib/librte_eal/linuxapp/kni/compat.h
@@ -8,6 +8,34 @@
#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
#endif
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
+#endif
+#ifdef CONFIG_SUSE_KERNEL
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
+/* SLES12SP3 is at least 4.4.57+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 28))
+/* SLES12 is at least 3.12.28+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 61)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)))
+/* SLES11 SP3 is at least 3.0.61+ based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32))
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 27))
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
(!(defined(RHEL_RELEASE_CODE) && \
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
@@ -55,7 +83,8 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || \
(defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(12, 3, 0))
#define HAVE_TRANS_START_HELPER
#endif
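Elsewhere in the kni sources this macro selects between the 4.7+ helper and the old per-device field, roughly as in this illustrative sketch:

#include <linux/netdevice.h>

/* sketch: updating the net_device TX timestamp across kernel generations */
static inline void example_trans_update(struct net_device *dev)
{
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);	/* 4.7+, RHEL 7.4+, SLES12 SP3 */
#else
	dev->trans_start = jiffies;	/* older kernels keep the raw field */
#endif
}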
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
index e0a03542..e38a7561 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
@@ -697,22 +697,22 @@ struct _kc_ethtool_pauseparam {
#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
#endif
#ifdef CONFIG_SUSE_KERNEL
-#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
-/* SLES11 GA is 2.6.27 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
-#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
-/* SLES11 SP1 is 2.6.32 based */
-#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
+/* SLES12SP3 is at least 4.4.57+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
+#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,28) )
+/* SLES12 is at least 3.12.28+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
/* SLES11 SP3 is at least 3.0.61+ based */
#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
-#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,28) )
-/* SLES12 is at least 3.12.28+ based */
-#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
-/* SLES12SP3 is at least 4.4.57+ based */
-#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
#endif /* CONFIG_SUSE_KERNEL */
#ifndef SLE_VERSION_CODE
diff --git a/lib/librte_eal/linuxapp/xen_dom0/compat.h b/lib/librte_eal/linuxapp/xen_dom0/compat.h
deleted file mode 100644
index e6eb97f2..00000000
--- a/lib/librte_eal/linuxapp/xen_dom0/compat.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Minimal wrappers to allow compiling xen_dom0 on older kernels.
- */
-
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
- (!(defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
-
-#define kstrtoul strict_strtoul
-
-#endif /* < 2.6.39 */
diff --git a/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h b/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
deleted file mode 100644
index 9d5ffb22..00000000
--- a/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_dev.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _DOM0_MM_DEV_H_
-#define _DOM0_MM_DEV_H_
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <exec-env/rte_dom0_common.h>
-
-#define NUM_MEM_CTX 256 /**< Maximum number of memory contexts. */
-#define MAX_EXCHANGE_FAIL_TIME 5 /**< Maximum number of allowed exchange failures. */
-#define MAX_MEMBLOCK_SIZE (2 * DOM0_MEMBLOCK_SIZE)
-#define MAX_NUM_ORDER (DOM0_CONTIG_NUM_ORDER + 1)
-#define SIZE_PER_BLOCK 2 /**< Size of a memory block (2MB). */
-
-/**
- * A structure describing the private information for a dom0 device.
- */
-struct dom0_mm_dev {
- struct miscdevice miscdev;
- uint8_t fail_times;
- uint32_t used_memsize;
- uint32_t num_mem_ctx;
- uint32_t config_memsize;
- uint32_t num_bigblock;
- struct dom0_mm_data *mm_data[NUM_MEM_CTX];
- struct mutex data_lock;
-};
-
-struct dom0_mm_data{
- uint32_t refcnt;
-	uint32_t num_memseg; /**< Number of memory segments. */
-	uint32_t mem_size; /**< Size of the requested memory. */
-
- char name[DOM0_NAME_MAX];
-
- /** Store global memory block IDs used by an instance */
- uint32_t block_num[DOM0_NUM_MEMBLOCK];
-
- /** Store memory block information.*/
- struct memblock_info block_info[DOM0_NUM_MEMBLOCK];
-
- /** Store memory segment information.*/
- struct memseg_info seg_info[DOM0_NUM_MEMSEG];
-};
-
-#define XEN_ERR(args...) printk(KERN_DEBUG "XEN_DOM0: Error: " args)
-#define XEN_PRINT(args...) printk(KERN_DEBUG "XEN_DOM0: " args)
-#endif
diff --git a/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c b/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
deleted file mode 100644
index 79630bad..00000000
--- a/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c
+++ /dev/null
@@ -1,780 +0,0 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/version.h>
-
-#include <xen/xen.h>
-#include <xen/page.h>
-#include <xen/xen-ops.h>
-#include <xen/interface/memory.h>
-
-#include <exec-env/rte_dom0_common.h>
-
-#include "compat.h"
-#include "dom0_mm_dev.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Kernel Module for supporting DPDK running on Xen Dom0");
-
-static struct dom0_mm_dev dom0_dev;
-static struct kobject *dom0_kobj = NULL;
-
-static struct memblock_info *rsv_mm_info;
-
-/* Default configuration for the reserved memory size (2048 MB). */
-static uint32_t rsv_memsize = 2048;
-
-static int dom0_open(struct inode *inode, struct file *file);
-static int dom0_release(struct inode *inode, struct file *file);
-static int dom0_ioctl(struct file *file, unsigned int ioctl_num,
- unsigned long ioctl_param);
-static int dom0_mmap(struct file *file, struct vm_area_struct *vma);
-static int dom0_memory_free(uint32_t size);
-static int dom0_memory_release(struct dom0_mm_data *mm_data);
-
-static const struct file_operations data_fops = {
- .owner = THIS_MODULE,
- .open = dom0_open,
- .release = dom0_release,
- .mmap = dom0_mmap,
- .unlocked_ioctl = (void *)dom0_ioctl,
-};
-
-static ssize_t
-show_memsize_rsvd(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, 10, "%u\n", dom0_dev.used_memsize);
-}
-
-static ssize_t
-show_memsize(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, 10, "%u\n", dom0_dev.config_memsize);
-}
-
-static ssize_t
-store_memsize(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int err = 0;
- unsigned long mem_size;
-
- if (0 != kstrtoul(buf, 0, &mem_size))
- return -EINVAL;
-
- mutex_lock(&dom0_dev.data_lock);
- if (0 == mem_size) {
- err = -EINVAL;
- goto fail;
- } else if (mem_size > (rsv_memsize - dom0_dev.used_memsize)) {
-		XEN_ERR("failed to configure memory size\n");
- err = -EINVAL;
- goto fail;
- } else
- dom0_dev.config_memsize = mem_size;
-
-fail:
- mutex_unlock(&dom0_dev.data_lock);
- return err ? err : count;
-}
-
-static DEVICE_ATTR(memsize, S_IRUGO | S_IWUSR, show_memsize, store_memsize);
-static DEVICE_ATTR(memsize_rsvd, S_IRUGO, show_memsize_rsvd, NULL);
-
-static struct attribute *dev_attrs[] = {
- &dev_attr_memsize.attr,
- &dev_attr_memsize_rsvd.attr,
- NULL,
-};
-
-/* the memory size unit is MB */
-static const struct attribute_group dev_attr_grp = {
- .name = "memsize-mB",
- .attrs = dev_attrs,
-};
-
-
-static void
-sort_viraddr(struct memblock_info *mb, int cnt)
-{
- int i,j;
- uint64_t tmp_pfn;
- uint64_t tmp_viraddr;
-
-	/* sort the blocks by pfn, keeping virtual addresses paired */
- for(i = 0; i < cnt; i ++) {
- for(j = cnt - 1; j > i; j--) {
- if(mb[j].pfn < mb[j - 1].pfn) {
- tmp_pfn = mb[j - 1].pfn;
- mb[j - 1].pfn = mb[j].pfn;
- mb[j].pfn = tmp_pfn;
-
- tmp_viraddr = mb[j - 1].vir_addr;
- mb[j - 1].vir_addr = mb[j].vir_addr;
- mb[j].vir_addr = tmp_viraddr;
- }
- }
- }
-}
-
-static int
-dom0_find_memdata(const char * mem_name)
-{
- unsigned i;
- int idx = -1;
- for(i = 0; i< NUM_MEM_CTX; i++) {
- if(dom0_dev.mm_data[i] == NULL)
- continue;
- if (!strncmp(dom0_dev.mm_data[i]->name, mem_name,
- sizeof(char) * DOM0_NAME_MAX)) {
- idx = i;
- break;
- }
- }
-
- return idx;
-}
-
-static int
-dom0_find_mempos(void)
-{
- unsigned i;
- int idx = -1;
-
- for(i = 0; i< NUM_MEM_CTX; i++) {
- if(dom0_dev.mm_data[i] == NULL){
- idx = i;
- break;
- }
- }
-
- return idx;
-}
-
-static int
-dom0_memory_release(struct dom0_mm_data *mm_data)
-{
- int idx;
- uint32_t num_block, block_id;
-
- /* each memory block is 2M */
- num_block = mm_data->mem_size / SIZE_PER_BLOCK;
- if (num_block == 0)
- return -EINVAL;
-
- /* reset global memory data */
- idx = dom0_find_memdata(mm_data->name);
- if (idx >= 0) {
- dom0_dev.used_memsize -= mm_data->mem_size;
- dom0_dev.mm_data[idx] = NULL;
- dom0_dev.num_mem_ctx--;
- }
-
- /* reset these memory blocks status as free */
- for (idx = 0; idx < num_block; idx++) {
- block_id = mm_data->block_num[idx];
- rsv_mm_info[block_id].used = 0;
- }
-
- memset(mm_data, 0, sizeof(struct dom0_mm_data));
- vfree(mm_data);
- return 0;
-}
-
-static int
-dom0_memory_free(uint32_t rsv_size)
-{
- uint64_t vstart, vaddr;
- uint32_t i, num_block, size;
-
- if (!xen_pv_domain())
- return -1;
-
- /* each memory block is 2M */
- num_block = rsv_size / SIZE_PER_BLOCK;
- if (num_block == 0)
- return -EINVAL;
-
-	/* free all 4M memory blocks and destroy their contiguous regions */
- for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
- vstart = rsv_mm_info[i].vir_addr;
- if (vstart) {
- #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(vstart,
- DOM0_CONTIG_NUM_ORDER);
- if (rsv_mm_info[i + 1].exchange_flag)
- xen_destroy_contiguous_region(vstart +
- DOM0_MEMBLOCK_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- #else
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(rsv_mm_info[i].pfn
- * PAGE_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- if (rsv_mm_info[i + 1].exchange_flag)
- xen_destroy_contiguous_region(rsv_mm_info[i].pfn
- * PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
- DOM0_CONTIG_NUM_ORDER);
- #endif
-
- size = DOM0_MEMBLOCK_SIZE * 2;
- vaddr = vstart;
- while (size > 0) {
- ClearPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages(vstart, MAX_NUM_ORDER);
- }
- }
-
-	/* free the remaining 2M blocks and destroy their contiguous regions */
- for (; i < num_block; i++) {
- vstart = rsv_mm_info[i].vir_addr;
- if (vstart) {
- if (rsv_mm_info[i].exchange_flag)
- xen_destroy_contiguous_region(vstart,
- DOM0_CONTIG_NUM_ORDER);
-
- size = DOM0_MEMBLOCK_SIZE;
- vaddr = vstart;
- while (size > 0) {
- ClearPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
- }
- }
-
- memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
- vfree(rsv_mm_info);
- rsv_mm_info = NULL;
-
- return 0;
-}
-
-static void
-find_free_memory(uint32_t count, struct dom0_mm_data *mm_data)
-{
- uint32_t i = 0;
- uint32_t j = 0;
-
- while ((i < count) && (j < rsv_memsize / SIZE_PER_BLOCK)) {
- if (rsv_mm_info[j].used == 0) {
- mm_data->block_info[i].pfn = rsv_mm_info[j].pfn;
- mm_data->block_info[i].vir_addr =
- rsv_mm_info[j].vir_addr;
- mm_data->block_info[i].mfn = rsv_mm_info[j].mfn;
- mm_data->block_info[i].exchange_flag =
- rsv_mm_info[j].exchange_flag;
- mm_data->block_num[i] = j;
- rsv_mm_info[j].used = 1;
- i++;
- }
- j++;
- }
-}
-
-/**
- * Find all memory segments in which physical addresses are contiguous.
- */
-static void
-find_memseg(int count, struct dom0_mm_data * mm_data)
-{
- int i = 0;
- int j, k, idx = 0;
- uint64_t zone_len, pfn, num_block;
-
- while(i < count) {
- if (mm_data->block_info[i].exchange_flag == 0) {
- i++;
- continue;
- }
- k = 0;
- pfn = mm_data->block_info[i].pfn;
- mm_data->seg_info[idx].pfn = pfn;
- mm_data->seg_info[idx].mfn[k] = mm_data->block_info[i].mfn;
-
- for (j = i + 1; j < count; j++) {
-
- /* ignore exchange fail memory block */
- if (mm_data->block_info[j].exchange_flag == 0)
- break;
-
- if (mm_data->block_info[j].pfn !=
- (mm_data->block_info[j - 1].pfn +
- DOM0_MEMBLOCK_SIZE / PAGE_SIZE))
- break;
- ++k;
- mm_data->seg_info[idx].mfn[k] = mm_data->block_info[j].mfn;
- }
-
- num_block = j - i;
- zone_len = num_block * DOM0_MEMBLOCK_SIZE;
- mm_data->seg_info[idx].size = zone_len;
-
- XEN_PRINT("memseg id=%d, size=0x%llx\n", idx, zone_len);
- i = i+ num_block;
- idx++;
- if (idx == DOM0_NUM_MEMSEG)
- break;
- }
- mm_data->num_memseg = idx;
-}
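To see why the pfn comparison above merges blocks, consider 2M blocks of 512 4K pages each (DOM0_MEMBLOCK_SIZE / PAGE_SIZE = 0x200); a worked example with assumed pfn values:

/*
 * pfn[] = { 0x100000, 0x100200, 0x100400, 0x200000 }
 * 0x100200 == 0x100000 + 0x200  -> merge, segment grows to 4M
 * 0x100400 == 0x100200 + 0x200  -> merge, segment grows to 6M
 * 0x200000 != 0x100400 + 0x200  -> a new segment starts here
 */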
-
-static int
-dom0_memory_reserve(uint32_t rsv_size)
-{
- uint64_t pfn, vstart, vaddr;
- uint32_t i, num_block, size, allocated_size = 0;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
- dma_addr_t dma_handle;
-#endif
-
- /* 2M as memory block */
- num_block = rsv_size / SIZE_PER_BLOCK;
-
- rsv_mm_info = vmalloc(sizeof(struct memblock_info) * num_block);
- if (!rsv_mm_info) {
- XEN_ERR("Unable to allocate device memory information\n");
- return -ENOMEM;
- }
- memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
-
-	/* try to allocate 4M at a time first */
- for (i = 0; i < num_block; i += 2) {
- vstart = (unsigned long)
- __get_free_pages(GFP_ATOMIC, MAX_NUM_ORDER);
- if (vstart == 0)
- break;
-
- dom0_dev.num_bigblock = i / 2 + 1;
- allocated_size = SIZE_PER_BLOCK * (i + 2);
-
- /* size of 4M */
- size = DOM0_MEMBLOCK_SIZE * 2;
-
- vaddr = vstart;
- while (size > 0) {
- SetPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- pfn = virt_to_pfn(vstart);
- rsv_mm_info[i].pfn = pfn;
- rsv_mm_info[i].vir_addr = vstart;
- rsv_mm_info[i + 1].pfn =
- pfn + DOM0_MEMBLOCK_SIZE / PAGE_SIZE;
- rsv_mm_info[i + 1].vir_addr =
- vstart + DOM0_MEMBLOCK_SIZE;
- }
-
-	/* if a 4M allocation failed, fall back to allocating 2M at a time */
- for (; i < num_block; i++) {
- vstart = (unsigned long)
- __get_free_pages(GFP_ATOMIC, DOM0_CONTIG_NUM_ORDER);
- if (vstart == 0) {
- XEN_ERR("allocate memory fail.\n");
- dom0_memory_free(allocated_size);
- return -ENOMEM;
- }
-
- allocated_size += SIZE_PER_BLOCK;
-
- size = DOM0_MEMBLOCK_SIZE;
- vaddr = vstart;
- while (size > 0) {
- SetPageReserved(virt_to_page(vaddr));
- vaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- pfn = virt_to_pfn(vstart);
- rsv_mm_info[i].pfn = pfn;
- rsv_mm_info[i].vir_addr = vstart;
- }
-
- sort_viraddr(rsv_mm_info, num_block);
-
- for (i = 0; i< num_block; i++) {
-
- /*
-		 * This API is used to exchange MFNs to get a block of
-		 * contiguous physical addresses; its maximum size is 2M.
- */
- #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if (xen_create_contiguous_region(rsv_mm_info[i].vir_addr,
- DOM0_CONTIG_NUM_ORDER, 0) == 0) {
- #else
- if (xen_create_contiguous_region(rsv_mm_info[i].pfn * PAGE_SIZE,
- DOM0_CONTIG_NUM_ORDER, 0, &dma_handle) == 0) {
- #endif
- rsv_mm_info[i].exchange_flag = 1;
- rsv_mm_info[i].mfn =
- pfn_to_mfn(rsv_mm_info[i].pfn);
- rsv_mm_info[i].used = 0;
- } else {
-			XEN_ERR("memory exchange failed\n");
- rsv_mm_info[i].exchange_flag = 0;
- dom0_dev.fail_times++;
- if (dom0_dev.fail_times > MAX_EXCHANGE_FAIL_TIME) {
- dom0_memory_free(rsv_size);
- return -EFAULT;
- }
- }
- }
-
- return 0;
-}
-
-static int
-dom0_prepare_memsegs(struct memory_info *meminfo, struct dom0_mm_data *mm_data)
-{
- uint32_t num_block;
- int idx;
-
- /* check if there is a free name buffer */
- memcpy(mm_data->name, meminfo->name, DOM0_NAME_MAX);
- mm_data->name[DOM0_NAME_MAX - 1] = '\0';
- idx = dom0_find_mempos();
- if (idx < 0)
- return -1;
-
- num_block = meminfo->size / SIZE_PER_BLOCK;
-	/* find free memory and build the new memory segments */
- find_free_memory(num_block, mm_data);
- find_memseg(num_block, mm_data);
-
- /* update private memory data */
- mm_data->refcnt++;
- mm_data->mem_size = meminfo->size;
-
- /* update global memory data */
- dom0_dev.mm_data[idx] = mm_data;
- dom0_dev.num_mem_ctx++;
- dom0_dev.used_memsize += mm_data->mem_size;
-
- return 0;
-}
-
-static int
-dom0_check_memory (struct memory_info *meminfo)
-{
- int idx;
- uint64_t mem_size;
-
- /* round memory size to the next even number. */
- if (meminfo->size % 2)
- ++meminfo->size;
-
- mem_size = meminfo->size;
- if (dom0_dev.num_mem_ctx > NUM_MEM_CTX) {
- XEN_ERR("Memory data space is full in Dom0 driver\n");
- return -1;
- }
- idx = dom0_find_memdata(meminfo->name);
- if (idx >= 0) {
-		XEN_ERR("Memory data name %s already exists in the Dom0 driver.\n",
- meminfo->name);
- return -1;
- }
- if ((dom0_dev.used_memsize + mem_size) > rsv_memsize) {
- XEN_ERR("Total size can't be larger than reserved size.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int __init
-dom0_init(void)
-{
- if (!xen_domain())
- return -ENODEV;
-
- if (rsv_memsize > DOM0_CONFIG_MEMSIZE) {
- XEN_ERR("The reserved memory size cannot be greater than %d\n",
- DOM0_CONFIG_MEMSIZE);
- return -EINVAL;
- }
-
- /* Setup the misc device */
- dom0_dev.miscdev.minor = MISC_DYNAMIC_MINOR;
- dom0_dev.miscdev.name = "dom0_mm";
- dom0_dev.miscdev.fops = &data_fops;
-
- /* register misc char device */
- if (misc_register(&dom0_dev.miscdev) != 0) {
- XEN_ERR("Misc device registration failed\n");
- return -EPERM;
- }
-
- mutex_init(&dom0_dev.data_lock);
- dom0_kobj = kobject_create_and_add("dom0-mm", mm_kobj);
-
- if (!dom0_kobj) {
- XEN_ERR("dom0-mm object creation failed\n");
- misc_deregister(&dom0_dev.miscdev);
- return -ENOMEM;
- }
-
- if (sysfs_create_group(dom0_kobj, &dev_attr_grp)) {
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
- return -EPERM;
- }
-
- if (dom0_memory_reserve(rsv_memsize) < 0) {
- sysfs_remove_group(dom0_kobj, &dev_attr_grp);
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
- return -ENOMEM;
- }
-
- XEN_PRINT("####### DPDK Xen Dom0 module loaded #######\n");
-
- return 0;
-}
-
-static void __exit
-dom0_exit(void)
-{
- if (rsv_mm_info != NULL)
- dom0_memory_free(rsv_memsize);
-
- sysfs_remove_group(dom0_kobj, &dev_attr_grp);
- kobject_put(dom0_kobj);
- misc_deregister(&dom0_dev.miscdev);
-
- XEN_PRINT("####### DPDK Xen Dom0 module unloaded #######\n");
-}
-
-static int
-dom0_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
-
-	XEN_PRINT("/dev/dom0_mm opened\n");
- return 0;
-}
-
-static int
-dom0_release(struct inode *inode, struct file *file)
-{
- int ret = 0;
- struct dom0_mm_data *mm_data = file->private_data;
-
- if (mm_data == NULL)
- return ret;
-
- mutex_lock(&dom0_dev.data_lock);
- if (--mm_data->refcnt == 0)
- ret = dom0_memory_release(mm_data);
- mutex_unlock(&dom0_dev.data_lock);
-
- file->private_data = NULL;
-	XEN_PRINT("/dev/dom0_mm closed\n");
- return ret;
-}
-
-static int
-dom0_mmap(struct file *file, struct vm_area_struct *vm)
-{
- int status = 0;
- uint32_t idx = vm->vm_pgoff;
- uint64_t pfn, size = vm->vm_end - vm->vm_start;
- struct dom0_mm_data *mm_data = file->private_data;
-
- if(mm_data == NULL)
- return -EINVAL;
-
- mutex_lock(&dom0_dev.data_lock);
- if (idx >= mm_data->num_memseg) {
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- if (size > mm_data->seg_info[idx].size){
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- XEN_PRINT("mmap memseg idx =%d,size = 0x%llx\n", idx, size);
-
- pfn = mm_data->seg_info[idx].pfn;
- mutex_unlock(&dom0_dev.data_lock);
-
- status = remap_pfn_range(vm, vm->vm_start, pfn, size, PAGE_SHARED);
-
- return status;
-}
-static int
-dom0_ioctl(struct file *file,
- unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- int idx, ret;
- char name[DOM0_NAME_MAX] = {0};
- struct memory_info meminfo;
- struct dom0_mm_data *mm_data = file->private_data;
-
- XEN_PRINT("IOCTL num=0x%0x param=0x%0lx \n", ioctl_num, ioctl_param);
-
- /**
- * Switch according to the ioctl called
- */
-	switch (_IOC_NR(ioctl_num)) {
- case _IOC_NR(RTE_DOM0_IOCTL_PREPARE_MEMSEG):
- ret = copy_from_user(&meminfo, (void *)ioctl_param,
- sizeof(struct memory_info));
- if (ret)
- return -EFAULT;
-
- if (mm_data != NULL) {
- XEN_ERR("Cannot create memory segment for the same"
- " file descriptor\n");
- return -EINVAL;
- }
-
- /* Allocate private data */
- mm_data = vmalloc(sizeof(struct dom0_mm_data));
- if (!mm_data) {
- XEN_ERR("Unable to allocate device private data\n");
- return -ENOMEM;
- }
- memset(mm_data, 0, sizeof(struct dom0_mm_data));
-
- mutex_lock(&dom0_dev.data_lock);
-		/* check if we can allocate memory */
- if (dom0_check_memory(&meminfo) < 0) {
- mutex_unlock(&dom0_dev.data_lock);
- vfree(mm_data);
- return -EINVAL;
- }
-
-		/* allocate memory and create the memory segments */
-		if (dom0_prepare_memsegs(&meminfo, mm_data) < 0) {
-			XEN_ERR("failed to create memory segments.\n");
- mutex_unlock(&dom0_dev.data_lock);
- return -EIO;
- }
-
- file->private_data = mm_data;
- mutex_unlock(&dom0_dev.data_lock);
- break;
-
-	/* support multiple processes sharing the same memory mapping */
- case _IOC_NR(RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG):
- ret = copy_from_user(name, (void *)ioctl_param,
- sizeof(char) * DOM0_NAME_MAX);
- if (ret)
- return -EFAULT;
-
- mutex_lock(&dom0_dev.data_lock);
- idx = dom0_find_memdata(name);
- if (idx < 0) {
- mutex_unlock(&dom0_dev.data_lock);
- return -EINVAL;
- }
-
- mm_data = dom0_dev.mm_data[idx];
- mm_data->refcnt++;
- file->private_data = mm_data;
- mutex_unlock(&dom0_dev.data_lock);
- break;
-
- case _IOC_NR(RTE_DOM0_IOCTL_GET_NUM_MEMSEG):
- ret = copy_to_user((void *)ioctl_param, &mm_data->num_memseg,
- sizeof(int));
- if (ret)
- return -EFAULT;
- break;
-
- case _IOC_NR(RTE_DOM0_IOCTL_GET_MEMSEG_INFO):
- ret = copy_to_user((void *)ioctl_param,
- &mm_data->seg_info[0],
- sizeof(struct memseg_info) *
- mm_data->num_memseg);
- if (ret)
- return -EFAULT;
- break;
- default:
- XEN_PRINT("IOCTL default \n");
- break;
- }
-
- return 0;
-}
-
-module_init(dom0_init);
-module_exit(dom0_exit);
-
-module_param(rsv_memsize, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rsv_memsize, "Xen-dom0 reserved memory size (MB).\n");
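For reference, a consumer of this (now removed) driver went through the same ioctl/mmap sequence as the EAL code deleted earlier in this patch; a compressed sketch, with dom0_mm_example as an illustrative name and most error handling omitted:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <exec-env/rte_dom0_common.h>	/* the removed header shown above */

/* static: each memseg_info carries a large mfn[] array */
static struct memseg_info si[DOM0_NUM_MEMSEG];

static int
dom0_mm_example(void)
{
	struct memory_info mi;
	int i, n = 0, fd = open(DOM0_MM_DEV, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&mi, 0, sizeof(mi));
	snprintf(mi.name, DOM0_NAME_MAX, "rte-example");
	mi.size = 64;	/* MB; the driver requires an even value */

	if (ioctl(fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &mi) < 0 ||
	    ioctl(fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &n) < 0 ||
	    ioctl(fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, si) < 0)
		return -1;

	/* dom0_mmap() uses vm_pgoff as the segment index */
	for (i = 0; i < n; i++)
		mmap(NULL, si[i].size, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, (off_t)i * getpagesize());
	return 0;
}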
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index 3a8f1540..f4f46c1b 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -44,8 +44,6 @@ DPDK_2.0 {
rte_free;
rte_get_hpet_cycles;
rte_get_hpet_hz;
- rte_get_log_level;
- rte_get_log_type;
rte_get_tsc_hz;
rte_hexdump;
rte_intr_callback_register;
@@ -62,9 +60,7 @@ DPDK_2.0 {
rte_malloc_set_limit;
rte_malloc_socket;
rte_malloc_validate;
- rte_malloc_virt2phy;
rte_mem_lock_page;
- rte_mem_phy2mch;
rte_mem_virt2phy;
rte_memdump;
rte_memory_get_nchannel;
@@ -78,8 +74,6 @@ DPDK_2.0 {
rte_openlog_stream;
rte_realloc;
rte_set_application_usage_hook;
- rte_set_log_level;
- rte_set_log_type;
rte_socket_id;
rte_strerror;
rte_strsplit;
@@ -87,8 +81,6 @@ DPDK_2.0 {
rte_thread_get_affinity;
rte_thread_set_affinity;
rte_vlog;
- rte_xen_dom0_memory_attach;
- rte_xen_dom0_memory_init;
rte_zmalloc;
rte_zmalloc_socket;
@@ -118,8 +110,6 @@ DPDK_2.2 {
rte_keepalive_dispatch_pings;
rte_keepalive_mark_alive;
rte_keepalive_register_core;
- rte_xen_dom0_supported;
- rte_xen_mem_phy2mch;
} DPDK_2.1;
@@ -134,7 +124,6 @@ DPDK_16.04 {
DPDK_16.07 {
global:
- pci_get_sysfs_path;
rte_keepalive_mark_sleep;
rte_keepalive_register_relay_callback;
rte_rtm_supported;
@@ -174,25 +163,6 @@ DPDK_17.05 {
rte_log_set_global_level;
rte_log_set_level;
rte_log_set_level_regexp;
- rte_pci_detach;
- rte_pci_dump;
- rte_pci_ioport_map;
- rte_pci_ioport_read;
- rte_pci_ioport_unmap;
- rte_pci_ioport_write;
- rte_pci_map_device;
- rte_pci_probe;
- rte_pci_probe_one;
- rte_pci_read_config;
- rte_pci_register;
- rte_pci_scan;
- rte_pci_unmap_device;
- rte_pci_unregister;
- rte_pci_write_config;
- rte_vdev_init;
- rte_vdev_register;
- rte_vdev_uninit;
- rte_vdev_unregister;
vfio_get_container_fd;
vfio_get_group_fd;
vfio_get_group_no;
@@ -209,6 +179,27 @@ DPDK_17.08 {
} DPDK_17.05;
+DPDK_17.11 {
+ global:
+
+ rte_eal_create_uio_dev;
+ rte_bus_get_iommu_class;
+ rte_eal_has_pci;
+ rte_eal_iova_mode;
+ rte_eal_mbuf_default_mempool_ops;
+ rte_eal_using_phys_addrs;
+ rte_eal_vfio_intr_mode;
+ rte_lcore_has_role;
+ rte_malloc_virt2iova;
+ rte_mem_virt2iova;
+ rte_vfio_enable;
+ rte_vfio_is_enabled;
+ rte_vfio_noiommu_is_enabled;
+ rte_vfio_release_device;
+ rte_vfio_setup_device;
+
+} DPDK_17.08;
+
EXPERIMENTAL {
global:
@@ -217,28 +208,31 @@ EXPERIMENTAL {
rte_eal_devargs_remove;
rte_eal_hotplug_add;
rte_eal_hotplug_remove;
- rte_service_disable_on_lcore;
+ rte_service_component_register;
+ rte_service_component_unregister;
+ rte_service_component_runstate_set;
rte_service_dump;
- rte_service_enable_on_lcore;
rte_service_get_by_id;
rte_service_get_by_name;
rte_service_get_count;
- rte_service_get_enabled_on_lcore;
- rte_service_is_running;
+ rte_service_get_name;
rte_service_lcore_add;
rte_service_lcore_count;
+ rte_service_lcore_count_services;
rte_service_lcore_del;
rte_service_lcore_list;
rte_service_lcore_reset_all;
rte_service_lcore_start;
rte_service_lcore_stop;
+ rte_service_map_lcore_get;
+ rte_service_map_lcore_set;
rte_service_probe_capability;
- rte_service_register;
rte_service_reset;
+ rte_service_run_iter_on_app_lcore;
+ rte_service_runstate_get;
+ rte_service_runstate_set;
+ rte_service_set_runstate_mapped_check;
rte_service_set_stats_enable;
- rte_service_start;
rte_service_start_with_defaults;
- rte_service_stop;
- rte_service_unregister;
-} DPDK_17.08;
+} DPDK_17.11;
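The physical-address helpers dropped from the 2.0 node are superseded by the IOVA-aware symbols in the new 17.11 node; a minimal sketch of the replacement call (assuming an initialized EAL; iova_example is an illustrative name):

#include <rte_malloc.h>
#include <rte_memory.h>

/* sketch: 17.11 replaces the *_virt2phy style helpers with IOVA ones */
static void
iova_example(void)
{
	void *buf = rte_malloc(NULL, 4096, 4096);
	rte_iova_t iova;

	if (buf == NULL)
		return;
	iova = rte_malloc_virt2iova(buf);	/* was rte_malloc_virt2phy */
	(void)iova;
	rte_free(buf);
}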
diff --git a/lib/librte_efd/Makefile b/lib/librte_efd/Makefile
index b9277bc5..16e450e8 100644
--- a/lib/librte_efd/Makefile
+++ b/lib/librte_efd/Makefile
@@ -36,6 +36,7 @@ LIB = librte_efd.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_ring -lrte_hash
EXPORT_MAP := rte_efd_version.map
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index 4d9a0887..8771d042 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -42,7 +42,6 @@
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
-#include <rte_memzone.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memcpy.h>
@@ -1278,7 +1277,7 @@ efd_lookup_internal(const struct efd_online_group_entry * const group,
switch (lookup_fn) {
-#if defined(RTE_ARCH_X86)
+#if defined(RTE_ARCH_X86) && defined(CC_SUPPORT_AVX2)
case EFD_LOOKUP_AVX2:
return efd_lookup_internal_avx2(group->hash_idx,
group->lookup_table,
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index db692ae4..394cc9c0 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -38,14 +38,18 @@ LIB = librte_ethdev.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_net -lrte_eal -lrte_mempool -lrte_ring
+LDLIBS += -lrte_mbuf
-EXPORT_MAP := rte_ether_version.map
+EXPORT_MAP := rte_ethdev_version.map
-LIBABIVER := 6
+LIBABIVER := 8
SRCS-y += rte_ethdev.c
SRCS-y += rte_flow.c
SRCS-y += rte_tm.c
+SRCS-y += rte_mtr.c
+SRCS-y += ethdev_profile.c
#
# Export include files
@@ -59,5 +63,7 @@ SYMLINK-y-include += rte_flow.h
SYMLINK-y-include += rte_flow_driver.h
SYMLINK-y-include += rte_tm.h
SYMLINK-y-include += rte_tm_driver.h
+SYMLINK-y-include += rte_mtr.h
+SYMLINK-y-include += rte_mtr_driver.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ether/ethdev_profile.c b/lib/librte_ether/ethdev_profile.c
new file mode 100644
index 00000000..c9cb8420
--- /dev/null
+++ b/lib/librte_ether/ethdev_profile.c
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ethdev_profile.h"
+
+/**
+ * This conditional block enables RX queue profiling by tracking wasted
+ * iterations, i.e. iterations that yield no RX packets. Profiling is
+ * performed using the Instrumentation and Tracing Technology (ITT) API,
+ * employed by the Intel (R) VTune (TM) Amplifier.
+ */
+#ifdef RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS
+
+#include <ittnotify.h>
+
+#define ITT_MAX_NAME_LEN (100)
+
+/**
+ * Auxiliary ITT structure belonging to an Ethernet device and used to:
+ * - track RX queue state to determine whether it is wasting loop iterations
+ * - begin or end ITT task using task domain and task name (handle)
+ */
+struct itt_profile_rx_data {
+ /**
+ * ITT domains for each queue.
+ */
+ __itt_domain *domains[RTE_MAX_QUEUES_PER_PORT];
+ /**
+ * ITT task names for each queue.
+ */
+ __itt_string_handle *handles[RTE_MAX_QUEUES_PER_PORT];
+ /**
+ * Flags indicating the per-queue state. Possible values:
+ * 1 - queue is wasting iterations,
+ * 0 - otherwise.
+ */
+ uint8_t queue_state[RTE_MAX_QUEUES_PER_PORT];
+};
+
+/**
+ * The pool of *itt_profile_rx_data* structures.
+ */
+struct itt_profile_rx_data itt_rx_data[RTE_MAX_ETHPORTS];
+
+
+/**
+ * This callback function manages ITT task collection on a given port and
+ * queue. It must be registered with rte_eth_add_rx_callback() to be called
+ * from rte_eth_rx_burst(). For more details, see the rte_rx_callback_fn
+ * function type declaration.
+ */
+static uint16_t
+collect_itt_rx_burst_cb(uint16_t port_id, uint16_t queue_id,
+ __rte_unused struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, __rte_unused void *user_param)
+{
+ if (unlikely(nb_pkts == 0)) {
+ if (!itt_rx_data[port_id].queue_state[queue_id]) {
+ __itt_task_begin(
+ itt_rx_data[port_id].domains[queue_id],
+ __itt_null, __itt_null,
+ itt_rx_data[port_id].handles[queue_id]);
+ itt_rx_data[port_id].queue_state[queue_id] = 1;
+ }
+ } else {
+ if (unlikely(itt_rx_data[port_id].queue_state[queue_id])) {
+ __itt_task_end(
+ itt_rx_data[port_id].domains[queue_id]);
+ itt_rx_data[port_id].queue_state[queue_id] = 0;
+ }
+ }
+ return nb_pkts;
+}
+
+/**
+ * Initialization of itt_profile_rx_data for a given Ethernet device.
+ * This function must be invoked when the Ethernet device is being
+ * configured. The result is stored in the global array *itt_rx_data*.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param port_name
+ * The name of the Ethernet device.
+ * @param rx_queue_num
+ * The number of RX queues on specified port.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static inline int
+itt_profile_rx_init(uint16_t port_id, char *port_name, uint8_t rx_queue_num)
+{
+ uint16_t q_id;
+
+ for (q_id = 0; q_id < rx_queue_num; ++q_id) {
+ char domain_name[ITT_MAX_NAME_LEN];
+
+ snprintf(domain_name, sizeof(domain_name),
+ "RXBurst.WastedIterations.Port_%s.Queue_%d",
+ port_name, q_id);
+ itt_rx_data[port_id].domains[q_id]
+ = __itt_domain_create(domain_name);
+
+ char task_name[ITT_MAX_NAME_LEN];
+
+ snprintf(task_name, sizeof(task_name),
+ "port id: %d; queue id: %d",
+ port_id, q_id);
+ itt_rx_data[port_id].handles[q_id]
+ = __itt_string_handle_create(task_name);
+
+ itt_rx_data[port_id].queue_state[q_id] = 0;
+
+ if (!rte_eth_add_rx_callback(
+ port_id, q_id, collect_itt_rx_burst_cb, NULL)) {
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+#endif /* RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS */
+
+int
+__rte_eth_profile_rx_init(__rte_unused uint16_t port_id,
+ __rte_unused struct rte_eth_dev *dev)
+{
+#ifdef RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS
+ return itt_profile_rx_init(
+ port_id, dev->data->name, dev->data->nb_rx_queues);
+#endif
+ return 0;
+}
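
As a point of reference, the same rte_eth_add_rx_callback() hook can be used from an application without ITT. The following is a minimal sketch, not part of the patch, of a plain counter of wasted RX iterations; the names wasted_polls, count_wasted_cb and register_wasted_poll_counter are hypothetical:

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

/* Hypothetical per-port, per-queue counters of empty polls. */
static uint64_t wasted_polls[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

static uint16_t
count_wasted_cb(uint16_t port_id, uint16_t queue_id,
		__rte_unused struct rte_mbuf *pkts[], uint16_t nb_pkts,
		__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
	/* A poll that returned no packets counts as a wasted iteration. */
	if (nb_pkts == 0)
		wasted_polls[port_id][queue_id]++;
	return nb_pkts; /* pass the burst through unchanged */
}

/* Register the callback on every RX queue of a configured port. */
static int
register_wasted_poll_counter(uint16_t port_id, uint16_t nb_rx_queues)
{
	uint16_t q;

	for (q = 0; q < nb_rx_queues; q++)
		if (rte_eth_add_rx_callback(port_id, q,
				count_wasted_cb, NULL) == NULL)
			return -rte_errno;
	return 0;
}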
diff --git a/examples/vhost_xen/main.h b/lib/librte_ether/ethdev_profile.h
index 5ff48fd9..697facff 100644
--- a/examples/vhost_xen/main.h
+++ b/lib/librte_ether/ethdev_profile.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,36 +31,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _MAIN_H_
-#define _MAIN_H_
+#ifndef _RTE_ETHDEV_PROFILE_H_
+#define _RTE_ETHDEV_PROFILE_H_
-/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
-#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
+#include "rte_ethdev.h"
-/*
- * Device linked list structure for data path.
- */
-struct virtio_net_data_ll
-{
- struct virtio_net *dev; /* Pointer to device created by configuration core. */
- struct virtio_net_data_ll *next; /* Pointer to next device in linked list. */
-};
-
-/*
- * Structure containing data core specific information.
+/**
+ * Initialization of RX queue profiling for an Ethernet device.
+ * The implementation of this function depends on the chosen profiling
+ * method, defined in the build configuration.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param dev
+ * Pointer to struct rte_eth_dev corresponding to given port_id.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
*/
-struct lcore_ll_info
-{
- struct virtio_net_data_ll *ll_root_free; /* Pointer to head in free linked list. */
- struct virtio_net_data_ll *ll_root_used; /* Pointer to head of used linked list. */
- uint32_t device_num; /* Number of devices on lcore. */
- volatile uint8_t dev_removal_flag; /* Flag to synchronize device removal. */
-};
+int
+__rte_eth_profile_rx_init(uint16_t port_id, struct rte_eth_dev *dev);
-struct lcore_info
-{
- struct lcore_ll_info *lcore_ll; /* Pointer to data core specific lcore_ll_info struct */
-};
-#endif /* _MAIN_H_ */
+#endif
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 0597641e..318af286 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -67,6 +66,7 @@
#include "rte_ether.h"
#include "rte_ethdev.h"
+#include "ethdev_profile.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
@@ -138,8 +138,8 @@ enum {
STAT_QMAP_RX
};
-uint8_t
-rte_eth_find_next(uint8_t port_id)
+uint16_t
+rte_eth_find_next(uint16_t port_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
@@ -178,16 +178,14 @@ rte_eth_dev_allocated(const char *name)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED &&
- rte_eth_devices[i].device) {
- if (!strcmp(rte_eth_devices[i].device->name, name))
- return &rte_eth_devices[i];
- }
+ if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
+ strcmp(rte_eth_devices[i].data->name, name) == 0)
+ return &rte_eth_devices[i];
}
return NULL;
}
-static uint8_t
+static uint16_t
rte_eth_dev_find_free_port(void)
{
unsigned i;
@@ -200,7 +198,7 @@ rte_eth_dev_find_free_port(void)
}
static struct rte_eth_dev *
-eth_dev_get(uint8_t port_id)
+eth_dev_get(uint16_t port_id)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
@@ -216,7 +214,7 @@ eth_dev_get(uint8_t port_id)
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
- uint8_t port_id;
+ uint16_t port_id;
struct rte_eth_dev *eth_dev;
port_id = rte_eth_dev_find_free_port();
@@ -251,7 +249,7 @@ rte_eth_dev_allocate(const char *name)
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
- uint8_t i;
+ uint16_t i;
struct rte_eth_dev *eth_dev;
if (rte_eth_dev_data == NULL)
@@ -285,7 +283,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
}
int
-rte_eth_dev_is_valid_port(uint8_t port_id)
+rte_eth_dev_is_valid_port(uint16_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
(rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
@@ -296,17 +294,24 @@ rte_eth_dev_is_valid_port(uint8_t port_id)
}
int
-rte_eth_dev_socket_id(uint8_t port_id)
+rte_eth_dev_socket_id(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return rte_eth_devices[port_id].data->numa_node;
}
-uint8_t
+void *
+rte_eth_dev_get_sec_ctx(uint8_t port_id)
+{
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
+ return rte_eth_devices[port_id].security_ctx;
+}
+
+uint16_t
rte_eth_dev_count(void)
{
- uint8_t p;
- uint8_t count;
+ uint16_t p;
+ uint16_t count;
count = 0;
@@ -317,9 +322,9 @@ rte_eth_dev_count(void)
}
int
-rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
+rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
- const char *tmp;
+ char *tmp;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -330,15 +335,14 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
/* shouldn't check 'rte_eth_devices[i].data',
* because it might be overwritten by VDEV PMD */
- tmp = rte_eth_devices[port_id].device->name;
+ tmp = rte_eth_dev_data[port_id].name;
strcpy(name, tmp);
return 0;
}
int
-rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
+rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
- int ret;
int i;
if (name == NULL) {
@@ -347,37 +351,20 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
}
RTE_ETH_FOREACH_DEV(i) {
- if (!rte_eth_devices[i].device)
- continue;
+ if (!strncmp(name,
+ rte_eth_dev_data[i].name, strlen(name))) {
- ret = strncmp(name, rte_eth_devices[i].device->name,
- strlen(name));
- if (ret == 0) {
*port_id = i;
+
return 0;
}
}
return -ENODEV;
}
-static int
-rte_eth_dev_is_detachable(uint8_t port_id)
-{
- uint32_t dev_flags;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
-
- dev_flags = rte_eth_devices[port_id].data->dev_flags;
- if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
- (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
- return 0;
- else
- return 1;
-}
-
/* attach the new device, then store port_id of the device */
int
-rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
+rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
int ret = -1;
int current = rte_eth_dev_count();
@@ -423,21 +410,28 @@ err:
/* detach the device, then store the name of the device */
int
-rte_eth_dev_detach(uint8_t port_id, char *name)
+rte_eth_dev_detach(uint16_t port_id, char *name)
{
+ uint32_t dev_flags;
int ret = -1;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
if (name == NULL) {
ret = -EINVAL;
goto err;
}
- /* FIXME: move this to eal, once device flags are relocated there */
- if (rte_eth_dev_is_detachable(port_id))
+ dev_flags = rte_eth_devices[port_id].data->dev_flags;
+ if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
+ RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
+ port_id);
+ ret = -ENOTSUP;
goto err;
+ }
- snprintf(name, RTE_DEV_NAME_MAX_LEN, "%s",
- rte_eth_devices[port_id].device->name);
+ snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
+ "%s", rte_eth_devices[port_id].data->name);
ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
if (ret < 0)
@@ -501,7 +495,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
}
int
-rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
struct rte_eth_dev *dev;
@@ -527,7 +521,7 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
}
int
-rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
struct rte_eth_dev *dev;
@@ -553,7 +547,7 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
}
int
-rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
struct rte_eth_dev *dev;
@@ -579,7 +573,7 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
}
int
-rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
struct rte_eth_dev *dev;
@@ -687,12 +681,102 @@ rte_eth_speed_bitflag(uint32_t speed, int duplex)
}
}
+/**
+ * A conversion function from the rxmode bitfield API to the offloads API.
+ */
+static void
+rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
+ uint64_t *rx_offloads)
+{
+ uint64_t offloads = 0;
+
+ if (rxmode->header_split == 1)
+ offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
+ if (rxmode->hw_ip_checksum == 1)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ if (rxmode->hw_vlan_filter == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ if (rxmode->hw_vlan_strip == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxmode->hw_vlan_extend == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ if (rxmode->jumbo_frame == 1)
+ offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (rxmode->hw_strip_crc == 1)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (rxmode->enable_scatter == 1)
+ offloads |= DEV_RX_OFFLOAD_SCATTER;
+ if (rxmode->enable_lro == 1)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ if (rxmode->hw_timestamp == 1)
+ offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ if (rxmode->security == 1)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+
+ *rx_offloads = offloads;
+}
+
+/**
+ * A conversion function from the rxmode offloads API to the bitfield API.
+ */
+static void
+rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
+ struct rte_eth_rxmode *rxmode)
+{
+
+ if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ rxmode->header_split = 1;
+ else
+ rxmode->header_split = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxmode->hw_ip_checksum = 1;
+ else
+ rxmode->hw_ip_checksum = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ rxmode->hw_vlan_filter = 1;
+ else
+ rxmode->hw_vlan_filter = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->hw_vlan_strip = 1;
+ else
+ rxmode->hw_vlan_strip = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ rxmode->hw_vlan_extend = 1;
+ else
+ rxmode->hw_vlan_extend = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ rxmode->jumbo_frame = 1;
+ else
+ rxmode->jumbo_frame = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
+ rxmode->hw_strip_crc = 1;
+ else
+ rxmode->hw_strip_crc = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ rxmode->enable_scatter = 1;
+ else
+ rxmode->enable_scatter = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ rxmode->enable_lro = 1;
+ else
+ rxmode->enable_lro = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ rxmode->hw_timestamp = 1;
+ else
+ rxmode->hw_timestamp = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ rxmode->security = 1;
+ else
+ rxmode->security = 0;
+}
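
To illustrate the two RX configuration styles these converters bridge: with ignore_offload_bitfield set, ethdev takes rxmode.offloads and derives the legacy bitfield for PMDs that still use it. A minimal sketch, with illustrative offload choices and a hypothetical helper name:

#include <rte_ethdev.h>

static int
configure_with_rx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	/* Take rxmode.offloads instead of the legacy bitfield. */
	conf.rxmode.ignore_offload_bitfield = 1;
	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_VLAN_STRIP;

	/* One RX and one TX queue, for brevity. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}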
+
int
-rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_conf = *dev_conf;
int diag;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -722,8 +806,20 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return -EBUSY;
}
+ /*
+ * Convert between the two offloads APIs so that PMDs need to
+ * support only one of them.
+ */
+ if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
+ rte_eth_convert_rx_offload_bitfield(
+ &dev_conf->rxmode, &local_conf.rxmode.offloads);
+ } else {
+ rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
+ &local_conf.rxmode);
+ }
+
/* Copy the dev_conf parameter into the dev structure */
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+ memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
/*
* Check that the numbers of RX and TX queues are not greater
@@ -767,7 +863,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
* If jumbo frames are enabled, check that the maximum RX packet
* length is supported by the configured device.
*/
- if (dev_conf->rxmode.jumbo_frame == 1) {
+ if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_conf->rxmode.max_rx_pkt_len >
dev_info.max_rx_pktlen) {
RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
@@ -819,6 +915,16 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return diag;
}
+ /* Initialize Rx profiling if enabled at compilation time. */
+ diag = __rte_eth_profile_rx_init(port_id, dev);
+ if (diag != 0) {
+ RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
return 0;
}
@@ -839,7 +945,7 @@ _rte_eth_dev_reset(struct rte_eth_dev *dev)
}
static void
-rte_eth_dev_config_restore(uint8_t port_id)
+rte_eth_dev_config_restore(uint16_t port_id)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
@@ -894,7 +1000,7 @@ rte_eth_dev_config_restore(uint8_t port_id)
}
int
-rte_eth_dev_start(uint8_t port_id)
+rte_eth_dev_start(uint16_t port_id)
{
struct rte_eth_dev *dev;
int diag;
@@ -906,7 +1012,7 @@ rte_eth_dev_start(uint8_t port_id)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (dev->data->dev_started != 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
" already started\n",
port_id);
return 0;
@@ -928,7 +1034,7 @@ rte_eth_dev_start(uint8_t port_id)
}
void
-rte_eth_dev_stop(uint8_t port_id)
+rte_eth_dev_stop(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -938,7 +1044,7 @@ rte_eth_dev_stop(uint8_t port_id)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (dev->data->dev_started == 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
" already stopped\n",
port_id);
return;
@@ -949,7 +1055,7 @@ rte_eth_dev_stop(uint8_t port_id)
}
int
-rte_eth_dev_set_link_up(uint8_t port_id)
+rte_eth_dev_set_link_up(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -962,7 +1068,7 @@ rte_eth_dev_set_link_up(uint8_t port_id)
}
int
-rte_eth_dev_set_link_down(uint8_t port_id)
+rte_eth_dev_set_link_down(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -975,7 +1081,7 @@ rte_eth_dev_set_link_down(uint8_t port_id)
}
void
-rte_eth_dev_close(uint8_t port_id)
+rte_eth_dev_close(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -995,7 +1101,24 @@ rte_eth_dev_close(uint8_t port_id)
}
int
-rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+rte_eth_dev_reset(uint16_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
+
+ rte_eth_dev_stop(port_id);
+ ret = dev->dev_ops->dev_reset(dev);
+
+ return ret;
+}
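
The new rte_eth_dev_reset() stops the port and invokes the driver's dev_reset; the port is left stopped and must be reconfigured by the caller. A minimal sketch, assuming the PMD has signalled that a reset is needed (e.g. from an RTE_ETH_EVENT_INTR_RESET event handler) and using a hypothetical helper name:

#include <rte_ethdev.h>

static int
handle_port_reset(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_reset(port_id); /* stop + driver dev_reset */
	if (ret != 0)
		return ret;

	/* The port is left stopped: redo configure/queue setup here,
	 * then restart it. */
	return rte_eth_dev_start(port_id);
}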
+
+int
+rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
@@ -1004,6 +1127,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
uint32_t mbp_buf_size;
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf local_conf;
void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1074,8 +1198,18 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
+ local_conf = *rx_conf;
+ if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
+ /**
+ * Reflect port offloads to queue offloads so that the
+ * port-level offloads are not discarded.
+ */
+ rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
+ &local_conf.offloads);
+ }
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
- socket_id, rx_conf, mp);
+ socket_id, &local_conf, mp);
if (!ret) {
if (!dev->data->min_rx_buf_size ||
dev->data->min_rx_buf_size > mbp_buf_size)
@@ -1085,13 +1219,63 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
return ret;
}
+/**
+ * A conversion function from the txq_flags API to the TX offloads API.
+ */
+static void
+rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
+{
+ uint64_t offloads = 0;
+
+ if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+ offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
+ offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
+ offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+ if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
+ offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
+ offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+ if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
+ (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
+ offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ *tx_offloads = offloads;
+}
+
+/**
+ * A conversion function from the TX offloads API to the txq_flags API.
+ */
+static void
+rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
+{
+ uint32_t flags = 0;
+
+ if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
+ flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
+ flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
+ if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
+ flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
+ if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
+
+ *txq_flags = flags;
+}
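
A minimal sketch of requesting TX queue offloads through the new field; ETH_TXQ_FLAGS_IGNORE tells ethdev to take tx_conf->offloads and derive txq_flags for PMDs that still use it. The helper name and offload choice are illustrative:

#include <rte_ethdev.h>

static int
setup_txq_with_offloads(uint16_t port_id, uint16_t queue_id,
		uint16_t nb_desc, unsigned int socket_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &info);
	txconf = info.default_txconf;

	/* Use the offloads field, not the legacy flag bits. */
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			socket_id, &txconf);
}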
+
int
-rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf local_conf;
void **txq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1136,8 +1320,23 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
+ /*
+ * Convert between the two offloads APIs so that PMDs need to
+ * support only one of them.
+ */
+ local_conf = *tx_conf;
+ if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
+ rte_eth_convert_txq_offloads(tx_conf->offloads,
+ &local_conf.txq_flags);
+ /* Keep the ignore flag. */
+ local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
+ } else {
+ rte_eth_convert_txq_flags(tx_conf->txq_flags,
+ &local_conf.offloads);
+ }
+
return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
- socket_id, tx_conf);
+ socket_id, &local_conf);
}
void
@@ -1190,7 +1389,7 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
}
int
-rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
+rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -1204,7 +1403,7 @@ rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
}
void
-rte_eth_promiscuous_enable(uint8_t port_id)
+rte_eth_promiscuous_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1217,7 +1416,7 @@ rte_eth_promiscuous_enable(uint8_t port_id)
}
void
-rte_eth_promiscuous_disable(uint8_t port_id)
+rte_eth_promiscuous_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1230,7 +1429,7 @@ rte_eth_promiscuous_disable(uint8_t port_id)
}
int
-rte_eth_promiscuous_get(uint8_t port_id)
+rte_eth_promiscuous_get(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1241,7 +1440,7 @@ rte_eth_promiscuous_get(uint8_t port_id)
}
void
-rte_eth_allmulticast_enable(uint8_t port_id)
+rte_eth_allmulticast_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1254,7 +1453,7 @@ rte_eth_allmulticast_enable(uint8_t port_id)
}
void
-rte_eth_allmulticast_disable(uint8_t port_id)
+rte_eth_allmulticast_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1267,7 +1466,7 @@ rte_eth_allmulticast_disable(uint8_t port_id)
}
int
-rte_eth_allmulticast_get(uint8_t port_id)
+rte_eth_allmulticast_get(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1292,7 +1491,7 @@ rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
}
void
-rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
+rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
@@ -1309,7 +1508,7 @@ rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
}
void
-rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
+rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
@@ -1326,7 +1525,7 @@ rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
}
int
-rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
+rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
struct rte_eth_dev *dev;
@@ -1337,25 +1536,42 @@ rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
- (*dev->dev_ops->stats_get)(dev, stats);
- return 0;
+ return (*dev->dev_ops->stats_get)(dev, stats);
}
-void
-rte_eth_stats_reset(uint8_t port_id)
+int
+rte_eth_stats_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
- RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
(*dev->dev_ops->stats_reset)(dev);
dev->data->rx_mbuf_alloc_failed = 0;
+
+ return 0;
+}
+
+static inline int
+get_xstats_basic_count(struct rte_eth_dev *dev)
+{
+ uint16_t nb_rxqs, nb_txqs;
+ int count;
+
+ nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ count = RTE_NB_STATS;
+ count += nb_rxqs * RTE_NB_RXQ_STATS;
+ count += nb_txqs * RTE_NB_TXQ_STATS;
+
+ return count;
}
static int
-get_xstats_count(uint8_t port_id)
+get_xstats_count(uint16_t port_id)
{
struct rte_eth_dev *dev;
int count;
@@ -1375,16 +1591,14 @@ get_xstats_count(uint8_t port_id)
} else
count = 0;
- count += RTE_NB_STATS;
- count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
- RTE_NB_RXQ_STATS;
- count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
- RTE_NB_TXQ_STATS;
+
+ count += get_xstats_basic_count(dev);
+
return count;
}
int
-rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
uint64_t *id)
{
int cnt_xstats, idx_xstat;
@@ -1427,125 +1641,97 @@ rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
return -EINVAL;
}
+/* retrieve ethdev extended statistics names */
int
-rte_eth_xstats_get_names_by_id(uint8_t port_id,
+rte_eth_xstats_get_names_by_id(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names, unsigned int size,
uint64_t *ids)
{
- /* Get all xstats */
+ struct rte_eth_xstat_name *xstats_names_copy;
+ unsigned int no_basic_stat_requested = 1;
+ unsigned int expected_entries;
+ struct rte_eth_dev *dev;
+ unsigned int i;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ ret = get_xstats_count(port_id);
+ if (ret < 0)
+ return ret;
+ expected_entries = (unsigned int)ret;
+
+ /* Return max number of stats if no ids given */
if (!ids) {
- struct rte_eth_dev *dev;
- int cnt_used_entries;
- int cnt_expected_entries;
- int cnt_driver_entries;
- uint32_t idx, id_queue;
- uint16_t num_q;
-
- cnt_expected_entries = get_xstats_count(port_id);
- if (xstats_names == NULL || cnt_expected_entries < 0 ||
- (int)size < cnt_expected_entries)
- return cnt_expected_entries;
-
- /* port_id checked in get_xstats_count() */
- dev = &rte_eth_devices[port_id];
- cnt_used_entries = 0;
-
- for (idx = 0; idx < RTE_NB_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "%s", rte_stats_strings[idx].name);
- cnt_used_entries++;
- }
- num_q = RTE_MIN(dev->data->nb_rx_queues,
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (id_queue = 0; id_queue < num_q; id_queue++) {
- for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "rx_q%u%s",
- id_queue,
- rte_rxq_stats_strings[idx].name);
- cnt_used_entries++;
- }
+ if (!xstats_names)
+ return expected_entries;
+ else if (xstats_names && size < expected_entries)
+ return expected_entries;
+ }
- }
- num_q = RTE_MIN(dev->data->nb_tx_queues,
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
- for (id_queue = 0; id_queue < num_q; id_queue++) {
- for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "tx_q%u%s",
- id_queue,
- rte_txq_stats_strings[idx].name);
- cnt_used_entries++;
+ if (ids && !xstats_names)
+ return -EINVAL;
+
+ if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
+ unsigned int basic_count = get_xstats_basic_count(dev);
+ uint64_t ids_copy[size];
+
+ for (i = 0; i < size; i++) {
+ if (ids[i] < basic_count) {
+ no_basic_stat_requested = 0;
+ break;
}
- }
- if (dev->dev_ops->xstats_get_names_by_id != NULL) {
- /* If there are any driver-specific xstats, append them
- * to end of list.
+ /*
+ * Convert the ids to the xstats ids known by the PMD;
+ * the ids seen by the user cover basic + extended stats.
*/
- cnt_driver_entries =
- (*dev->dev_ops->xstats_get_names_by_id)(
- dev,
- xstats_names + cnt_used_entries,
- NULL,
- size - cnt_used_entries);
- if (cnt_driver_entries < 0)
- return cnt_driver_entries;
- cnt_used_entries += cnt_driver_entries;
-
- } else if (dev->dev_ops->xstats_get_names != NULL) {
- /* If there are any driver-specific xstats, append them
- * to end of list.
- */
- cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
- dev,
- xstats_names + cnt_used_entries,
- size - cnt_used_entries);
- if (cnt_driver_entries < 0)
- return cnt_driver_entries;
- cnt_used_entries += cnt_driver_entries;
+ ids_copy[i] = ids[i] - basic_count;
}
- return cnt_used_entries;
+ if (no_basic_stat_requested)
+ return (*dev->dev_ops->xstats_get_names_by_id)(dev,
+ xstats_names, ids_copy, size);
}
- /* Get only xstats given by IDS */
- else {
- uint16_t len, i;
- struct rte_eth_xstat_name *xstats_names_copy;
- len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
+ /* Retrieve all stats */
+ if (!ids) {
+ int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
+ expected_entries);
+ if (num_stats < 0 || num_stats > (int)expected_entries)
+ return num_stats;
+ else
+ return expected_entries;
+ }
- xstats_names_copy =
- malloc(sizeof(struct rte_eth_xstat_name) * len);
- if (!xstats_names_copy) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: can't allocate memory for values_copy\n");
- free(xstats_names_copy);
- return -1;
- }
+ xstats_names_copy = calloc(expected_entries,
+ sizeof(struct rte_eth_xstat_name));
- rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
- len, NULL);
+ if (!xstats_names_copy) {
+ RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
+ return -ENOMEM;
+ }
- for (i = 0; i < size; i++) {
- if (ids[i] >= len) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: id value isn't valid\n");
- return -1;
- }
- strcpy(xstats_names[i].name,
- xstats_names_copy[ids[i]].name);
+ /* Fill xstats_names_copy structure */
+ rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries);
+
+ /* Filter stats */
+ for (i = 0; i < size; i++) {
+ if (ids[i] >= expected_entries) {
+ RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
+ free(xstats_names_copy);
+ return -1;
}
- free(xstats_names_copy);
- return size;
+ xstats_names[i] = xstats_names_copy[ids[i]];
}
+
+ free(xstats_names_copy);
+ return size;
}
int
-rte_eth_xstats_get_names(uint8_t port_id,
+rte_eth_xstats_get_names(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names,
unsigned int size)
{
@@ -1611,133 +1797,80 @@ rte_eth_xstats_get_names(uint8_t port_id,
/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
- unsigned int n)
+rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
+ uint64_t *values, unsigned int size)
{
- /* If need all xstats */
- if (!ids) {
- struct rte_eth_stats eth_stats;
- struct rte_eth_dev *dev;
- unsigned int count = 0, i, q;
- signed int xcount = 0;
- uint64_t val, *stats_ptr;
- uint16_t nb_rxqs, nb_txqs;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- dev = &rte_eth_devices[port_id];
-
- nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
- nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
- RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
- /* Return generic statistics */
- count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
- (nb_txqs * RTE_NB_TXQ_STATS);
-
-
- /* implemented by the driver */
- if (dev->dev_ops->xstats_get_by_id != NULL) {
- /* Retrieve the xstats from the driver at the end of the
- * xstats struct. Retrieve all xstats.
- */
- xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
- NULL,
- values ? values + count : NULL,
- (n > count) ? n - count : 0);
-
- if (xcount < 0)
- return xcount;
- /* implemented by the driver */
- } else if (dev->dev_ops->xstats_get != NULL) {
- /* Retrieve the xstats from the driver at the end of the
- * xstats struct. Retrieve all xstats.
- * Compatibility for PMD without xstats_get_by_ids
- */
- unsigned int size = (n > count) ? n - count : 1;
- struct rte_eth_xstat xstats[size];
-
- xcount = (*dev->dev_ops->xstats_get)(dev,
- values ? xstats : NULL, size);
-
- if (xcount < 0)
- return xcount;
+ unsigned int no_basic_stat_requested = 1;
+ unsigned int num_xstats_filled;
+ uint16_t expected_entries;
+ struct rte_eth_dev *dev;
+ unsigned int i;
+ int ret;
- if (values != NULL)
- for (i = 0 ; i < (unsigned int)xcount; i++)
- values[i + count] = xstats[i].value;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ expected_entries = get_xstats_count(port_id);
+ struct rte_eth_xstat xstats[expected_entries];
+ dev = &rte_eth_devices[port_id];
- if (n < count + xcount || values == NULL)
- return count + xcount;
+ /* Return max number of stats if no ids given */
+ if (!ids) {
+ if (!values)
+ return expected_entries;
+ else if (values && size < expected_entries)
+ return expected_entries;
+ }
- /* now fill the xstats structure */
- count = 0;
- rte_eth_stats_get(port_id, &eth_stats);
+ if (ids && !values)
+ return -EINVAL;
- /* global stats */
- for (i = 0; i < RTE_NB_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_stats_strings[i].offset);
- val = *stats_ptr;
- values[count++] = val;
- }
+ if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
+ unsigned int basic_count = get_xstats_basic_count(dev);
+ uint64_t ids_copy[size];
- /* per-rxq stats */
- for (q = 0; q < nb_rxqs; q++) {
- for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_rxq_stats_strings[i].offset +
- q * sizeof(uint64_t));
- val = *stats_ptr;
- values[count++] = val;
+ for (i = 0; i < size; i++) {
+ if (ids[i] < basic_count) {
+ no_basic_stat_requested = 0;
+ break;
}
- }
- /* per-txq stats */
- for (q = 0; q < nb_txqs; q++) {
- for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
- stats_ptr = RTE_PTR_ADD(&eth_stats,
- rte_txq_stats_strings[i].offset +
- q * sizeof(uint64_t));
- val = *stats_ptr;
- values[count++] = val;
- }
+ /*
+ * Convert the ids to the xstats ids known by the PMD;
+ * the ids seen by the user cover basic + extended stats.
+ */
+ ids_copy[i] = ids[i] - basic_count;
}
- return count + xcount;
+ if (no_basic_stat_requested)
+ return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
+ values, size);
}
- /* Need only xstats given by IDS array */
- else {
- uint16_t i, size;
- uint64_t *values_copy;
-
- size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
- values_copy = malloc(sizeof(*values_copy) * size);
- if (!values_copy) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: can't allocate memory for values_copy\n");
- return -1;
- }
+ /* Fill the xstats structure */
+ ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
+ if (ret < 0)
+ return ret;
+ num_xstats_filled = (unsigned int)ret;
- rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);
+ /* Return all stats */
+ if (!ids) {
+ for (i = 0; i < num_xstats_filled; i++)
+ values[i] = xstats[i].value;
+ return expected_entries;
+ }
- for (i = 0; i < n; i++) {
- if (ids[i] >= size) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: id value isn't valid\n");
- return -1;
- }
- values[i] = values_copy[ids[i]];
+ /* Filter stats */
+ for (i = 0; i < size; i++) {
+ if (ids[i] >= expected_entries) {
+ RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
+ return -1;
}
- free(values_copy);
- return n;
+ values[i] = xstats[ids[i]].value;
}
+ return size;
}
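
Taken together with rte_eth_xstats_get_id_by_name(), the reworked by-id path lets an application resolve a statistic name once and then poll it cheaply. A minimal sketch with a hypothetical helper name:

#include <rte_ethdev.h>

static int
read_one_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
	uint64_t id;
	int ret;

	/* Resolve the name to an id once, e.g. at startup. */
	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;

	/* One id in, one value out; returns 1 on success. */
	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	return ret == 1 ? 0 : -1;
}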
int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
+rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
unsigned int n)
{
struct rte_eth_stats eth_stats;
@@ -1819,7 +1952,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
/* reset ethdev extended statistics */
void
-rte_eth_xstats_reset(uint8_t port_id)
+rte_eth_xstats_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -1837,7 +1970,7 @@ rte_eth_xstats_reset(uint8_t port_id)
}
static int
-set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
uint8_t is_rx)
{
struct rte_eth_dev *dev;
@@ -1853,7 +1986,7 @@ set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
int
-rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
uint8_t stat_idx)
{
return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
@@ -1862,7 +1995,7 @@ rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
int
-rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
uint8_t stat_idx)
{
return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
@@ -1870,7 +2003,7 @@ rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
}
int
-rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
+rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
struct rte_eth_dev *dev;
@@ -1882,7 +2015,7 @@ rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
}
void
-rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
+rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
const struct rte_eth_desc_lim lim = {
@@ -1906,7 +2039,7 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
}
int
-rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num)
{
int i, j;
@@ -1932,7 +2065,7 @@ rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
}
void
-rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
+rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
@@ -1943,7 +2076,7 @@ rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
int
-rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
struct rte_eth_dev *dev;
@@ -1955,7 +2088,7 @@ rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
}
int
-rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
int ret;
struct rte_eth_dev *dev;
@@ -1972,14 +2105,15 @@ rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
}
int
-rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
+rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
struct rte_eth_dev *dev;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)) {
RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
return -ENOSYS;
}
@@ -2011,7 +2145,8 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
}
int
-rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
+ int on)
{
struct rte_eth_dev *dev;
@@ -2029,7 +2164,7 @@ rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int o
}
int
-rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
+rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
enum rte_vlan_type vlan_type,
uint16_t tpid)
{
@@ -2043,35 +2178,57 @@ rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
}
int
-rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
struct rte_eth_dev *dev;
int ret = 0;
int mask = 0;
int cur, org = 0;
+ uint64_t orig_offloads;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ /* save original values in case of failure */
+ orig_offloads = dev->data->dev_conf.rxmode.offloads;
+
/* check which options were changed by the application */
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_STRIP;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_STRIP;
mask |= ETH_VLAN_STRIP_MASK;
}
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_FILTER;
mask |= ETH_VLAN_FILTER_MASK;
}
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_EXTEND;
mask |= ETH_VLAN_EXTEND_MASK;
}
@@ -2080,13 +2237,26 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
- (*dev->dev_ops->vlan_offload_set)(dev, mask);
+
+ /*
+ * Convert to the offload bitfield API in case the underlying PMD
+ * still supports it.
+ */
+ rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
+ &dev->data->dev_conf.rxmode);
+ ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
+ if (ret) {
+ /* hit an error, restore the original values */
+ dev->data->dev_conf.rxmode.offloads = orig_offloads;
+ rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
+ &dev->data->dev_conf.rxmode);
+ }
return ret;
}
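
A minimal sketch of toggling VLAN stripping at runtime through this mask API; as the code above shows, the offload bits in dev_conf are updated first and rolled back if the PMD rejects the change. The helper name is illustrative:

#include <rte_ethdev.h>

static int
enable_vlan_strip(uint16_t port_id)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	mask |= ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}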
int
-rte_eth_dev_get_vlan_offload(uint8_t port_id)
+rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
struct rte_eth_dev *dev;
int ret = 0;
@@ -2094,20 +2264,23 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
ret |= ETH_VLAN_STRIP_OFFLOAD;
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
ret |= ETH_VLAN_FILTER_OFFLOAD;
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
ret |= ETH_VLAN_EXTEND_OFFLOAD;
return ret;
}
int
-rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
+rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
struct rte_eth_dev *dev;
@@ -2120,7 +2293,7 @@ rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
}
int
-rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
@@ -2132,7 +2305,7 @@ rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
}
int
-rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
@@ -2148,7 +2321,8 @@ rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
}
int
-rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
+ struct rte_eth_pfc_conf *pfc_conf)
{
struct rte_eth_dev *dev;
@@ -2214,7 +2388,7 @@ rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
}
int
-rte_eth_dev_rss_reta_update(uint8_t port_id,
+rte_eth_dev_rss_reta_update(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
@@ -2240,7 +2414,7 @@ rte_eth_dev_rss_reta_update(uint8_t port_id,
}
int
-rte_eth_dev_rss_reta_query(uint8_t port_id,
+rte_eth_dev_rss_reta_query(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
@@ -2260,26 +2434,19 @@ rte_eth_dev_rss_reta_query(uint8_t port_id,
}
int
-rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+rte_eth_dev_rss_hash_update(uint16_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
- uint16_t rss_hash_protos;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- rss_hash_protos = rss_conf->rss_hf;
- if ((rss_hash_protos != 0) &&
- ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
- RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
- rss_hash_protos);
- return -EINVAL;
- }
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}
int
-rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
@@ -2291,7 +2458,7 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
}
int
-rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct rte_eth_dev *dev;
@@ -2313,7 +2480,7 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
}
int
-rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct rte_eth_dev *dev;
@@ -2336,7 +2503,7 @@ rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
}
int
-rte_eth_led_on(uint8_t port_id)
+rte_eth_led_on(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -2347,7 +2514,7 @@ rte_eth_led_on(uint8_t port_id)
}
int
-rte_eth_led_off(uint8_t port_id)
+rte_eth_led_off(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -2362,7 +2529,7 @@ rte_eth_led_off(uint8_t port_id)
* an empty spot.
*/
static int
-get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -2381,7 +2548,7 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
static const struct ether_addr null_mac_addr;
int
-rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
+rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
uint32_t pool)
{
struct rte_eth_dev *dev;
@@ -2434,7 +2601,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
}
int
-rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
+rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
int index;
@@ -2463,7 +2630,7 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
}
int
-rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
+rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
@@ -2489,7 +2656,7 @@ rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
* an empty spot.
*/
static int
-get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -2508,7 +2675,7 @@ get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
}
int
-rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
+rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
uint8_t on)
{
int index;
@@ -2560,7 +2727,7 @@ rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
}
int
-rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
+rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
struct rte_eth_dev *dev;
@@ -2572,7 +2739,7 @@ rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
-int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
uint16_t tx_rate)
{
struct rte_eth_dev *dev;
@@ -2603,7 +2770,7 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
}
int
-rte_eth_mirror_rule_set(uint8_t port_id,
+rte_eth_mirror_rule_set(uint16_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
@@ -2641,7 +2808,7 @@ rte_eth_mirror_rule_set(uint8_t port_id,
}
int
-rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
+rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
struct rte_eth_dev *dev;
@@ -2654,7 +2821,7 @@ rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
}
int
-rte_eth_dev_callback_register(uint8_t port_id,
+rte_eth_dev_callback_register(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
@@ -2694,7 +2861,7 @@ rte_eth_dev_callback_register(uint8_t port_id,
}
int
-rte_eth_dev_callback_unregister(uint8_t port_id,
+rte_eth_dev_callback_unregister(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
@@ -2766,7 +2933,7 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
}
int
-rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
+rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
uint32_t vec;
struct rte_eth_dev *dev;
@@ -2818,16 +2985,11 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
- if (rte_xen_dom0_supported())
- return rte_memzone_reserve_bounded(z_name, size, socket_id,
- 0, align, RTE_PGSIZE_2M);
- else
- return rte_memzone_reserve_aligned(z_name, size, socket_id,
- 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}
int
-rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
int epfd, int op, void *data)
{
uint32_t vec;
@@ -2867,7 +3029,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_dev_rx_intr_enable(uint8_t port_id,
+rte_eth_dev_rx_intr_enable(uint16_t port_id,
uint16_t queue_id)
{
struct rte_eth_dev *dev;
@@ -2881,7 +3043,7 @@ rte_eth_dev_rx_intr_enable(uint8_t port_id,
}
int
-rte_eth_dev_rx_intr_disable(uint8_t port_id,
+rte_eth_dev_rx_intr_disable(uint16_t port_id,
uint16_t queue_id)
{
struct rte_eth_dev *dev;
@@ -2896,7 +3058,8 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
int
-rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
+rte_eth_dev_filter_supported(uint16_t port_id,
+ enum rte_filter_type filter_type)
{
struct rte_eth_dev *dev;
@@ -2909,7 +3072,7 @@ rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
}
int
-rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
{
struct rte_eth_dev *dev;
@@ -2922,7 +3085,7 @@ rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
}
void *
-rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -2964,7 +3127,7 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
}
void *
-rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -2999,7 +3162,7 @@ rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
}
void *
-rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -3042,7 +3205,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -3076,7 +3239,7 @@ rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
@@ -3110,7 +3273,7 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo)
{
struct rte_eth_dev *dev;
@@ -3134,7 +3297,7 @@ rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
struct rte_eth_dev *dev;
@@ -3158,7 +3321,7 @@ rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
}
int
-rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+rte_eth_dev_set_mc_addr_list(uint16_t port_id,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
@@ -3172,7 +3335,7 @@ rte_eth_dev_set_mc_addr_list(uint8_t port_id,
}
int
-rte_eth_timesync_enable(uint8_t port_id)
+rte_eth_timesync_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -3184,7 +3347,7 @@ rte_eth_timesync_enable(uint8_t port_id)
}
int
-rte_eth_timesync_disable(uint8_t port_id)
+rte_eth_timesync_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -3196,7 +3359,7 @@ rte_eth_timesync_disable(uint8_t port_id)
}
int
-rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
+rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
uint32_t flags)
{
struct rte_eth_dev *dev;
@@ -3209,7 +3372,8 @@ rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
}
int
-rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
+rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
+ struct timespec *timestamp)
{
struct rte_eth_dev *dev;
@@ -3221,7 +3385,7 @@ rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
}
int
-rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
+rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
struct rte_eth_dev *dev;
@@ -3233,7 +3397,7 @@ rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
}
int
-rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
+rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
struct rte_eth_dev *dev;
@@ -3245,7 +3409,7 @@ rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
}
int
-rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
+rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
struct rte_eth_dev *dev;
@@ -3257,7 +3421,7 @@ rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
}
int
-rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
+rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
struct rte_eth_dev *dev;
@@ -3269,7 +3433,7 @@ rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
}
int
-rte_eth_dev_get_eeprom_length(uint8_t port_id)
+rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
struct rte_eth_dev *dev;
@@ -3281,7 +3445,7 @@ rte_eth_dev_get_eeprom_length(uint8_t port_id)
}
int
-rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
struct rte_eth_dev *dev;
@@ -3293,7 +3457,7 @@ rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
}
int
-rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
struct rte_eth_dev *dev;
@@ -3305,7 +3469,7 @@ rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
}
int
-rte_eth_dev_get_dcb_info(uint8_t port_id,
+rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info)
{
struct rte_eth_dev *dev;
@@ -3320,7 +3484,7 @@ rte_eth_dev_get_dcb_info(uint8_t port_id,
}
int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
+rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
struct rte_eth_dev *dev;
@@ -3343,7 +3507,7 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
}
int
-rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
+rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
uint32_t mask,
uint8_t en)
@@ -3387,7 +3551,7 @@ rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
}
int
-rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc)
{
@@ -3409,3 +3573,21 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
return 0;
}
+
+int
+rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (pool == NULL)
+ return -EINVAL;
+
+ dev = &rte_eth_devices[port_id];
+
+ if (*dev->dev_ops->pool_ops_supported == NULL)
+ return 1; /* all pools are supported */
+
+ return (*dev->dev_ops->pool_ops_supported)(dev, pool);
+}
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 0adf3274..18e474db 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -180,6 +180,8 @@ extern "C" {
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_errno.h>
+#include <rte_common.h>
+
#include "rte_ether.h"
#include "rte_eth_ctrl.h"
#include "rte_dev_info.h"
@@ -348,7 +350,18 @@ struct rte_eth_rxmode {
enum rte_eth_rx_mq_mode mq_mode;
uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
+ /**
+ * Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+ * Only offloads set in the rx_offload_capa field of the
+ * rte_eth_dev_info structure are allowed to be set.
+ */
+ uint64_t offloads;
__extension__
+ /**
+ * The bitfield API below is obsolete. Applications should
+ * enable per-port offloads using the offloads field
+ * above.
+ */
uint16_t header_split : 1, /**< Header Split enable. */
hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
hw_vlan_filter : 1, /**< VLAN filter enable. */
@@ -357,7 +370,19 @@ struct rte_eth_rxmode {
jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
enable_scatter : 1, /**< Enable scatter packets rx handler */
- enable_lro : 1; /**< Enable LRO */
+ enable_lro : 1, /**< Enable LRO */
+ hw_timestamp : 1, /**< Enable HW timestamp */
+ security : 1, /**< Enable rte_security offloads */
+ /**
+ * When set, the offload bitfield is ignored.
+ * Instead, per-port Rx offloads should be set in the offloads
+ * field above.
+ * Per-queue offloads should be set in the rte_eth_rxconf
+ * structure.
+ * This bit is temporary until the rxmode bitfield offload API
+ * is deprecated.
+ */
+ ignore_offload_bitfield : 1;
};
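For illustration, a minimal caller-side sketch (not part of the patch) of opting into the new per-port Rx offload field; the helper name and flag choice are assumptions:

#include <rte_ethdev.h>

/* Sketch: request per-port Rx offloads through the new API, validating
 * them against rx_offload_capa first. Illustrative only.
 */
static int
configure_rx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Opt in to the new API: the legacy bitfield is then ignored. */
	conf.rxmode.ignore_offload_bitfield = 1;
	/* Only request what the device advertises. */
	conf.rxmode.offloads = wanted & dev_info.rx_offload_capa;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}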
/**
@@ -671,6 +696,12 @@ struct rte_eth_vmdq_rx_conf {
*/
struct rte_eth_txmode {
enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
+ /**
+ * Per-port Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+ * Only offloads set in the tx_offload_capa field of the
+ * rte_eth_dev_info structure are allowed to be set.
+ */
+ uint64_t offloads;
/* For i40e specifically */
uint16_t pvid;
@@ -691,6 +722,12 @@ struct rte_eth_rxconf {
uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+ /**
+ * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
+ * Only offloads set in the rx_queue_offload_capa or rx_offload_capa
+ * fields of the rte_eth_dev_info structure are allowed to be set.
+ */
+ uint64_t offloads;
};
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
@@ -707,6 +744,15 @@ struct rte_eth_rxconf {
(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
ETH_TXQ_FLAGS_NOXSUMTCP)
/**
+ * When set, the txq_flags field should be ignored;
+ * instead, per-queue Tx offloads will be set in the offloads field
+ * located in the rte_eth_txconf struct.
+ * This flag is temporary until the rte_eth_txconf.txq_flags
+ * API is deprecated.
+ */
+#define ETH_TXQ_FLAGS_IGNORE 0x8000
+
+/**
* A structure used to configure a TX ring of an Ethernet port.
*/
struct rte_eth_txconf {
@@ -717,6 +763,12 @@ struct rte_eth_txconf {
uint32_t txq_flags; /**< Set flags for the Tx queue */
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
+ /**
+ * Per-queue Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
+ * Only offloads set in the tx_queue_offload_capa or tx_offload_capa
+ * fields of the rte_eth_dev_info structure are allowed to be set.
+ */
+ uint64_t offloads;
};
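A matching Tx-side sketch (illustrative, not part of the patch): keep the PMD's default thresholds but route offload selection through the new per-queue field:

#include <rte_ethdev.h>

/* Sketch: set up one Tx queue through the new offloads field, telling
 * the PMD to ignore the legacy txq_flags. Helper name is hypothetical.
 */
static int
setup_tx_queue(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
	       uint64_t tx_offloads)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* legacy flags ignored */
	txconf.offloads = tx_offloads & dev_info.tx_offload_capa;

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}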
/**
@@ -874,7 +926,7 @@ struct rte_eth_conf {
/**< Port dcb RX configuration. */
struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
/**< Port vmdq RX configuration. */
- } rx_adv_conf; /**< Port RX filtering configuration (union). */
+ } rx_adv_conf; /**< Port RX filtering configuration. */
union {
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
/**< Port vmdq+dcb TX configuration. */
@@ -907,6 +959,20 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
+#define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
+#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
+#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
+#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
+#define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
+#define DEV_RX_OFFLOAD_SCATTER 0x00002000
+#define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
+#define DEV_RX_OFFLOAD_SECURITY 0x00008000
+#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM)
+#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
/**
* TX offload capabilities of a device.
@@ -929,6 +995,14 @@ struct rte_eth_conf {
/**< Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
* tx queue without SW lock.
*/
+#define DEV_TX_OFFLOAD_MULTI_SEGS 0x00008000
+/**< Device supports multi-segment send. */
+#define DEV_TX_OFFLOAD_MBUF_FAST_FREE 0x00010000
+/**< Device supports optimization for fast release of mbufs.
+ * When set application must guarantee that per-queue all mbufs comes from
+ * the same mempool and has refcnt = 1.
+ */
+#define DEV_TX_OFFLOAD_SECURITY 0x00020000
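Since DEV_TX_OFFLOAD_MBUF_FAST_FREE comes with an application-side contract, a hedged sketch of a guarded enable (the helper and its caller-supplied guarantee flag are assumptions, not part of the patch):

#include <rte_ethdev.h>

/* Sketch: enable fast mbuf release on a queue only when the PMD
 * advertises it and the caller asserts that every mbuf sent on this
 * queue comes from one mempool with refcnt == 1, as the flag requires.
 */
static void
maybe_enable_fast_free(struct rte_eth_txconf *txconf,
		       const struct rte_eth_dev_info *dev_info,
		       int single_pool_refcnt1)
{
	if (single_pool_refcnt1 &&
	    (dev_info->tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		txconf->offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
}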
struct rte_pci_device;
@@ -949,8 +1023,14 @@ struct rte_eth_dev_info {
/** Maximum number of hash MAC addresses for MTA and UTA. */
uint16_t max_vfs; /**< Maximum number of VFs. */
uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
- uint32_t rx_offload_capa; /**< Device RX offload capabilities. */
- uint32_t tx_offload_capa; /**< Device TX offload capabilities. */
+ uint64_t rx_offload_capa;
+ /**< Device per port RX offload capabilities. */
+ uint64_t tx_offload_capa;
+ /**< Device per port TX offload capabilities. */
+ uint64_t rx_queue_offload_capa;
+ /**< Device per queue RX offload capabilities. */
+ uint64_t tx_queue_offload_capa;
+ /**< Device per queue TX offload capabilities. */
uint16_t reta_size;
/**< Device redirection table size, the total number of entries. */
uint8_t hash_key_size; /**< Hash key size in bytes */
@@ -1076,8 +1156,6 @@ TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
} \
} while (0)
-#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
-
/**
* l2 tunnel configuration.
*/
@@ -1115,6 +1193,9 @@ typedef int (*eth_dev_set_link_down_t)(struct rte_eth_dev *dev);
typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
/**< @internal Function used to close a configured Ethernet device. */
+typedef int (*eth_dev_reset_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to reset a configured Ethernet device. */
+
typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
/**< @internal Function used to enable the RX promiscuous mode of an Ethernet device. */
@@ -1131,7 +1212,7 @@ typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
int wait_to_complete);
/**< @internal Get link speed, duplex mode and state (up/down) of an Ethernet device. */
-typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
+typedef int (*eth_stats_get_t)(struct rte_eth_dev *dev,
struct rte_eth_stats *igb_stats);
/**< @internal Get global I/O statistics of an Ethernet device. */
@@ -1245,7 +1326,7 @@ typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
enum rte_vlan_type type, uint16_t tpid);
/**< @internal set the outer/inner VLAN-TPID by an Ethernet device. */
-typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
+typedef int (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
/**< @internal set VLAN offload function by an Ethernet device. */
typedef int (*vlan_pvid_set_t)(struct rte_eth_dev *dev,
@@ -1421,10 +1502,17 @@ typedef int (*eth_filter_ctrl_t)(struct rte_eth_dev *dev,
typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
+typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
+/**< @internal Get Traffic Metering and Policing (MTR) operations */
+
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
/**< @internal Get dcb information on an Ethernet device */
+typedef int (*eth_pool_ops_supported_t)(struct rte_eth_dev *dev,
+ const char *pool);
+/**< @internal Test if a port supports specific mempool ops */
+
/**
* @internal A structure containing the functions exported by an Ethernet driver.
*/
@@ -1435,6 +1523,7 @@ struct eth_dev_ops {
eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
eth_dev_close_t dev_close; /**< Close device. */
+ eth_dev_reset_t dev_reset; /**< Reset device. */
eth_link_update_t link_update; /**< Get device link state. */
eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
@@ -1544,6 +1633,12 @@ struct eth_dev_ops {
eth_tm_ops_get_t tm_ops_get;
/**< Get Traffic Management (TM) operations. */
+
+ eth_mtr_ops_get_t mtr_ops_get;
+ /**< Get Traffic Metering and Policing (MTR) operations. */
+
+ eth_pool_ops_supported_t pool_ops_supported;
+ /**< Test if a port supports specific mempool ops */
};
/**
@@ -1568,7 +1663,7 @@ struct eth_dev_ops {
* @return
* The number of packets returned to the user.
*/
-typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
+typedef uint16_t (*rte_rx_callback_fn)(uint16_t port, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
void *user_param);
@@ -1592,7 +1687,7 @@ typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
* @return
* The number of packets to be written to the NIC.
*/
-typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
+typedef uint16_t (*rte_tx_callback_fn)(uint16_t port, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
/**
@@ -1649,8 +1744,12 @@ struct rte_eth_dev {
*/
struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
enum rte_eth_dev_state state; /**< Flag indicating the port state */
+ void *security_ctx; /**< Context for security ops */
} __rte_cache_aligned;
+void *
+rte_eth_dev_get_sec_ctx(uint8_t port_id);
+
struct rte_eth_dev_sriov {
uint8_t active; /**< SRIOV is active with 16, 32 or 64 pools */
uint8_t nb_q_per_pool; /**< rx queue number per pool */
@@ -1695,7 +1794,7 @@ struct rte_eth_dev_data {
/** bitmap array of associating Ethernet MAC addresses to pools */
struct ether_addr* hash_mac_addrs;
/** Device Ethernet MAC addresses of hash filtering. */
- uint8_t port_id; /**< Device [external] port identifier. */
+ uint16_t port_id; /**< Device [external] port identifier. */
__extension__
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
@@ -1713,8 +1812,6 @@ struct rte_eth_dev_data {
/**< VLAN filter configuration. */
};
-/** Device supports hotplug detach */
-#define RTE_ETH_DEV_DETACHABLE 0x0001
/** Device supports link state interrupt */
#define RTE_ETH_DEV_INTR_LSC 0x0002
/** Device is a bonded slave */
@@ -1737,7 +1834,7 @@ extern struct rte_eth_dev rte_eth_devices[];
* @return
* Next valid port id, RTE_MAX_ETHPORTS if there is none.
*/
-uint8_t rte_eth_find_next(uint8_t port_id);
+uint16_t rte_eth_find_next(uint16_t port_id);
/**
* Macro to iterate over all enabled ethdev ports.
@@ -1760,7 +1857,7 @@ uint8_t rte_eth_find_next(uint8_t port_id);
* @return
* - The total number of usable Ethernet devices.
*/
-uint8_t rte_eth_dev_count(void);
+uint16_t rte_eth_dev_count(void);
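A small sketch of why the widened port id type matters to callers (the helper is hypothetical):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: with port ids widened to 16 bits, iteration variables should
 * be uint16_t; a uint8_t loop counter would wrap beyond port 255.
 */
static void
list_valid_ports(void)
{
	uint16_t pid;

	for (pid = rte_eth_find_next(0);
	     pid < RTE_MAX_ETHPORTS;
	     pid = rte_eth_find_next(pid + 1))
		printf("port %u is valid\n", (unsigned int)pid);

	printf("%u usable port(s)\n", (unsigned int)rte_eth_dev_count());
}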
/**
* @internal
@@ -1821,7 +1918,7 @@ int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
* @return
* 0 on success and port_id is filled, negative on error
*/
-int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
+int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
/**
* Detach a Ethernet device specified by port identifier.
@@ -1836,7 +1933,7 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
* @return
* 0 on success and devname is filled, negative on error
*/
-int rte_eth_dev_detach(uint8_t port_id, char *devname);
+int rte_eth_dev_detach(uint16_t port_id, char *devname);
/**
* Convert a numerical speed in Mbps to a bitmap flag that can be used in
@@ -1870,6 +1967,9 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
* each statically configurable offload hardware feature provided by
* Ethernet devices, such as IP checksum or VLAN tag stripping for
* example.
+ * The Rx offload bitfield API is obsolete and will be deprecated.
+ * Applications should set the ignore_offload_bitfield bit in the *rxmode*
+ * structure and use the offloads field to set per-port offloads instead.
* - the Receive Side Scaling (RSS) configuration when using multiple RX
* queues per port.
*
@@ -1880,7 +1980,7 @@ uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
* - 0: Success, device configured.
* - <0: Error code returned by the driver configuration function.
*/
-int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
+int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
/**
@@ -1923,6 +2023,8 @@ void _rte_eth_dev_reset(struct rte_eth_dev *dev);
* The *rx_conf* structure contains an *rx_thresh* structure with the values
* of the Prefetch, Host, and Write-Back threshold registers of the receive
* ring.
+ * In addition, it contains the hardware offload features to activate using
+ * the DEV_RX_OFFLOAD_* flags.
* @param mb_pool
* The pointer to the memory pool from which to allocate *rte_mbuf* network
* memory buffers to populate each descriptor of the receive ring.
@@ -1935,7 +2037,7 @@ void _rte_eth_dev_reset(struct rte_eth_dev *dev);
* allocate network memory buffers from the memory pool when
* initializing receive descriptors.
*/
-int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
@@ -1976,6 +2078,11 @@ int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
* - The *txq_flags* member contains flags to pass to the TX queue setup
* function to configure the behavior of the TX queue. This should be set
* to 0 if no special configuration is required.
+ * This API is obsolete and will be deprecated. Applications
+ * should set it to ETH_TXQ_FLAGS_IGNORE and use
+ * the offloads field below.
+ * - The *offloads* member contains Tx offloads to be enabled.
+ * Offloads which are not set cannot be used on the datapath.
*
* Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
* the transmit function to use default values.
@@ -1983,7 +2090,7 @@ int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
* - 0: Success, the transmit queue is correctly set up.
* - -ENOMEM: Unable to allocate the transmit ring descriptors.
*/
-int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -1997,7 +2104,7 @@ int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
* a default of zero if the socket could not be determined.
* -1 is returned if the port_id value is out of range.
*/
-int rte_eth_dev_socket_id(uint8_t port_id);
+int rte_eth_dev_socket_id(uint16_t port_id);
/**
* Check if port_id of device is attached
@@ -2008,7 +2115,7 @@ int rte_eth_dev_socket_id(uint8_t port_id);
* - 0 if port is out of range or not attached
* - 1 if device is attached
*/
-int rte_eth_dev_is_valid_port(uint8_t port_id);
+int rte_eth_dev_is_valid_port(uint16_t port_id);
/**
* Start specified RX queue of a port. It is used when rx_deferred_start
@@ -2025,7 +2132,7 @@ int rte_eth_dev_is_valid_port(uint8_t port_id);
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function is not supported by the PMD.
*/
-int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
+int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
/**
* Stop specified RX queue of a port
@@ -2041,7 +2148,7 @@ int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function is not supported by the PMD.
*/
-int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
+int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
/**
* Start TX for specified queue of a port. It is used when tx_deferred_start
@@ -2058,7 +2165,7 @@ int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function is not supported by the PMD.
*/
-int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
+int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
/**
* Stop specified TX queue of a port
@@ -2074,7 +2181,7 @@ int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function is not supported by the PMD.
*/
-int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
+int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
@@ -2093,7 +2200,7 @@ int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
* - 0: Success, Ethernet device started.
* - <0: Error code of the driver device start function.
*/
-int rte_eth_dev_start(uint8_t port_id);
+int rte_eth_dev_start(uint16_t port_id);
/**
* Stop an Ethernet device. The device can be restarted with a call to
@@ -2102,7 +2209,7 @@ int rte_eth_dev_start(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_dev_stop(uint8_t port_id);
+void rte_eth_dev_stop(uint16_t port_id);
/**
@@ -2117,7 +2224,7 @@ void rte_eth_dev_stop(uint8_t port_id);
* - 0: Success, Ethernet device linked up.
* - <0: Error code of the driver device link up function.
*/
-int rte_eth_dev_set_link_up(uint8_t port_id);
+int rte_eth_dev_set_link_up(uint16_t port_id);
/**
* Link down an Ethernet device.
@@ -2128,7 +2235,7 @@ int rte_eth_dev_set_link_up(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-int rte_eth_dev_set_link_down(uint8_t port_id);
+int rte_eth_dev_set_link_down(uint16_t port_id);
/**
* Close a stopped Ethernet device. The device cannot be restarted!
@@ -2138,7 +2245,46 @@ int rte_eth_dev_set_link_down(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_dev_close(uint8_t port_id);
+void rte_eth_dev_close(uint16_t port_id);
+
+/**
+ * Reset an Ethernet device and keep its port id.
+ *
+ * When a port has to be reset passively, the DPDK application can invoke
+ * this function. For example, when a PF is reset, all its VFs should also
+ * be reset. Normally a DPDK application invokes this function when the
+ * RTE_ETH_EVENT_INTR_RESET event is detected, but it can also use it to start
+ * a port reset in other circumstances.
+ *
+ * When this function is called, it first stops the port and then calls the
+ * PMD-specific dev_uninit() and dev_init() to return the port to its initial
+ * state, in which no Tx and Rx queues are set up, as if the port had been
+ * reset and not started. The port keeps the port id it had before the
+ * function call.
+ *
+ * After calling rte_eth_dev_reset(), the application should use
+ * rte_eth_dev_configure(), rte_eth_rx_queue_setup(),
+ * rte_eth_tx_queue_setup(), and rte_eth_dev_start()
+ * to reconfigure the device as appropriate.
+ *
+ * Note: To avoid unexpected behavior, the application should stop calling
+ * Tx and Rx functions before calling rte_eth_dev_reset(). For thread
+ * safety, all these controlling functions should be called from the same
+ * thread.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-EINVAL) if port identifier is invalid.
+ * - (-ENOTSUP) if hardware doesn't support this function.
+ * - (-EPERM) if not called from the primary process.
+ * - (-EIO) if re-initialisation failed.
+ * - (-ENOMEM) if the reset failed due to OOM.
+ * - (-EAGAIN) if the reset temporarily failed and should be retried later.
+ */
+int rte_eth_dev_reset(uint16_t port_id);
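A sketch of the recovery flow this comment describes; reconfigure_port() is a hypothetical application helper, not a DPDK API:

#include <errno.h>
#include <rte_ethdev.h>

static int reconfigure_port(uint16_t port_id); /* hypothetical app helper */

/* Sketch: drive a passive port reset, e.g. from a handler for the
 * RTE_ETH_EVENT_INTR_RESET event.
 */
static int
handle_port_reset(uint16_t port_id)
{
	int ret;

	do {
		ret = rte_eth_dev_reset(port_id);
	} while (ret == -EAGAIN); /* transient; a real app would back off */

	if (ret != 0)
		return ret; /* -EINVAL, -ENOTSUP, -EPERM, -EIO or -ENOMEM */

	/* The port is back in its initial, unconfigured state: rebuild
	 * it with configure/queue-setup/start as documented above.
	 */
	return reconfigure_port(port_id);
}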
/**
* Enable receipt in promiscuous mode for an Ethernet device.
@@ -2146,7 +2292,7 @@ void rte_eth_dev_close(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_promiscuous_enable(uint8_t port_id);
+void rte_eth_promiscuous_enable(uint16_t port_id);
/**
* Disable receipt in promiscuous mode for an Ethernet device.
@@ -2154,7 +2300,7 @@ void rte_eth_promiscuous_enable(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_promiscuous_disable(uint8_t port_id);
+void rte_eth_promiscuous_disable(uint16_t port_id);
/**
* Return the value of promiscuous mode for an Ethernet device.
@@ -2166,7 +2312,7 @@ void rte_eth_promiscuous_disable(uint8_t port_id);
* - (0) if promiscuous is disabled.
* - (-1) on error
*/
-int rte_eth_promiscuous_get(uint8_t port_id);
+int rte_eth_promiscuous_get(uint16_t port_id);
/**
* Enable the receipt of any multicast frame by an Ethernet device.
@@ -2174,7 +2320,7 @@ int rte_eth_promiscuous_get(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_allmulticast_enable(uint8_t port_id);
+void rte_eth_allmulticast_enable(uint16_t port_id);
/**
* Disable the receipt of all multicast frames by an Ethernet device.
@@ -2182,7 +2328,7 @@ void rte_eth_allmulticast_enable(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_allmulticast_disable(uint8_t port_id);
+void rte_eth_allmulticast_disable(uint16_t port_id);
/**
* Return the value of allmulticast mode for an Ethernet device.
@@ -2194,7 +2340,7 @@ void rte_eth_allmulticast_disable(uint8_t port_id);
* - (0) if allmulticast is disabled.
* - (-1) on error
*/
-int rte_eth_allmulticast_get(uint8_t port_id);
+int rte_eth_allmulticast_get(uint16_t port_id);
/**
* Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
@@ -2207,7 +2353,7 @@ int rte_eth_allmulticast_get(uint8_t port_id);
* A pointer to an *rte_eth_link* structure to be filled with
* the status, the speed and the mode of the Ethernet device link.
*/
-void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
+void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
/**
* Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
@@ -2220,7 +2366,7 @@ void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
* A pointer to an *rte_eth_link* structure to be filled with
* the status, the speed and the mode of the Ethernet device link.
*/
-void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
+void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
/**
* Retrieve the general I/O statistics of an Ethernet device.
@@ -2239,15 +2385,19 @@ void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
+int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
/**
* Reset the general I/O statistics of an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
+ * @return
+ * - (0) if the device was notified to reset its stats.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
*/
-void rte_eth_stats_reset(uint8_t port_id);
+int rte_eth_stats_reset(uint16_t port_id);
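A short caller-side sketch of the new return-value handling (the helper name is illustrative):

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: rte_eth_stats_reset() now reports failure instead of
 * returning void, so the outcome can be checked.
 */
static void
reset_port_stats(uint16_t port_id)
{
	int ret = rte_eth_stats_reset(port_id);

	if (ret == -ENOTSUP)
		printf("port %u: stats reset not supported by hardware\n",
		       (unsigned int)port_id);
	else if (ret == -ENODEV)
		printf("invalid port id %u\n", (unsigned int)port_id);
}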
/**
* Retrieve names of extended statistics of an Ethernet device.
@@ -2269,7 +2419,7 @@ void rte_eth_stats_reset(uint8_t port_id);
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get_names(uint8_t port_id,
+int rte_eth_xstats_get_names(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names,
unsigned int size);
@@ -2295,7 +2445,7 @@ int rte_eth_xstats_get_names(uint8_t port_id,
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
+int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
unsigned int n);
/**
@@ -2321,7 +2471,7 @@ int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
* - A negative value on error (invalid port id).
*/
int
-rte_eth_xstats_get_names_by_id(uint8_t port_id,
+rte_eth_xstats_get_names_by_id(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names, unsigned int size,
uint64_t *ids);
@@ -2333,23 +2483,23 @@ rte_eth_xstats_get_names_by_id(uint8_t port_id,
* @param ids
* A pointer to an ids array passed by the application. This tells which
* statistics values the function should retrieve. This parameter
- * can be set to NULL if n is 0. In this case function will retrieve
+ * can be set to NULL if size is 0. In this case the function will retrieve
* all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
- * @param n
+ * @param size
* The size of the ids array (number of elements).
* @return
- * - A positive value lower or equal to n: success. The return value
+ * - A positive value lower than or equal to size: success. The return value
* is the number of entries filled in the stats table.
- * - A positive value higher than n: error, the given statistics table
+ * - A positive value higher than size: error, the given statistics table
* is too small. The return value corresponds to the size that should
* be given to succeed. The entries in the table are not valid and
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
- uint64_t *values, unsigned int n);
+int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
+ uint64_t *values, unsigned int size);
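A usage sketch pairing this with rte_eth_xstats_get_id_by_name(); the xstat name "rx_good_packets" is driver-dependent and assumed here:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: resolve a counter name to an id once, then poll it cheaply
 * by id on the hot path.
 */
static int
poll_one_xstat(uint16_t port_id)
{
	uint64_t id, value;

	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
					  &id) != 0)
		return -1;

	/* ids/values arrays of one element, size = 1 */
	if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) != 1)
		return -1;

	printf("rx_good_packets = %" PRIu64 "\n", value);
	return 0;
}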
/**
* Gets the ID of a statistic from its name.
@@ -2368,7 +2518,7 @@ int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
* -ENODEV for invalid port_id,
* -EINVAL if the xstat_name doesn't exist in port_id
*/
-int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
uint64_t *id);
/**
@@ -2377,7 +2527,7 @@ int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_xstats_reset(uint8_t port_id);
+void rte_eth_xstats_reset(uint16_t port_id);
/**
* Set a mapping for the specified transmit queue to the specified per-queue
@@ -2396,7 +2546,7 @@ void rte_eth_xstats_reset(uint8_t port_id);
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
+int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
uint16_t tx_queue_id, uint8_t stat_idx);
/**
@@ -2416,7 +2566,7 @@ int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
+int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
uint16_t rx_queue_id,
uint8_t stat_idx);
@@ -2429,7 +2579,7 @@ int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
* A pointer to a structure of type *ether_addr* to be filled with
* the Ethernet address of the Ethernet device.
*/
-void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
+void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);
/**
* Retrieve the contextual information of an Ethernet device.
@@ -2440,7 +2590,7 @@ void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
* A pointer to a structure of type *rte_eth_dev_info* to be filled with
* the contextual information of the Ethernet device.
*/
-void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
+void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
/**
* Retrieve the firmware version of a device.
@@ -2460,7 +2610,7 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
* - (>0) if *fw_size* is not enough to store firmware version, return
* the size of the non truncated string.
*/
-int rte_eth_dev_fw_version_get(uint8_t port_id,
+int rte_eth_dev_fw_version_get(uint16_t port_id,
char *fw_version, size_t fw_size);
/**
@@ -2501,7 +2651,7 @@ int rte_eth_dev_fw_version_get(uint8_t port_id,
* count of supported ptypes will be returned.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num);
/**
@@ -2515,7 +2665,7 @@ int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
+int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
/**
* Change the MTU of an Ethernet device.
@@ -2531,7 +2681,7 @@ int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
* - (-EINVAL) if *mtu* invalid.
* - (-EBUSY) if operation is not allowed when the port is running
*/
-int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
+int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
/**
* Enable/Disable hardware filtering by an Ethernet device of received
@@ -2551,7 +2701,7 @@ int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
* - (-ENOSYS) if VLAN filtering on *port_id* disabled.
* - (-EINVAL) if *vlan_id* > 4095.
*/
-int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
+int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
/**
* Enable/Disable hardware VLAN strip by an Rx queue of an Ethernet device.
@@ -2572,7 +2722,7 @@ int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if *rx_queue_id* invalid.
*/
-int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
+int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
int on);
/**
@@ -2591,7 +2741,7 @@ int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
* - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
+int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
enum rte_vlan_type vlan_type,
uint16_t tag_type);
@@ -2615,7 +2765,7 @@ int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
* - (-ENOTSUP) if hardware-assisted VLAN filtering is not configured.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
+int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
/**
* Read VLAN Offload configuration from an Ethernet device
@@ -2629,7 +2779,7 @@ int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
* ETH_VLAN_EXTEND_OFFLOAD
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_vlan_offload(uint8_t port_id);
+int rte_eth_dev_get_vlan_offload(uint16_t port_id);
/**
* Set port based TX VLAN insertion on or off.
@@ -2645,7 +2795,7 @@ int rte_eth_dev_get_vlan_offload(uint8_t port_id);
* - (0) if successful.
* - negative if failed.
*/
-int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
+int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
/**
*
@@ -2730,7 +2880,7 @@ int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
* *rx_pkts* array.
*/
static inline uint16_t
-rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -2775,7 +2925,7 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
* (-ENOTSUP) if the device does not support this function
*/
static inline int
-rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
struct rte_eth_dev *dev;
@@ -2804,7 +2954,7 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
* - (-ENOTSUP) if the device does not support this function
*/
static inline int
-rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
+rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -2851,7 +3001,7 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
* - (-ENODEV) bad port or queue (only if compiled with debug).
*/
static inline int
-rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
uint16_t offset)
{
struct rte_eth_dev *dev;
@@ -2908,7 +3058,7 @@ rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
* - (-ENOTSUP) if the device does not support this function.
* - (-ENODEV) bad port or queue (only if compiled with debug).
*/
-static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
+static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
uint16_t queue_id, uint16_t offset)
{
struct rte_eth_dev *dev;
@@ -2992,7 +3142,7 @@ static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
* *tx_pkts* parameter when the transmit ring is full or has been filled up.
*/
static inline uint16_t
-rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -3081,7 +3231,7 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
static inline uint16_t
-rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct rte_eth_dev *dev;
@@ -3123,7 +3273,8 @@ rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
*/
static inline uint16_t
-rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
+rte_eth_tx_prepare(__rte_unused uint16_t port_id,
+ __rte_unused uint16_t queue_id,
__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
return nb_pkts;
@@ -3192,7 +3343,7 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
* callback is called for any packets which could not be sent.
*/
static inline uint16_t
-rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer)
{
uint16_t sent;
@@ -3244,7 +3395,7 @@ rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
* the rest.
*/
static __rte_always_inline uint16_t
-rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
buffer->pkts[buffer->length++] = tx_pkt;
@@ -3360,7 +3511,7 @@ rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
* are in use.
*/
int
-rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);
+rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
/**
* The eth device event type for interrupt, and maybe others in the future.
@@ -3378,7 +3529,7 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
-typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
+typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**< user application callback to be registered for interrupts */
@@ -3400,7 +3551,7 @@ typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_callback_register(uint8_t port_id,
+int rte_eth_dev_callback_register(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
@@ -3421,7 +3572,7 @@ int rte_eth_dev_callback_register(uint8_t port_id,
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_callback_unregister(uint8_t port_id,
+int rte_eth_dev_callback_unregister(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
@@ -3467,7 +3618,7 @@ int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
+int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
/**
* When an lcore wakes up from an Rx interrupt indicating packet arrival, disable the Rx
@@ -3488,7 +3639,7 @@ int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
+int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
/**
* RX Interrupt control per port.
@@ -3507,7 +3658,7 @@ int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
+int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
/**
* RX Interrupt control per queue.
@@ -3530,7 +3681,7 @@ int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
int epfd, int op, void *data);
/**
@@ -3545,7 +3696,7 @@ int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_led_on(uint8_t port_id);
+int rte_eth_led_on(uint16_t port_id);
/**
* Turn off the LED on the Ethernet device.
@@ -3559,7 +3710,7 @@ int rte_eth_led_on(uint8_t port_id);
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_led_off(uint8_t port_id);
+int rte_eth_led_off(uint16_t port_id);
/**
* Get current status of the Ethernet link flow control for Ethernet device
@@ -3573,7 +3724,7 @@ int rte_eth_led_off(uint8_t port_id);
* - (-ENOTSUP) if hardware doesn't support flow control.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
+int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
/**
@@ -3590,7 +3741,7 @@ int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
* - (-EINVAL) if bad parameter
* - (-EIO) if flow control setup failure
*/
-int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
+int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
/**
@@ -3608,7 +3759,7 @@ int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
* - (-EINVAL) if bad parameter
* - (-EIO) if flow control setup failure
*/
-int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
+int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
struct rte_eth_pfc_conf *pfc_conf);
/**
@@ -3629,7 +3780,7 @@ int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
* - (-ENOSPC) if no more MAC addresses can be added.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+int rte_eth_dev_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
uint32_t pool);
/**
@@ -3645,7 +3796,7 @@ int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
* - (-ENODEV) if *port* invalid.
* - (-EADDRINUSE) if attempting to remove the default MAC address
*/
-int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
+int rte_eth_dev_mac_addr_remove(uint16_t port, struct ether_addr *mac_addr);
/**
* Set the default MAC address.
@@ -3660,8 +3811,8 @@ int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
-
+int rte_eth_dev_default_mac_addr_set(uint16_t port,
+ struct ether_addr *mac_addr);
/**
* Update Redirection Table (RETA) of Receive Side Scaling of an Ethernet device.
@@ -3678,7 +3829,7 @@ int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_reta_update(uint8_t port,
+int rte_eth_dev_rss_reta_update(uint16_t port,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
@@ -3697,7 +3848,7 @@ int rte_eth_dev_rss_reta_update(uint8_t port,
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_reta_query(uint8_t port,
+int rte_eth_dev_rss_reta_query(uint16_t port,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
@@ -3719,8 +3870,8 @@ int rte_eth_dev_rss_reta_query(uint8_t port,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
- uint8_t on);
+int rte_eth_dev_uc_hash_table_set(uint16_t port, struct ether_addr *addr,
+ uint8_t on);
/**
* Updates all unicast hash bitmaps for receiving packet with any Unicast
@@ -3739,7 +3890,7 @@ int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
+int rte_eth_dev_uc_all_hash_table_set(uint16_t port, uint8_t on);
/**
* Set a traffic mirroring rule on an Ethernet device
@@ -3762,7 +3913,7 @@ int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if the mr_conf information is not correct.
*/
-int rte_eth_mirror_rule_set(uint8_t port_id,
+int rte_eth_mirror_rule_set(uint16_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
uint8_t on);
@@ -3780,7 +3931,7 @@ int rte_eth_mirror_rule_set(uint8_t port_id,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_mirror_rule_reset(uint8_t port_id,
+int rte_eth_mirror_rule_reset(uint16_t port_id,
uint8_t rule_id);
/**
@@ -3798,7 +3949,7 @@ int rte_eth_mirror_rule_reset(uint8_t port_id,
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
/**
@@ -3814,7 +3965,7 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_hash_update(uint8_t port_id,
+int rte_eth_dev_rss_hash_update(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
/**
@@ -3831,7 +3982,7 @@ int rte_eth_dev_rss_hash_update(uint8_t port_id,
* - (-ENOTSUP) if hardware doesn't support RSS.
*/
int
-rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
/**
@@ -3852,7 +4003,7 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
/**
@@ -3874,7 +4025,7 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
/**
@@ -3890,7 +4041,8 @@ rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
* - (-ENOTSUP) if hardware doesn't support this filter type.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
+int rte_eth_dev_filter_supported(uint16_t port_id,
+ enum rte_filter_type filter_type);
/**
* Perform operations for the assigned filter type on an Ethernet device.
@@ -3910,7 +4062,7 @@ int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_ty
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
/**
@@ -3925,7 +4077,7 @@ int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
* - (-ENODEV) if port identifier is invalid.
* - (-ENOTSUP) if hardware doesn't support.
*/
-int rte_eth_dev_get_dcb_info(uint8_t port_id,
+int rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info);
/**
@@ -3952,7 +4104,7 @@ int rte_eth_dev_get_dcb_info(uint8_t port_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
@@ -3980,7 +4132,7 @@ void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
@@ -4007,7 +4159,7 @@ void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param);
/**
@@ -4040,7 +4192,7 @@ void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
* - -EINVAL: The port_id or the queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
-int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb);
/**
@@ -4073,7 +4225,7 @@ int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
* - -EINVAL: The port_id or the queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
-int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb);
/**
@@ -4093,7 +4245,7 @@ int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
* - -ENOTSUP: routine is not supported by the device PMD.
* - -EINVAL: The port_id or the queue_id is out of range.
*/
-int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
/**
@@ -4113,7 +4265,7 @@ int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
* - -ENOTSUP: routine is not supported by the device PMD.
* - -EINVAL: The port_id or the queue_id is out of range.
*/
-int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
/**
@@ -4132,7 +4284,7 @@ int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
+int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
/**
* Retrieve size of device EEPROM
@@ -4145,7 +4297,7 @@ int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_eeprom_length(uint8_t port_id);
+int rte_eth_dev_get_eeprom_length(uint16_t port_id);
/**
* Retrieve EEPROM and EEPROM attribute
@@ -4161,7 +4313,7 @@ int rte_eth_dev_get_eeprom_length(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
+int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
/**
* Program EEPROM with provided data
@@ -4177,7 +4329,7 @@ int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
+int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
/**
* Set the list of multicast addresses to filter on an Ethernet device.
@@ -4196,7 +4348,7 @@ int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
* - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
* - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
*/
-int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
@@ -4211,7 +4363,7 @@ int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_enable(uint8_t port_id);
+int rte_eth_timesync_enable(uint16_t port_id);
/**
* Disable IEEE1588/802.1AS timestamping for an Ethernet device.
@@ -4224,7 +4376,7 @@ int rte_eth_timesync_enable(uint8_t port_id);
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_disable(uint8_t port_id);
+int rte_eth_timesync_disable(uint16_t port_id);
/**
* Read an IEEE1588/802.1AS RX timestamp from an Ethernet device.
@@ -4243,7 +4395,7 @@ int rte_eth_timesync_disable(uint8_t port_id);
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
+int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
struct timespec *timestamp, uint32_t flags);
/**
@@ -4260,7 +4412,7 @@ int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
+int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
struct timespec *timestamp);
/**
@@ -4279,7 +4431,7 @@ int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
+int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
/**
* Read the time from the timesync clock on an Ethernet device.
@@ -4295,7 +4447,7 @@ int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
* @return
* - 0: Success.
*/
-int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
+int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
/**
* Set the time of the timesync clock on an Ethernet device.
@@ -4314,7 +4466,7 @@ int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
+int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
/**
* Create memzone for HW rings.
@@ -4355,7 +4507,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
+rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel);
/**
@@ -4382,7 +4534,7 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
+rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
uint32_t mask,
uint8_t en);
@@ -4400,7 +4552,7 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
* - (-ENODEV or -EINVAL) on failure.
*/
int
-rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
+rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
/**
* Get the device name from port id
@@ -4414,7 +4566,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
* - (-EINVAL) on failure.
*/
int
-rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
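Illustrative sketch of the name/port lookups with the widened 16-bit port id (the PCI address is a placeholder):

uint16_t port;
char name[64]; /* sized generously; real code would use the proper constant */
if (rte_eth_dev_get_port_by_name("0000:01:00.0", &port) == 0)
	(void)rte_eth_dev_get_name_by_port(port, name);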
/**
* Check that numbers of Rx and Tx descriptors satisfy descriptors limits from
@@ -4432,10 +4584,28 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
* - (0) if successful.
* - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
*/
-int rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc);
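Typical call sequence, sketched under the assumption that port_id is a valid configured port:

uint16_t nb_rxd = 1024, nb_txd = 1024; /* requested ring sizes */
if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
	/* nb_rxd/nb_txd are now clamped to the PMD descriptor limits and
	 * can be passed to the rx/tx queue setup calls. */
}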
+
+/**
+ * Test if a port supports specific mempool ops.
+ *
+ * @param port_id
+ * Port identifier of the Ethernet device.
+ * @param[in] pool
+ * The name of the pool operations to test.
+ * @return
+ * - 0: this pool is the best mempool ops choice for this port.
+ * - 1: this pool's ops are supported for this port.
+ * - -ENOTSUP: mempool ops are not supported for this port.
+ * - -ENODEV: invalid port identifier.
+ * - -EINVAL: the *pool* parameter is NULL.
+ */
+int
+rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
+
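A small sketch of how an application might interpret the three return cases ("ring_mp_mc" is just an illustrative ops name; assumes <stdio.h> and a valid port_id):

int ret = rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc");
if (ret == 0)
	printf("best mempool ops for port %u\n", port_id);
else if (ret == 1)
	printf("supported, but not the preferred ops\n");
else
	printf("unusable on this port: %d\n", ret); /* -ENOTSUP/-ENODEV/-EINVAL */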
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
index 56b10721..722075e0 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -36,6 +36,7 @@
#include <rte_malloc.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
/**
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h
index 4d2c3e2b..ff92e6ed 100644
--- a/lib/librte_ether/rte_ethdev_vdev.h
+++ b/lib/librte_ether/rte_ethdev_vdev.h
@@ -35,7 +35,7 @@
#define _RTE_ETHDEV_VDEV_H_
#include <rte_malloc.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
/**
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ethdev_version.map
index 42837285..e9681ac8 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ethdev_version.map
@@ -187,3 +187,31 @@ DPDK_17.08 {
rte_tm_wred_profile_delete;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_eth_dev_get_sec_ctx;
+ rte_eth_dev_pool_ops_supported;
+ rte_eth_dev_reset;
+ rte_flow_error_set;
+
+} DPDK_17.08;
+
+EXPERIMENTAL {
+ global:
+
+ rte_mtr_capabilities_get;
+ rte_mtr_create;
+ rte_mtr_destroy;
+ rte_mtr_meter_disable;
+ rte_mtr_meter_dscp_table_update;
+ rte_mtr_meter_enable;
+ rte_mtr_meter_profile_add;
+ rte_mtr_meter_profile_delete;
+ rte_mtr_meter_profile_update;
+ rte_mtr_policer_actions_update;
+ rte_mtr_stats_read;
+ rte_mtr_stats_update;
+
+} DPDK_17.11;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
index 2001fbbf..66590630 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ether/rte_flow.c
@@ -108,7 +108,7 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
-rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
+rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops;
@@ -132,7 +132,7 @@ rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
/* Check whether a flow rule can be created on a given port. */
int
-rte_flow_validate(uint8_t port_id,
+rte_flow_validate(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -145,14 +145,14 @@ rte_flow_validate(uint8_t port_id,
return -rte_errno;
if (likely(!!ops->validate))
return ops->validate(dev, attr, pattern, actions, error);
- return -rte_flow_error_set(error, ENOSYS,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(ENOSYS));
+ return rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
}
/* Create a flow rule on a given port. */
struct rte_flow *
-rte_flow_create(uint8_t port_id,
+rte_flow_create(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -172,7 +172,7 @@ rte_flow_create(uint8_t port_id,
/* Destroy a flow rule on a given port. */
int
-rte_flow_destroy(uint8_t port_id,
+rte_flow_destroy(uint16_t port_id,
struct rte_flow *flow,
struct rte_flow_error *error)
{
@@ -183,14 +183,14 @@ rte_flow_destroy(uint8_t port_id,
return -rte_errno;
if (likely(!!ops->destroy))
return ops->destroy(dev, flow, error);
- return -rte_flow_error_set(error, ENOSYS,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(ENOSYS));
+ return rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
}
/* Destroy all flow rules associated with a port. */
int
-rte_flow_flush(uint8_t port_id,
+rte_flow_flush(uint16_t port_id,
struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
@@ -200,14 +200,14 @@ rte_flow_flush(uint8_t port_id,
return -rte_errno;
if (likely(!!ops->flush))
return ops->flush(dev, error);
- return -rte_flow_error_set(error, ENOSYS,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(ENOSYS));
+ return rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
}
/* Query an existing flow rule. */
int
-rte_flow_query(uint8_t port_id,
+rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
enum rte_flow_action_type action,
void *data,
@@ -220,14 +220,14 @@ rte_flow_query(uint8_t port_id,
return -rte_errno;
if (likely(!!ops->query))
return ops->query(dev, flow, action, data, error);
- return -rte_flow_error_set(error, ENOSYS,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(ENOSYS));
+ return rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
}
/* Restrict ingress traffic to the defined flow rules. */
int
-rte_flow_isolate(uint8_t port_id,
+rte_flow_isolate(uint16_t port_id,
int set,
struct rte_flow_error *error)
{
@@ -238,9 +238,28 @@ rte_flow_isolate(uint8_t port_id,
return -rte_errno;
if (likely(!!ops->isolate))
return ops->isolate(dev, set, error);
- return -rte_flow_error_set(error, ENOSYS,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, rte_strerror(ENOSYS));
+ return rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/* Initialize flow error structure. */
+int
+rte_flow_error_set(struct rte_flow_error *error,
+ int code,
+ enum rte_flow_error_type type,
+ const void *cause,
+ const char *message)
+{
+ if (error) {
+ *error = (struct rte_flow_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+ rte_errno = code;
+ return -code;
}
/** Compute storage space needed by item specification. */
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index bba6169f..47c88ea5 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -50,6 +50,7 @@
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_byteorder.h>
+#include <rte_esp.h>
#ifdef __cplusplus
extern "C" {
@@ -309,6 +310,40 @@ enum rte_flow_item_type {
* See struct rte_flow_item_fuzzy.
*/
RTE_FLOW_ITEM_TYPE_FUZZY,
+
+ /**
+ * Matches a GTP header.
+ *
+ * Configure flow for GTP packets.
+ *
+ * See struct rte_flow_item_gtp.
+ */
+ RTE_FLOW_ITEM_TYPE_GTP,
+
+ /**
+ * Matches a GTP header.
+ *
+ * Configure flow for GTP-C packets.
+ *
+ * See struct rte_flow_item_gtp.
+ */
+ RTE_FLOW_ITEM_TYPE_GTPC,
+
+ /**
+ * Matches a GTP header.
+ *
+ * Configure flow for GTP-U packets.
+ *
+ * See struct rte_flow_item_gtp.
+ */
+ RTE_FLOW_ITEM_TYPE_GTPU,
+
+ /**
+ * Matches an ESP header.
+ *
+ * See struct rte_flow_item_esp.
+ */
+ RTE_FLOW_ITEM_TYPE_ESP,
};
/**
@@ -735,6 +770,49 @@ static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
#endif
/**
+ * RTE_FLOW_ITEM_TYPE_GTP.
+ *
+ * Matches a GTPv1 header.
+ */
+struct rte_flow_item_gtp {
+ /**
+ * Version (3b), protocol type (1b), reserved (1b),
+ * Extension header flag (1b),
+ * Sequence number flag (1b),
+ * N-PDU number flag (1b).
+ */
+ uint8_t v_pt_rsv_flags;
+ uint8_t msg_type; /**< Message type. */
+ rte_be16_t msg_len; /**< Message length. */
+ rte_be32_t teid; /**< Tunnel endpoint identifier. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
+ .teid = RTE_BE32(0xffffffff),
+};
+#endif
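Illustrative pattern item using the new GTP support (the TEID value is a placeholder; the outer ETH/IPV4/UDP items are omitted):

struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x1234) };
struct rte_flow_item gtp_item = {
	.type = RTE_FLOW_ITEM_TYPE_GTPU,
	.spec = &gtp_spec,
	.mask = &rte_flow_item_gtp_mask, /* default mask: TEID only */
};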
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ESP
+ *
+ * Matches an ESP header.
+ */
+struct rte_flow_item_esp {
+ struct esp_hdr hdr; /**< ESP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
+ .hdr = {
+ .spi = 0xffffffff,
+ },
+};
+#endif
+
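Similarly, a sketch of an ESP item matching on the SPI field only (SPI value is a placeholder):

struct rte_flow_item_esp esp_spec = { .hdr = { .spi = RTE_BE32(0x100) } };
struct rte_flow_item esp_item = {
	.type = RTE_FLOW_ITEM_TYPE_ESP,
	.spec = &esp_spec,
	.mask = &rte_flow_item_esp_mask, /* default mask: SPI only */
};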
+/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol
@@ -915,6 +993,22 @@ enum rte_flow_action_type {
* See struct rte_flow_action_vf.
*/
RTE_FLOW_ACTION_TYPE_VF,
+
+ /**
+ * Traffic metering and policing (MTR).
+ *
+ * See struct rte_flow_action_meter.
+ * See file rte_mtr.h for MTR object configuration.
+ */
+ RTE_FLOW_ACTION_TYPE_METER,
+
+ /**
+ * Redirects packets to security engine of current device for security
+ * processing as specified by security session.
+ *
+ * See struct rte_flow_action_security.
+ */
+ RTE_FLOW_ACTION_TYPE_SECURITY
};
/**
@@ -1008,6 +1102,51 @@ struct rte_flow_action_vf {
};
/**
+ * RTE_FLOW_ACTION_TYPE_METER
+ *
+ * Traffic metering and policing (MTR).
+ *
+ * Packets matched by the flow rule pattern can be either dropped or passed on
+ * with their color set by the MTR object.
+ *
+ * Non-terminating by default.
+ */
+struct rte_flow_action_meter {
+ uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
+};
+
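Sketch of an action list referencing an MTR object (assumes MTR object 1 was created beforehand with rte_mtr_create()):

struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};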
+/**
+ * RTE_FLOW_ACTION_TYPE_SECURITY
+ *
+ * Perform the security action on flows matched by the pattern items
+ * according to the configuration of the security session.
+ *
+ * This action modifies the payload of matched flows. For INLINE_CRYPTO, the
+ * security protocol headers and IV are fully provided by the application as
+ * specified in the flow pattern. The payload of matching packets is
+ * encrypted on egress, and decrypted and authenticated on ingress.
+ * For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
+ * providing full encapsulation and decapsulation of packets in security
+ * protocols. The flow pattern specifies both the outer security header fields
+ * and the inner packet fields. The security session specified in the action
+ * must match the pattern parameters.
+ *
+ * The security session specified in the action must be created on the same
+ * port as the flow rule that uses it.
+ *
+ * The ingress/egress flow attribute should match that specified in the
+ * security session if the security session supports the definition of the
+ * direction.
+ *
+ * Multiple flows can be configured to use the same security session.
+ *
+ * Non-terminating by default.
+ */
+struct rte_flow_action_security {
+ void *security_session; /**< Pointer to security session structure. */
+};
+
+/**
* Definition of a single action.
*
* A list of actions is terminated by a END action.
@@ -1116,7 +1255,7 @@ struct rte_flow_error {
* state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
*/
int
-rte_flow_validate(uint8_t port_id,
+rte_flow_validate(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1143,7 +1282,7 @@ rte_flow_validate(uint8_t port_id,
* rte_flow_validate().
*/
struct rte_flow *
-rte_flow_create(uint8_t port_id,
+rte_flow_create(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
@@ -1170,7 +1309,7 @@ rte_flow_create(uint8_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-rte_flow_destroy(uint8_t port_id,
+rte_flow_destroy(uint16_t port_id,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -1191,7 +1330,7 @@ rte_flow_destroy(uint8_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-rte_flow_flush(uint8_t port_id,
+rte_flow_flush(uint16_t port_id,
struct rte_flow_error *error);
/**
@@ -1219,7 +1358,7 @@ rte_flow_flush(uint8_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-rte_flow_query(uint8_t port_id,
+rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
enum rte_flow_action_type action,
void *data,
@@ -1267,7 +1406,31 @@ rte_flow_query(uint8_t port_id,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-rte_flow_isolate(uint8_t port_id, int set, struct rte_flow_error *error);
+rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
+
+/**
+ * Initialize flow error structure.
+ *
+ * @param[out] error
+ * Pointer to flow error structure (may be NULL).
+ * @param code
+ * Related error code (rte_errno).
+ * @param type
+ * Cause field and error types.
+ * @param cause
+ * Object responsible for the error.
+ * @param message
+ * Human-readable error message.
+ *
+ * @return
+ * Negative error code (errno value) and rte_errno is set.
+ */
+int
+rte_flow_error_set(struct rte_flow_error *error,
+ int code,
+ enum rte_flow_error_type type,
+ const void *cause,
+ const char *message);
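Since the function now returns the negated error code, a PMD callback can report failure in a single statement. A hypothetical sketch (assumes <errno.h>; the callback name and reduced signature are illustrative only):

static int
dummy_flow_validate_cb(struct rte_eth_dev *dev __rte_unused,
		       struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "not implemented");
}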
/**
* Generic flow representation.
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
index 4d95391d..254d1cb2 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -45,7 +45,6 @@
#include <stdint.h>
-#include <rte_errno.h>
#include "rte_ethdev.h"
#include "rte_flow.h"
@@ -128,43 +127,6 @@ struct rte_flow_ops {
};
/**
- * Initialize generic flow error structure.
- *
- * This function also sets rte_errno to a given value.
- *
- * @param[out] error
- * Pointer to flow error structure (may be NULL).
- * @param code
- * Related error code (rte_errno).
- * @param type
- * Cause field and error types.
- * @param cause
- * Object responsible for the error.
- * @param message
- * Human-readable error message.
- *
- * @return
- * Error code.
- */
-static inline int
-rte_flow_error_set(struct rte_flow_error *error,
- int code,
- enum rte_flow_error_type type,
- const void *cause,
- const char *message)
-{
- if (error) {
- *error = (struct rte_flow_error){
- .type = type,
- .cause = cause,
- .message = message,
- };
- }
- rte_errno = code;
- return code;
-}
-
-/**
* Get generic flow operations structure from a port.
*
* @param port_id
@@ -178,7 +140,7 @@ rte_flow_error_set(struct rte_flow_error *error,
* additional details.
*/
const struct rte_flow_ops *
-rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
+rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error);
#ifdef __cplusplus
}
diff --git a/lib/librte_ether/rte_mtr.c b/lib/librte_ether/rte_mtr.c
new file mode 100644
index 00000000..4f56f871
--- /dev/null
+++ b/lib/librte_ether/rte_mtr.c
@@ -0,0 +1,229 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_mtr_driver.h"
+#include "rte_mtr.h"
+
+/* Get generic traffic metering & policing operations structure from a port. */
+const struct rte_mtr_ops *
+rte_mtr_ops_get(uint16_t port_id, struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_mtr_ops *ops;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ rte_mtr_error_set(error,
+ ENODEV,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+
+ if ((dev->dev_ops->mtr_ops_get == NULL) ||
+ (dev->dev_ops->mtr_ops_get(dev, &ops) != 0) ||
+ (ops == NULL)) {
+ rte_mtr_error_set(error,
+ ENOSYS,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+ }
+
+ return ops;
+}
+
+#define RTE_MTR_FUNC(port_id, func) \
+({ \
+ const struct rte_mtr_ops *ops = \
+ rte_mtr_ops_get(port_id, error); \
+ if (ops == NULL) \
+ return -rte_errno; \
+ \
+ if (ops->func == NULL) \
+ return -rte_mtr_error_set(error, \
+ ENOSYS, \
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED, \
+ NULL, \
+ rte_strerror(ENOSYS)); \
+ \
+ ops->func; \
+})
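+/* Note: RTE_MTR_FUNC is a GNU C statement expression; its embedded
+ * "return" statements exit the calling rte_mtr_* API function when the
+ * port or callback is unavailable, otherwise the expression evaluates
+ * to the PMD callback pointer, which the API function then invokes. */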
+
+/* MTR capabilities get */
+int
+rte_mtr_capabilities_get(uint16_t port_id,
+ struct rte_mtr_capabilities *cap,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, capabilities_get)(dev,
+ cap, error);
+}
+
+/* MTR meter profile add */
+int
+rte_mtr_meter_profile_add(uint16_t port_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_profile_add)(dev,
+ meter_profile_id, profile, error);
+}
+
+/** MTR meter profile delete */
+int
+rte_mtr_meter_profile_delete(uint16_t port_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_profile_delete)(dev,
+ meter_profile_id, error);
+}
+
+/** MTR object create */
+int
+rte_mtr_create(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_params *params,
+ int shared,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, create)(dev,
+ mtr_id, params, shared, error);
+}
+
+/** MTR object destroy */
+int
+rte_mtr_destroy(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, destroy)(dev,
+ mtr_id, error);
+}
+
+/** MTR object meter enable */
+int
+rte_mtr_meter_enable(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_enable)(dev,
+ mtr_id, error);
+}
+
+/** MTR object meter disable */
+int
+rte_mtr_meter_disable(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_disable)(dev,
+ mtr_id, error);
+}
+
+/** MTR object meter profile update */
+int
+rte_mtr_meter_profile_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_profile_update)(dev,
+ mtr_id, meter_profile_id, error);
+}
+
+/** MTR object meter DSCP table update */
+int
+rte_mtr_meter_dscp_table_update(uint16_t port_id,
+ uint32_t mtr_id,
+ enum rte_mtr_color *dscp_table,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, meter_dscp_table_update)(dev,
+ mtr_id, dscp_table, error);
+}
+
+/** MTR object policer action update */
+int
+rte_mtr_policer_actions_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint32_t action_mask,
+ enum rte_mtr_policer_action *actions,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, policer_actions_update)(dev,
+ mtr_id, action_mask, actions, error);
+}
+
+/** MTR object enabled stats update */
+int
+rte_mtr_stats_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint64_t stats_mask,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, stats_update)(dev,
+ mtr_id, stats_mask, error);
+}
+
+/** MTR object stats read */
+int
+rte_mtr_stats_read(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_mtr_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ return RTE_MTR_FUNC(port_id, stats_read)(dev,
+ mtr_id, stats, stats_mask, clear, error);
+}
diff --git a/lib/librte_ether/rte_mtr.h b/lib/librte_ether/rte_mtr.h
new file mode 100644
index 00000000..f6b6ef3b
--- /dev/null
+++ b/lib/librte_ether/rte_mtr.h
@@ -0,0 +1,730 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 Intel Corporation
+ * Copyright 2017 NXP
+ * Copyright 2017 Cavium
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_MTR_H__
+#define __INCLUDE_RTE_MTR_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Metering and Policing API
+ *
+ * This interface provides the ability to configure the traffic metering and
+ * policing (MTR) in a generic way.
+ *
+ * The processing done for each input packet hitting an MTR object is:
+ * A) Traffic metering: The packet is assigned a color (the meter output
+ * color), based on the previous history of the flow reflected in the
+ * current state of the MTR object, according to the specific traffic
+ * metering algorithm. The traffic metering algorithm can typically work
+ * in color aware mode, in which case the input packet already has an
+ * initial color (the input color), or in color blind mode, which is
+ * equivalent to considering all input packets initially colored as green.
+ * B) Policing: There is a separate policer action configured for each meter
+ * output color, which can:
+ * a) Drop the packet.
+ * b) Keep the same packet color: the policer output color matches the
+ * meter output color (essentially a no-op action).
+ * c) Recolor the packet: the policer output color is different than
+ * the meter output color.
+ * The policer output color is the output color of the packet, which is
+ * set in the packet meta-data (i.e. struct rte_mbuf::sched::color).
+ * C) Statistics: The set of counters maintained for each MTR object is
+ * configurable and subject to the implementation support. This set
+ * includes the number of packets and bytes dropped or passed for each
+ * output color.
+ *
+ * Once successfully created, an MTR object is linked to one or several flows
+ * through the meter action of the flow API.
+ * A) Whether an MTR object is private to a flow or potentially shared by
+ * several flows has to be specified at creation time.
+ * B) Several meter actions can be potentially registered for the same flow.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Color
+ */
+enum rte_mtr_color {
+ RTE_MTR_GREEN = 0, /**< Green */
+ RTE_MTR_YELLOW, /**< Yellow */
+ RTE_MTR_RED, /**< Red */
+ RTE_MTR_COLORS /**< Number of colors. */
+};
+
+/**
+ * Statistics counter type
+ */
+enum rte_mtr_stats_type {
+ /** Number of packets passed as green by the policer. */
+ RTE_MTR_STATS_N_PKTS_GREEN = 1 << 0,
+
+ /** Number of packets passed as yellow by the policer. */
+ RTE_MTR_STATS_N_PKTS_YELLOW = 1 << 1,
+
+ /** Number of packets passed as red by the policer. */
+ RTE_MTR_STATS_N_PKTS_RED = 1 << 2,
+
+ /** Number of packets dropped by the policer. */
+ RTE_MTR_STATS_N_PKTS_DROPPED = 1 << 3,
+
+ /** Number of bytes passed as green by the policer. */
+ RTE_MTR_STATS_N_BYTES_GREEN = 1 << 4,
+
+ /** Number of bytes passed as yellow by the policer. */
+ RTE_MTR_STATS_N_BYTES_YELLOW = 1 << 5,
+
+ /** Number of bytes passed as red by the policer. */
+ RTE_MTR_STATS_N_BYTES_RED = 1 << 6,
+
+ /** Number of bytes dropped by the policer. */
+ RTE_MTR_STATS_N_BYTES_DROPPED = 1 << 7,
+};
+
+/**
+ * Statistics counters
+ */
+struct rte_mtr_stats {
+ /** Number of packets passed by the policer (per color). */
+ uint64_t n_pkts[RTE_MTR_COLORS];
+
+ /** Number of bytes passed by the policer (per color). */
+ uint64_t n_bytes[RTE_MTR_COLORS];
+
+ /** Number of packets dropped by the policer. */
+ uint64_t n_pkts_dropped;
+
+ /** Number of bytes dropped by the policer. */
+ uint64_t n_bytes_dropped;
+};
+
+/**
+ * Traffic metering algorithms
+ */
+enum rte_mtr_algorithm {
+ /** No traffic metering performed, the output color is the same as the
+ * input color for every input packet. The meter of the MTR object is
+ * working in pass-through mode, having the same effect as disabling the meter.
+ * @see rte_mtr_meter_disable()
+ */
+ RTE_MTR_NONE = 0,
+
+ /** Single Rate Three Color Marker (srTCM) - IETF RFC 2697. */
+ RTE_MTR_SRTCM_RFC2697,
+
+ /** Two Rate Three Color Marker (trTCM) - IETF RFC 2698. */
+ RTE_MTR_TRTCM_RFC2698,
+
+ /** Two Rate Three Color Marker (trTCM) - IETF RFC 4115. */
+ RTE_MTR_TRTCM_RFC4115,
+};
+
+/**
+ * Meter profile
+ */
+struct rte_mtr_meter_profile {
+ /** Traffic metering algorithm. */
+ enum rte_mtr_algorithm alg;
+
+ RTE_STD_C11
+ union {
+ /** Items only valid when *alg* is set to srTCM - RFC 2697. */
+ struct {
+ /** Committed Information Rate (CIR) (bytes/second). */
+ uint64_t cir;
+
+ /** Committed Burst Size (CBS) (bytes). */
+ uint64_t cbs;
+
+ /** Excess Burst Size (EBS) (bytes). */
+ uint64_t ebs;
+ } srtcm_rfc2697;
+
+ /** Items only valid when *alg* is set to trTCM - RFC 2698. */
+ struct {
+ /** Committed Information Rate (CIR) (bytes/second). */
+ uint64_t cir;
+
+ /** Peak Information Rate (PIR) (bytes/second). */
+ uint64_t pir;
+
+ /** Committed Burst Size (CBS) (bytes). */
+ uint64_t cbs;
+
+ /** Peak Burst Size (PBS) (bytes). */
+ uint64_t pbs;
+ } trtcm_rfc2698;
+
+ /** Items only valid when *alg* is set to trTCM - RFC 4115. */
+ struct {
+ /** Committed Information Rate (CIR) (bytes/second). */
+ uint64_t cir;
+
+ /** Excess Information Rate (EIR) (bytes/second). */
+ uint64_t eir;
+
+ /** Committed Burst Size (CBS) (bytes). */
+ uint64_t cbs;
+
+ /** Excess Burst Size (EBS) (bytes). */
+ uint64_t ebs;
+ } trtcm_rfc4115;
+ };
+};
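Only the union member selected by *alg* is meaningful. For example, a trTCM RFC 2698 profile might be filled as follows (a sketch; rates and burst sizes are placeholders):

struct rte_mtr_meter_profile profile = {
	.alg = RTE_MTR_TRTCM_RFC2698,
	.trtcm_rfc2698 = {
		.cir = 1250000,   /* 10 Mbps committed, in bytes/second */
		.pir = 12500000,  /* 100 Mbps peak */
		.cbs = 2048,
		.pbs = 16384,
	},
};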
+
+/**
+ * Policer actions
+ */
+enum rte_mtr_policer_action {
+ /** Recolor the packet as green. */
+ MTR_POLICER_ACTION_COLOR_GREEN = 0,
+
+ /** Recolor the packet as yellow. */
+ MTR_POLICER_ACTION_COLOR_YELLOW,
+
+ /** Recolor the packet as red. */
+ MTR_POLICER_ACTION_COLOR_RED,
+
+ /** Drop the packet. */
+ MTR_POLICER_ACTION_DROP,
+};
+
+/**
+ * Parameters for each traffic metering & policing object
+ *
+ * @see enum rte_mtr_stats_type
+ */
+struct rte_mtr_params {
+ /** Meter profile ID. */
+ uint32_t meter_profile_id;
+
+ /** Meter input color in case of MTR object chaining. When non-zero: if
+ * a previous MTR object is enabled in the same flow, then the color
+ * determined by the latest MTR object in the same flow is used as the
+ * input color by the current MTR object, otherwise the current MTR
+ * object uses the *dscp_table* to determine the input color. When zero:
+ * the color determined by any previous MTR object in same flow is
+ * ignored by the current MTR object, which uses the *dscp_table* to
+ * determine the input color.
+ */
+ int use_prev_mtr_color;
+
+ /** Meter input color. When non-NULL: it points to a pre-allocated and
+ * pre-populated table with exactly 64 elements providing the input
+ * color for each value of the IPv4/IPv6 Differentiated Services Code
+ * Point (DSCP) input packet field. When NULL: it is equivalent to
+ * setting this parameter to an all-green populated table (i.e. table
+ * with all the 64 elements set to green color). The color blind mode
+ * is configured by setting *use_prev_mtr_color* to 0 and *dscp_table*
+ * to either NULL or to an all-green populated table. When
+ * *use_prev_mtr_color* is set to a non-zero value or when *dscp_table*
+ * contains at least one yellow or red color element, the color aware
+ * mode is configured.
+ */
+ enum rte_mtr_color *dscp_table;
+
+ /** Non-zero to enable the meter, zero to disable the meter at the time
+ * of MTR object creation. Ignored when the meter profile indicated by
+ * *meter_profile_id* is set to NONE.
+ * @see rte_mtr_meter_disable()
+ */
+ int meter_enable;
+
+ /** Policer actions (per meter output color). */
+ enum rte_mtr_policer_action action[RTE_MTR_COLORS];
+
+ /** Set of stats counters to be enabled.
+ * @see enum rte_mtr_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * MTR capabilities
+ */
+struct rte_mtr_capabilities {
+ /** Maximum number of MTR objects. */
+ uint32_t n_max;
+
+ /** Maximum number of MTR objects that can be shared by multiple flows.
+ * The value of zero indicates that shared MTR objects are not
+ * supported. The maximum value is *n_max*.
+ */
+ uint32_t n_shared_max;
+
+ /** When non-zero, this flag indicates that all the MTR objects that
+ * cannot be shared by multiple flows have identical capability set.
+ */
+ int identical;
+
+ /** When non-zero, this flag indicates that all the MTR objects that
+ * can be shared by multiple flows have identical capability set.
+ */
+ int shared_identical;
+
+ /** Maximum number of flows that can share the same MTR object. The
+ * value of zero is invalid. The value of 1 means that shared MTR
+ * objects are not supported.
+ */
+ uint32_t shared_n_flows_per_mtr_max;
+
+ /** Maximum number of MTR objects that can be part of the same flow. The
+ * value of zero is invalid. The value of 1 indicates that MTR object
+ * chaining is not supported. The maximum value is *n_max*.
+ */
+ uint32_t chaining_n_mtrs_per_flow_max;
+
+ /**
+ * When non-zero, it indicates that the packet color identified by one
+ * MTR object can be used as the packet input color by any subsequent
+ * MTR object from the same flow. When zero, it indicates that the color
+ * determined by one MTR object is always ignored by any subsequent MTR
+ * object from the same flow. Only valid when MTR chaining is supported,
+ * i.e. *chaining_n_mtrs_per_flow_max* is greater than 1. When non-zero,
+ * it also means that the color aware mode is supported by at least one
+ * metering algorithm.
+ */
+ int chaining_use_prev_mtr_color_supported;
+
+ /**
+ * When non-zero, it indicates that the packet color identified by one
+ * MTR object is always used as the packet input color by any subsequent
+ * MTR object that is part of the same flow. When zero, it indicates
+ * that whether the color determined by one MTR object is either ignored
+ * or used as the packet input color by any subsequent MTR object from
+ * the same flow is individually configurable for each MTR object. Only
+ * valid when *chaining_use_prev_mtr_color_supported* is non-zero.
+ */
+ int chaining_use_prev_mtr_color_enforced;
+
+ /** Maximum number of MTR objects that can have their meter configured
+ * to run the srTCM RFC 2697 algorithm. The value of 0 indicates this
+ * metering algorithm is not supported. The maximum value is *n_max*.
+ */
+ uint32_t meter_srtcm_rfc2697_n_max;
+
+ /** Maximum number of MTR objects that can have their meter configured
+ * to run the trTCM RFC 2698 algorithm. The value of 0 indicates this
+ * metering algorithm is not supported. The maximum value is *n_max*.
+ */
+ uint32_t meter_trtcm_rfc2698_n_max;
+
+ /** Maximum number of MTR objects that can have their meter configured
+ * to run the trTCM RFC 4115 algorithm. The value of 0 indicates this
+ * metering algorithm is not supported. The maximum value is *n_max*.
+ */
+ uint32_t meter_trtcm_rfc4115_n_max;
+
+ /** Maximum traffic rate that can be metered by a single MTR object. For
+ * srTCM RFC 2697, this is the maximum CIR rate. For trTCM RFC 2698,
+ * this is the maximum PIR rate. For trTCM RFC 4115, this is the maximum
+ * value for the sum of PIR and EIR rates.
+ */
+ uint64_t meter_rate_max;
+
+ /**
+ * When non-zero, it indicates that color aware mode is supported for
+ * the srTCM RFC 2697 metering algorithm.
+ */
+ int color_aware_srtcm_rfc2697_supported;
+
+ /**
+ * When non-zero, it indicates that color aware mode is supported for
+ * the trTCM RFC 2698 metering algorithm.
+ */
+ int color_aware_trtcm_rfc2698_supported;
+
+ /**
+ * When non-zero, it indicates that color aware mode is supported for
+ * the trTCM RFC 4115 metering algorithm.
+ */
+ int color_aware_trtcm_rfc4115_supported;
+
+ /** When non-zero, it indicates that the policer packet recolor actions
+ * are supported.
+ * @see enum rte_mtr_policer_action
+ */
+ int policer_action_recolor_supported;
+
+ /** When non-zero, it indicates that the policer packet drop action is
+ * supported.
+ * @see enum rte_mtr_policer_action
+ */
+ int policer_action_drop_supported;
+
+ /** Set of supported statistics counter types.
+ * @see enum rte_mtr_stats_type
+ */
+ uint64_t stats_mask;
+};
+
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * rte_mtr_error::cause.
+ */
+enum rte_mtr_error_type {
+ RTE_MTR_ERROR_TYPE_NONE, /**< No error. */
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+ RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
+ RTE_MTR_ERROR_TYPE_POLICER_ACTION_YELLOW,
+ RTE_MTR_ERROR_TYPE_POLICER_ACTION_RED,
+ RTE_MTR_ERROR_TYPE_STATS_MASK,
+ RTE_MTR_ERROR_TYPE_STATS,
+ RTE_MTR_ERROR_TYPE_SHARED,
+};
+
+/**
+ * Verbose error structure definition.
+ *
+ * This object is normally allocated by applications and set by PMDs, the
+ * message points to a constant string which does not need to be freed by
+ * the application, however its pointer can be considered valid only as long
+ * as its associated DPDK port remains configured. Closing the underlying
+ * device or unloading the PMD invalidates it.
+ *
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct rte_mtr_error {
+ enum rte_mtr_error_type type; /**< Cause field and error type. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+};
+
+/**
+ * MTR capabilities get
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] cap
+ * MTR capabilities. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_capabilities_get(uint16_t port_id,
+ struct rte_mtr_capabilities *cap,
+ struct rte_mtr_error *error);
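Usage sketch (assumes port 0 is valid and <stdio.h> is included):

struct rte_mtr_capabilities cap;
struct rte_mtr_error err;
if (rte_mtr_capabilities_get(0, &cap, &err) == 0 &&
    cap.meter_srtcm_rfc2697_n_max > 0)
	printf("up to %u srTCM meters on this port\n",
	       cap.meter_srtcm_rfc2697_n_max);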
+
+/**
+ * Meter profile add
+ *
+ * Create a new meter profile with ID set to *meter_profile_id*. The new profile
+ * is used to create one or several MTR objects.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] meter_profile_id
+ * ID for the new meter profile. Needs to be unused by any of the existing
+ * meter profiles added for the current port.
+ * @param[in] profile
+ * Meter profile parameters. Needs to be pre-allocated and valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_profile_add(uint16_t port_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error);
+
+/**
+ * Meter profile delete
+ *
+ * Delete an existing meter profile. This operation fails when there is
+ * currently at least one user (i.e. MTR object) of this profile.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] meter_profile_id
+ * Meter profile ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_profile_delete(uint16_t port_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object create
+ *
+ * Create a new MTR object for the current port. This object is run as part of
+ * the associated flow action for traffic metering and policing.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be unused by any of the existing MTR objects
+ * created for the current port.
+ * @param[in] params
+ * MTR object params. Needs to be pre-allocated and valid.
+ * @param[in] shared
+ * Non-zero when this MTR object can be shared by multiple flows, zero when
+ * this MTR object can be used by a single flow.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_flow_action_type::RTE_FLOW_ACTION_TYPE_METER
+ */
+int
+rte_mtr_create(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_params *params,
+ int shared,
+ struct rte_mtr_error *error);
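Putting the profile and object calls together, a minimal creation sketch (port, profile and MTR ids as well as rates are placeholders; error handling is omitted for brevity):

struct rte_mtr_meter_profile mp = {
	.alg = RTE_MTR_SRTCM_RFC2697,
	.srtcm_rfc2697 = { .cir = 1250000, .cbs = 2048, .ebs = 4096 },
};
struct rte_mtr_params params = {
	.meter_profile_id = 1,
	.meter_enable = 1,
	.action = { [RTE_MTR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
		    [RTE_MTR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
		    [RTE_MTR_RED] = MTR_POLICER_ACTION_DROP },
	.stats_mask = RTE_MTR_STATS_N_PKTS_DROPPED,
};
struct rte_mtr_error err;
(void)rte_mtr_meter_profile_add(0, 1, &mp, &err);
(void)rte_mtr_create(0, 1, &params, 0 /* private */, &err);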
+
+/**
+ * MTR object destroy
+ *
+ * Delete an existing MTR object. This operation fails when there is currently
+ * at least one user (i.e. flow) of this MTR object.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_destroy(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object meter disable
+ *
+ * Disable the meter of an existing MTR object. In disabled state, the meter of
+ * the current MTR object works in pass-through mode, meaning that for each
+ * input packet the meter output color is always the same as the input color. In
+ * particular, when the meter of the current MTR object is configured in color
+ * blind mode, the input color is always green, so the meter output color is
+ * also always green. Note that the policer and the statistics of the current
+ * MTR object are working as usual while the meter is disabled. No action is
+ * taken and this function returns successfully when the meter of the current
+ * MTR object is already disabled.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_disable(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object meter enable
+ *
+ * Enable the meter of an existing MTR object. If the MTR object has its meter
+ * already enabled, then no action is taken and this function returns
+ * successfully.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_enable(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object meter profile update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[in] meter_profile_id
+ * Meter profile ID for the current MTR object. Needs to be valid.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_profile_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object DSCP table update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[in] dscp_table
+ * When non-NULL: it points to a pre-allocated and pre-populated table with
+ * exactly 64 elements providing the input color for each value of the
+ * IPv4/IPv6 Differentiated Services Code Point (DSCP) input packet field.
+ * When NULL: it is equivalent to setting this parameter to an all-green
+ * populated table (i.e. table with all the 64 elements set to green color).
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_meter_dscp_table_update(uint16_t port_id,
+ uint32_t mtr_id,
+ enum rte_mtr_color *dscp_table,
+ struct rte_mtr_error *error);
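Sketch: mark DSCP 46 (EF) as yellow on input and leave every other code point green (port/MTR ids are placeholders):

enum rte_mtr_color dscp[64];
struct rte_mtr_error err;
int i;

for (i = 0; i < 64; i++)
	dscp[i] = RTE_MTR_GREEN;
dscp[46] = RTE_MTR_YELLOW;
(void)rte_mtr_meter_dscp_table_update(0, 1, dscp, &err);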
+
+/**
+ * MTR object policer actions update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[in] action_mask
+ * Bit mask indicating which policer actions need to be updated. One or more
+ * policer actions can be updated in a single function invocation. To update
+ * the policer action associated with color C, bit (1 << C) needs to be set in
+ * *action_mask* and element at position C in the *actions* array needs to be
+ * valid.
+ * @param[in] actions
+ * Pre-allocated and pre-populated array of policer actions.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ */
+int
+rte_mtr_policer_actions_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint32_t action_mask,
+ enum rte_mtr_policer_action *actions,
+ struct rte_mtr_error *error);
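Sketch: switch only the red-color action to drop; the mask bit selects which *actions* element is consumed, so the other elements may stay uninitialized:

enum rte_mtr_policer_action acts[RTE_MTR_COLORS];
struct rte_mtr_error err;

acts[RTE_MTR_RED] = MTR_POLICER_ACTION_DROP; /* only element RED is read */
(void)rte_mtr_policer_actions_update(0, 1, 1 << RTE_MTR_RED, acts, &err);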
+
+/**
+ * MTR object enabled statistics counters update
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[in] stats_mask
+ * Mask of statistics counter types to be enabled for the current MTR object.
+ * Any statistics counter type not included in this set is to be disabled for
+ * the current MTR object.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_mtr_stats_type
+ */
+int
+rte_mtr_stats_update(uint16_t port_id,
+ uint32_t mtr_id,
+ uint64_t stats_mask,
+ struct rte_mtr_error *error);
+
+/**
+ * MTR object statistics counters read
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] mtr_id
+ * MTR object ID. Needs to be valid.
+ * @param[out] stats
+ * When non-NULL, it contains the current value for the statistics counters
+ * enabled for the current MTR object.
+ * @param[out] stats_mask
+ * When non-NULL, it contains the mask of statistics counter types that are
+ * currently enabled for this MTR object, indicating which of the counters
+ * retrieved with the *stats* structure are valid.
+ * @param[in] clear
+ * When this parameter has a non-zero value, the statistics counters are
+ * cleared (i.e. set to zero) immediately after they have been read,
+ * otherwise the statistics counters are left untouched.
+ * @param[out] error
+ * Error details. Filled in only on error, when not NULL.
+ * @return
+ * 0 on success, non-zero error code otherwise.
+ *
+ * @see enum rte_mtr_stats_type
+ */
+int
+rte_mtr_stats_read(uint16_t port_id,
+ uint32_t mtr_id,
+ struct rte_mtr_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_mtr_error *error);
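Read-and-clear sketch (assumes <inttypes.h> for PRIu64 and <stdio.h>; port/MTR ids are placeholders):

struct rte_mtr_stats st;
uint64_t mask;
struct rte_mtr_error err;
if (rte_mtr_stats_read(0, 1, &st, &mask, 1 /* clear */, &err) == 0 &&
    (mask & RTE_MTR_STATS_N_PKTS_DROPPED))
	printf("dropped %" PRIu64 " packets\n", st.n_pkts_dropped);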
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_MTR_H__ */
diff --git a/lib/librte_ether/rte_mtr_driver.h b/lib/librte_ether/rte_mtr_driver.h
new file mode 100644
index 00000000..6a289ef1
--- /dev/null
+++ b/lib/librte_ether/rte_mtr_driver.h
@@ -0,0 +1,221 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_MTR_DRIVER_H__
+#define __INCLUDE_RTE_MTR_DRIVER_H__
+
+/**
+ * @file
+ * RTE Generic Traffic Metering and Policing API (Driver Side)
+ *
+ * This file provides implementation helpers for internal use by PMDs; they
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_mtr.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_mtr_capabilities_get_t)(struct rte_eth_dev *dev,
+ struct rte_mtr_capabilities *cap,
+ struct rte_mtr_error *error);
+/**< @internal MTR capabilities get */
+
+typedef int (*rte_mtr_meter_profile_add_t)(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error);
+/**< @internal MTR meter profile add */
+
+typedef int (*rte_mtr_meter_profile_delete_t)(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error);
+/**< @internal MTR meter profile delete */
+
+typedef int (*rte_mtr_create_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_params *params,
+ int shared,
+ struct rte_mtr_error *error);
+/**< @internal MTR object create */
+
+typedef int (*rte_mtr_destroy_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+/**< @internal MTR object destroy */
+
+typedef int (*rte_mtr_meter_enable_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+/**< @internal MTR object meter enable */
+
+typedef int (*rte_mtr_meter_disable_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_error *error);
+/**< @internal MTR object meter disable */
+
+typedef int (*rte_mtr_meter_profile_update_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error);
+/**< @internal MTR object meter profile update */
+
+typedef int (*rte_mtr_meter_dscp_table_update_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ enum rte_mtr_color *dscp_table,
+ struct rte_mtr_error *error);
+/**< @internal MTR object meter DSCP table update */
+
+typedef int (*rte_mtr_policer_actions_update_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ uint32_t action_mask,
+ enum rte_mtr_policer_action *actions,
+ struct rte_mtr_error *error);
+/**< @internal MTR object policer action update */
+
+typedef int (*rte_mtr_stats_update_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ uint64_t stats_mask,
+ struct rte_mtr_error *error);
+/**< @internal MTR object enabled stats update */
+
+typedef int (*rte_mtr_stats_read_t)(struct rte_eth_dev *dev,
+ uint32_t mtr_id,
+ struct rte_mtr_stats *stats,
+ uint64_t *stats_mask,
+ int clear,
+ struct rte_mtr_error *error);
+/**< @internal MTR object stats read */
+
+struct rte_mtr_ops {
+ /** MTR capabilities get */
+ rte_mtr_capabilities_get_t capabilities_get;
+
+ /** MTR meter profile add */
+ rte_mtr_meter_profile_add_t meter_profile_add;
+
+ /** MTR meter profile delete */
+ rte_mtr_meter_profile_delete_t meter_profile_delete;
+
+ /** MTR object create */
+ rte_mtr_create_t create;
+
+ /** MTR object destroy */
+ rte_mtr_destroy_t destroy;
+
+ /** MTR object meter enable */
+ rte_mtr_meter_enable_t meter_enable;
+
+ /** MTR object meter disable */
+ rte_mtr_meter_disable_t meter_disable;
+
+ /** MTR object meter profile update */
+ rte_mtr_meter_profile_update_t meter_profile_update;
+
+ /** MTR object meter DSCP table update */
+ rte_mtr_meter_dscp_table_update_t meter_dscp_table_update;
+
+ /** MTR object policer action update */
+ rte_mtr_policer_actions_update_t policer_actions_update;
+
+ /** MTR object enabled stats update */
+ rte_mtr_stats_update_t stats_update;
+
+ /** MTR object stats read */
+ rte_mtr_stats_read_t stats_read;
+};
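Driver-side wiring, sketched with hypothetical names. The eth_dev mtr_ops_get callback signature is assumed to be int (*)(struct rte_eth_dev *, void *), matching the rte_mtr_ops_get() call shown in rte_mtr.c above (assumes <string.h>):

static int
dummy_mtr_capabilities_get(struct rte_eth_dev *dev __rte_unused,
			   struct rte_mtr_capabilities *cap,
			   struct rte_mtr_error *error __rte_unused)
{
	memset(cap, 0, sizeof(*cap));
	return 0;
}

static const struct rte_mtr_ops dummy_mtr_ops = {
	.capabilities_get = dummy_mtr_capabilities_get,
};

static int
dummy_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
{
	*(const struct rte_mtr_ops **)ops = &dummy_mtr_ops;
	return 0;
}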
+
+/**
+ * Initialize generic error structure.
+ *
+ * This function also sets rte_errno to a given value.
+ *
+ * @param[out] error
+ * Pointer to error structure (may be NULL).
+ * @param[in] code
+ * Related error code (rte_errno).
+ * @param[in] type
+ * Cause field and error type.
+ * @param[in] cause
+ * Object responsible for the error.
+ * @param[in] message
+ * Human-readable error message.
+ *
+ * @return
+ * Error code.
+ */
+static inline int
+rte_mtr_error_set(struct rte_mtr_error *error,
+ int code,
+ enum rte_mtr_error_type type,
+ const void *cause,
+ const char *message)
+{
+ if (error) {
+ *error = (struct rte_mtr_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+ rte_errno = code;
+ return code;
+}
+
+/**
+ * Get generic traffic metering and policing operations structure from a port
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[out] error
+ * Error details.
+ *
+ * @return
+ * The traffic metering and policing operations structure associated with
+ * port_id on success, NULL otherwise.
+ */
+const struct rte_mtr_ops *
+rte_mtr_ops_get(uint16_t port_id, struct rte_mtr_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_MTR_DRIVER_H__ */
diff --git a/lib/librte_ether/rte_tm.c b/lib/librte_ether/rte_tm.c
index 71679650..ceac3411 100644
--- a/lib/librte_ether/rte_tm.c
+++ b/lib/librte_ether/rte_tm.c
@@ -40,7 +40,7 @@
/* Get generic traffic manager operations structure from a port. */
const struct rte_tm_ops *
-rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
+rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_tm_ops *ops;
@@ -87,7 +87,7 @@ rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
/* Get number of leaf nodes */
int
-rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
uint32_t *n_leaf_nodes,
struct rte_tm_error *error)
{
@@ -113,7 +113,7 @@ rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
/* Check node type (leaf or non-leaf) */
int
-rte_tm_node_type_get(uint8_t port_id,
+rte_tm_node_type_get(uint16_t port_id,
uint32_t node_id,
int *is_leaf,
struct rte_tm_error *error)
@@ -124,7 +124,7 @@ rte_tm_node_type_get(uint8_t port_id,
}
/* Get capabilities */
-int rte_tm_capabilities_get(uint8_t port_id,
+int rte_tm_capabilities_get(uint16_t port_id,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
@@ -134,7 +134,7 @@ int rte_tm_capabilities_get(uint8_t port_id,
}
/* Get level capabilities */
-int rte_tm_level_capabilities_get(uint8_t port_id,
+int rte_tm_level_capabilities_get(uint16_t port_id,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
@@ -145,7 +145,7 @@ int rte_tm_level_capabilities_get(uint8_t port_id,
}
/* Get node capabilities */
-int rte_tm_node_capabilities_get(uint8_t port_id,
+int rte_tm_node_capabilities_get(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_capabilities *cap,
struct rte_tm_error *error)
@@ -156,7 +156,7 @@ int rte_tm_node_capabilities_get(uint8_t port_id,
}
/* Add WRED profile */
-int rte_tm_wred_profile_add(uint8_t port_id,
+int rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_wred_params *profile,
struct rte_tm_error *error)
@@ -167,7 +167,7 @@ int rte_tm_wred_profile_add(uint8_t port_id,
}
/* Delete WRED profile */
-int rte_tm_wred_profile_delete(uint8_t port_id,
+int rte_tm_wred_profile_delete(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
{
@@ -177,7 +177,7 @@ int rte_tm_wred_profile_delete(uint8_t port_id,
}
/* Add/update shared WRED context */
-int rte_tm_shared_wred_context_add_update(uint8_t port_id,
+int rte_tm_shared_wred_context_add_update(uint16_t port_id,
uint32_t shared_wred_context_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
@@ -188,7 +188,7 @@ int rte_tm_shared_wred_context_add_update(uint8_t port_id,
}
/* Delete shared WRED context */
-int rte_tm_shared_wred_context_delete(uint8_t port_id,
+int rte_tm_shared_wred_context_delete(uint16_t port_id,
uint32_t shared_wred_context_id,
struct rte_tm_error *error)
{
@@ -198,7 +198,7 @@ int rte_tm_shared_wred_context_delete(uint8_t port_id,
}
/* Add shaper profile */
-int rte_tm_shaper_profile_add(uint8_t port_id,
+int rte_tm_shaper_profile_add(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_shaper_params *profile,
struct rte_tm_error *error)
@@ -209,7 +209,7 @@ int rte_tm_shaper_profile_add(uint8_t port_id,
}
/* Delete shaper profile */
-int rte_tm_shaper_profile_delete(uint8_t port_id,
+int rte_tm_shaper_profile_delete(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
{
@@ -219,7 +219,7 @@ int rte_tm_shaper_profile_delete(uint8_t port_id,
}
/* Add shared shaper */
-int rte_tm_shared_shaper_add_update(uint8_t port_id,
+int rte_tm_shared_shaper_add_update(uint16_t port_id,
uint32_t shared_shaper_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
@@ -230,7 +230,7 @@ int rte_tm_shared_shaper_add_update(uint8_t port_id,
}
/* Delete shared shaper */
-int rte_tm_shared_shaper_delete(uint8_t port_id,
+int rte_tm_shared_shaper_delete(uint16_t port_id,
uint32_t shared_shaper_id,
struct rte_tm_error *error)
{
@@ -240,7 +240,7 @@ int rte_tm_shared_shaper_delete(uint8_t port_id,
}
/* Add node to port traffic manager hierarchy */
-int rte_tm_node_add(uint8_t port_id,
+int rte_tm_node_add(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
@@ -256,7 +256,7 @@ int rte_tm_node_add(uint8_t port_id,
}
/* Delete node from traffic manager hierarchy */
-int rte_tm_node_delete(uint8_t port_id,
+int rte_tm_node_delete(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
@@ -266,7 +266,7 @@ int rte_tm_node_delete(uint8_t port_id,
}
/* Suspend node */
-int rte_tm_node_suspend(uint8_t port_id,
+int rte_tm_node_suspend(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
@@ -276,7 +276,7 @@ int rte_tm_node_suspend(uint8_t port_id,
}
/* Resume node */
-int rte_tm_node_resume(uint8_t port_id,
+int rte_tm_node_resume(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
@@ -286,7 +286,7 @@ int rte_tm_node_resume(uint8_t port_id,
}
/* Commit the initial port traffic manager hierarchy */
-int rte_tm_hierarchy_commit(uint8_t port_id,
+int rte_tm_hierarchy_commit(uint16_t port_id,
int clear_on_fail,
struct rte_tm_error *error)
{
@@ -296,7 +296,7 @@ int rte_tm_hierarchy_commit(uint8_t port_id,
}
/* Update node parent */
-int rte_tm_node_parent_update(uint8_t port_id,
+int rte_tm_node_parent_update(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
@@ -309,7 +309,7 @@ int rte_tm_node_parent_update(uint8_t port_id,
}
/* Update node private shaper */
-int rte_tm_node_shaper_update(uint8_t port_id,
+int rte_tm_node_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
@@ -320,7 +320,7 @@ int rte_tm_node_shaper_update(uint8_t port_id,
}
/* Update node shared shapers */
-int rte_tm_node_shared_shaper_update(uint8_t port_id,
+int rte_tm_node_shared_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_shaper_id,
int add,
@@ -332,7 +332,7 @@ int rte_tm_node_shared_shaper_update(uint8_t port_id,
}
/* Update node stats */
-int rte_tm_node_stats_update(uint8_t port_id,
+int rte_tm_node_stats_update(uint16_t port_id,
uint32_t node_id,
uint64_t stats_mask,
struct rte_tm_error *error)
@@ -343,7 +343,7 @@ int rte_tm_node_stats_update(uint8_t port_id,
}
/* Update WFQ weight mode */
-int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
uint32_t node_id,
int *wfq_weight_mode,
uint32_t n_sp_priorities,
@@ -355,7 +355,7 @@ int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
}
/* Update node congestion management mode */
-int rte_tm_node_cman_update(uint8_t port_id,
+int rte_tm_node_cman_update(uint16_t port_id,
uint32_t node_id,
enum rte_tm_cman_mode cman,
struct rte_tm_error *error)
@@ -366,7 +366,7 @@ int rte_tm_node_cman_update(uint8_t port_id,
}
/* Update node private WRED context */
-int rte_tm_node_wred_context_update(uint8_t port_id,
+int rte_tm_node_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
@@ -377,7 +377,7 @@ int rte_tm_node_wred_context_update(uint8_t port_id,
}
/* Update node shared WRED context */
-int rte_tm_node_shared_wred_context_update(uint8_t port_id,
+int rte_tm_node_shared_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_wred_context_id,
int add,
@@ -389,7 +389,7 @@ int rte_tm_node_shared_wred_context_update(uint8_t port_id,
}
/* Read and/or clear stats counters for specific node */
-int rte_tm_node_stats_read(uint8_t port_id,
+int rte_tm_node_stats_read(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_stats *stats,
uint64_t *stats_mask,
@@ -402,7 +402,7 @@ int rte_tm_node_stats_read(uint8_t port_id,
}
/* Packet marking - VLAN DEI */
-int rte_tm_mark_vlan_dei(uint8_t port_id,
+int rte_tm_mark_vlan_dei(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
@@ -414,7 +414,7 @@ int rte_tm_mark_vlan_dei(uint8_t port_id,
}
/* Packet marking - IPv4/IPv6 ECN */
-int rte_tm_mark_ip_ecn(uint8_t port_id,
+int rte_tm_mark_ip_ecn(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
@@ -426,7 +426,7 @@ int rte_tm_mark_ip_ecn(uint8_t port_id,
}
/* Packet marking - IPv4/IPv6 DSCP */
-int rte_tm_mark_ip_dscp(uint8_t port_id,
+int rte_tm_mark_ip_dscp(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
diff --git a/lib/librte_ether/rte_tm.h b/lib/librte_ether/rte_tm.h
index ebbfa1ee..2b25a871 100644
--- a/lib/librte_ether/rte_tm.h
+++ b/lib/librte_ether/rte_tm.h
@@ -1040,7 +1040,7 @@ struct rte_tm_error {
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
uint32_t *n_leaf_nodes,
struct rte_tm_error *error);
@@ -1064,7 +1064,7 @@ rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_node_type_get(uint8_t port_id,
+rte_tm_node_type_get(uint16_t port_id,
uint32_t node_id,
int *is_leaf,
struct rte_tm_error *error);
@@ -1082,7 +1082,7 @@ rte_tm_node_type_get(uint8_t port_id,
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_capabilities_get(uint8_t port_id,
+rte_tm_capabilities_get(uint16_t port_id,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error);
@@ -1102,7 +1102,7 @@ rte_tm_capabilities_get(uint8_t port_id,
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_level_capabilities_get(uint8_t port_id,
+rte_tm_level_capabilities_get(uint16_t port_id,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error);
@@ -1122,7 +1122,7 @@ rte_tm_level_capabilities_get(uint8_t port_id,
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_node_capabilities_get(uint8_t port_id,
+rte_tm_node_capabilities_get(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_capabilities *cap,
struct rte_tm_error *error);
@@ -1147,7 +1147,7 @@ rte_tm_node_capabilities_get(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_n_max
*/
int
-rte_tm_wred_profile_add(uint8_t port_id,
+rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_wred_params *profile,
struct rte_tm_error *error);
@@ -1170,7 +1170,7 @@ rte_tm_wred_profile_add(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_n_max
*/
int
-rte_tm_wred_profile_delete(uint8_t port_id,
+rte_tm_wred_profile_delete(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
@@ -1201,7 +1201,7 @@ rte_tm_wred_profile_delete(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_shared_wred_context_add_update(uint8_t port_id,
+rte_tm_shared_wred_context_add_update(uint16_t port_id,
uint32_t shared_wred_context_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
@@ -1225,7 +1225,7 @@ rte_tm_shared_wred_context_add_update(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_shared_wred_context_delete(uint8_t port_id,
+rte_tm_shared_wred_context_delete(uint16_t port_id,
uint32_t shared_wred_context_id,
struct rte_tm_error *error);
@@ -1249,7 +1249,7 @@ rte_tm_shared_wred_context_delete(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_n_max
*/
int
-rte_tm_shaper_profile_add(uint8_t port_id,
+rte_tm_shaper_profile_add(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_shaper_params *profile,
struct rte_tm_error *error);
@@ -1272,7 +1272,7 @@ rte_tm_shaper_profile_add(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_n_max
*/
int
-rte_tm_shaper_profile_delete(uint8_t port_id,
+rte_tm_shaper_profile_delete(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
@@ -1301,7 +1301,7 @@ rte_tm_shaper_profile_delete(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_shared_shaper_add_update(uint8_t port_id,
+rte_tm_shared_shaper_add_update(uint16_t port_id,
uint32_t shared_shaper_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
@@ -1324,7 +1324,7 @@ rte_tm_shared_shaper_add_update(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_shared_shaper_delete(uint8_t port_id,
+rte_tm_shared_shaper_delete(uint16_t port_id,
uint32_t shared_shaper_id,
struct rte_tm_error *error);
@@ -1392,7 +1392,7 @@ rte_tm_shared_shaper_delete(uint8_t port_id,
* @see struct rte_tm_capabilities
*/
int
-rte_tm_node_add(uint8_t port_id,
+rte_tm_node_add(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
@@ -1425,7 +1425,7 @@ rte_tm_node_add(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_ADD_DELETE
*/
int
-rte_tm_node_delete(uint8_t port_id,
+rte_tm_node_delete(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
@@ -1449,7 +1449,7 @@ rte_tm_node_delete(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
*/
int
-rte_tm_node_suspend(uint8_t port_id,
+rte_tm_node_suspend(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
@@ -1472,7 +1472,7 @@ rte_tm_node_suspend(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
*/
int
-rte_tm_node_resume(uint8_t port_id,
+rte_tm_node_resume(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
@@ -1513,7 +1513,7 @@ rte_tm_node_resume(uint8_t port_id,
* @see rte_tm_node_delete()
*/
int
-rte_tm_hierarchy_commit(uint8_t port_id,
+rte_tm_hierarchy_commit(uint16_t port_id,
int clear_on_fail,
struct rte_tm_error *error);
@@ -1549,7 +1549,7 @@ rte_tm_hierarchy_commit(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL
*/
int
-rte_tm_node_parent_update(uint8_t port_id,
+rte_tm_node_parent_update(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
@@ -1578,7 +1578,7 @@ rte_tm_node_parent_update(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_private_n_max
*/
int
-rte_tm_node_shaper_update(uint8_t port_id,
+rte_tm_node_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
@@ -1605,7 +1605,7 @@ rte_tm_node_shaper_update(uint8_t port_id,
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_node_shared_shaper_update(uint8_t port_id,
+rte_tm_node_shared_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_shaper_id,
int add,
@@ -1632,7 +1632,7 @@ rte_tm_node_shared_shaper_update(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_STATS
*/
int
-rte_tm_node_stats_update(uint8_t port_id,
+rte_tm_node_stats_update(uint16_t port_id,
uint32_t node_id,
uint64_t stats_mask,
struct rte_tm_error *error);
@@ -1660,7 +1660,7 @@ rte_tm_node_stats_update(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_N_SP_PRIORITIES
*/
int
-rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
uint32_t node_id,
int *wfq_weight_mode,
uint32_t n_sp_priorities,
@@ -1683,7 +1683,7 @@ rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
* @see RTE_TM_UPDATE_NODE_CMAN
*/
int
-rte_tm_node_cman_update(uint8_t port_id,
+rte_tm_node_cman_update(uint16_t port_id,
uint32_t node_id,
enum rte_tm_cman_mode cman,
struct rte_tm_error *error);
@@ -1707,7 +1707,7 @@ rte_tm_node_cman_update(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_private_n_max
*/
int
-rte_tm_node_wred_context_update(uint8_t port_id,
+rte_tm_node_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
@@ -1732,7 +1732,7 @@ rte_tm_node_wred_context_update(uint8_t port_id,
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_node_shared_wred_context_update(uint8_t port_id,
+rte_tm_node_shared_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_wred_context_id,
int add,
@@ -1764,7 +1764,7 @@ rte_tm_node_shared_wred_context_update(uint8_t port_id,
* @see enum rte_tm_stats_type
*/
int
-rte_tm_node_stats_read(uint8_t port_id,
+rte_tm_node_stats_read(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_stats *stats,
uint64_t *stats_mask,
@@ -1801,7 +1801,7 @@ rte_tm_node_stats_read(uint8_t port_id,
* @see struct rte_tm_capabilities::mark_vlan_dei_supported
*/
int
-rte_tm_mark_vlan_dei(uint8_t port_id,
+rte_tm_mark_vlan_dei(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
@@ -1851,7 +1851,7 @@ rte_tm_mark_vlan_dei(uint8_t port_id,
* @see struct rte_tm_capabilities::mark_ip_ecn_sctp_supported
*/
int
-rte_tm_mark_ip_ecn(uint8_t port_id,
+rte_tm_mark_ip_ecn(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
@@ -1899,7 +1899,7 @@ rte_tm_mark_ip_ecn(uint8_t port_id,
* @see struct rte_tm_capabilities::mark_ip_dscp_supported
*/
int
-rte_tm_mark_ip_dscp(uint8_t port_id,
+rte_tm_mark_ip_dscp(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
index a5b698fe..b2e8ccf8 100644
--- a/lib/librte_ether/rte_tm_driver.h
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -357,7 +357,7 @@ rte_tm_error_set(struct rte_tm_error *error,
* success, NULL otherwise.
*/
const struct rte_tm_ops *
-rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error);
+rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error);
#ifdef __cplusplus
}
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index 410578a1..5ac22cde 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -34,15 +34,17 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 2
+LIBABIVER := 3
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash
# library source files
SRCS-y += rte_eventdev.c
SRCS-y += rte_event_ring.c
+SRCS-y += rte_event_eth_rx_adapter.c
# export include files
SYMLINK-y-include += rte_eventdev.h
@@ -50,6 +52,7 @@ SYMLINK-y-include += rte_eventdev_pmd.h
SYMLINK-y-include += rte_eventdev_pmd_pci.h
SYMLINK-y-include += rte_eventdev_pmd_vdev.h
SYMLINK-y-include += rte_event_ring.h
+SYMLINK-y-include += rte_event_eth_rx_adapter.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
new file mode 100644
index 00000000..90106e6c
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -0,0 +1,1240 @@
+#include <rte_cycles.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+#include <rte_thash.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_eth_rx_adapter.h"
+
+#define BATCH_SIZE 32
+#define BLOCK_CNT_THRESHOLD 10
+#define ETH_EVENT_BUFFER_SIZE (4*BATCH_SIZE)
+
+#define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
+#define ETH_RX_ADAPTER_MEM_NAME_LEN 32
+
+#define RSS_KEY_SIZE 40
+
+/*
+ * There is an instance of this struct per polled Rx queue added to the
+ * adapter
+ */
+struct eth_rx_poll_entry {
+ /* Eth port to poll */
+ uint8_t eth_dev_id;
+ /* Eth rx queue to poll */
+ uint16_t eth_rx_qid;
+};
+
+/* Instance per adapter */
+struct rte_eth_event_enqueue_buffer {
+ /* Count of events in this buffer */
+ uint16_t count;
+ /* Array of events in this buffer */
+ struct rte_event events[ETH_EVENT_BUFFER_SIZE];
+};
+
+struct rte_event_eth_rx_adapter {
+ /* RSS key */
+ uint8_t rss_key_be[RSS_KEY_SIZE];
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Per ethernet device structure */
+ struct eth_device_info *eth_devices;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t rx_lock;
+ /* Max mbufs processed in any service function invocation */
+ uint32_t max_nb_rx;
+ /* Receive queues that need to be polled */
+ struct eth_rx_poll_entry *eth_rx_poll;
+ /* Size of the eth_rx_poll array */
+ uint16_t num_rx_polled;
+ /* Weighted round robin schedule */
+ uint32_t *wrr_sched;
+ /* wrr_sched[] size */
+ uint32_t wrr_len;
+ /* Next entry in wrr[] to begin polling */
+ uint32_t wrr_pos;
+ /* Event burst buffer */
+ struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+ /* Per adapter stats */
+ struct rte_event_eth_rx_adapter_stats stats;
+ /* Block count, counts up to BLOCK_CNT_THRESHOLD */
+ uint16_t enq_block_count;
+ /* Block start ts */
+ uint64_t rx_enq_block_start_ts;
+ /* Configuration callback for rte_service configuration */
+ rte_event_eth_rx_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Total count of Rx queues in adapter */
+ uint32_t nb_queues;
+ /* Memory allocation name */
+ char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+} __rte_cache_aligned;
+
+/* Per eth device */
+struct eth_device_info {
+ struct rte_eth_dev *dev;
+ struct eth_rx_queue_info *rx_queue;
+ /* Set if ethdev->eventdev packet transfer uses a
+ * hardware mechanism
+ */
+ uint8_t internal_event_port;
+	/* Set if the adapter is processing Rx queues for
+	 * this eth device and packet processing has been
+	 * started; this lets the code know whether the PMD
+	 * rx_adapter_stop callback needs to be invoked
+	 */
+ uint8_t dev_rx_started;
+	/* If nb_dev_queues > 0, the start callback is
+	 * invoked if it has not been invoked already
+	 */
+ uint16_t nb_dev_queues;
+};
+
+/* Per Rx queue */
+struct eth_rx_queue_info {
+ int queue_enabled; /* True if added */
+ uint16_t wt; /* Polling weight */
+ uint8_t event_queue_id; /* Event queue to enqueue packets to */
+ uint8_t sched_type; /* Sched type for events */
+ uint8_t priority; /* Event priority */
+ uint32_t flow_id; /* App provided flow identifier */
+ uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
+};
+
+static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+
+static inline int
+valid_id(uint8_t id)
+{
+ return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+}
+
+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!valid_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return rx_adapter->num_rx_polled;
+}
+
+/* Greatest common divisor */
+static uint16_t gcd_u16(uint16_t a, uint16_t b)
+{
+ uint16_t r = a % b;
+
+ return r ? gcd_u16(b, r) : b;
+}
+
+/* Returns the next queue in the polling sequence
+ *
+ * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
+ */
+static int
+wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
+ unsigned int n, int *cw,
+ struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+ uint16_t gcd, int prev)
+{
+ int i = prev;
+ uint16_t w;
+
+ while (1) {
+ uint16_t q;
+ uint8_t d;
+
+ i = (i + 1) % n;
+ if (i == 0) {
+ *cw = *cw - gcd;
+ if (*cw <= 0)
+ *cw = max_wt;
+ }
+
+ q = eth_rx_poll[i].eth_rx_qid;
+ d = eth_rx_poll[i].eth_dev_id;
+ w = rx_adapter->eth_devices[d].rx_queue[q].wt;
+
+ if ((int)w >= *cw)
+ return i;
+ }
+}
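+
+/*
+ * A worked example of the sequence wrr_next() produces: with three queues
+ * A, B and C of weights 4, 3 and 2 (max_wt = 4, gcd = 1), repeated calls
+ * yield A A B A B C A B C, i.e. one cycle of 4 + 3 + 2 = 9 positions in
+ * which higher weight queues appear more often and are spread across the
+ * cycle.
+ */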
+
+/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static int
+eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint8_t d;
+ uint16_t q;
+ unsigned int i;
+
+ /* Initialize variables for calculation of wrr schedule */
+ uint16_t max_wrr_pos = 0;
+ unsigned int poll_q = 0;
+ uint16_t max_wt = 0;
+ uint16_t gcd = 0;
+
+ struct eth_rx_poll_entry *rx_poll = NULL;
+ uint32_t *rx_wrr = NULL;
+
+ if (rx_adapter->num_rx_polled) {
+ size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
+ sizeof(*rx_adapter->eth_rx_poll),
+ RTE_CACHE_LINE_SIZE);
+ rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (rx_poll == NULL)
+ return -ENOMEM;
+
+		/* Generate the array of all queues to poll; poll_q
+		 * is the number of entries in this array
+		 */
+ for (d = 0; d < rte_eth_dev_count(); d++) {
+ uint16_t nb_rx_queues;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[d];
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ if (dev_info->rx_queue == NULL)
+ continue;
+ for (q = 0; q < nb_rx_queues; q++) {
+ struct eth_rx_queue_info *queue_info =
+ &dev_info->rx_queue[q];
+ if (queue_info->queue_enabled == 0)
+ continue;
+
+ uint16_t wt = queue_info->wt;
+ rx_poll[poll_q].eth_dev_id = d;
+ rx_poll[poll_q].eth_rx_qid = q;
+ max_wrr_pos += wt;
+ max_wt = RTE_MAX(max_wt, wt);
+ gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
+ poll_q++;
+ }
+ }
+
+ len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
+ RTE_CACHE_LINE_SIZE);
+ rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (rx_wrr == NULL) {
+ rte_free(rx_poll);
+ return -ENOMEM;
+ }
+
+ /* Generate polling sequence based on weights */
+ int prev = -1;
+ int cw = -1;
+ for (i = 0; i < max_wrr_pos; i++) {
+ rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
+ rx_poll, max_wt, gcd, prev);
+ prev = rx_wrr[i];
+ }
+ }
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = max_wrr_pos;
+
+ return 0;
+}
+
+static inline void
+mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
+ struct ipv6_hdr **ipv6_hdr)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct vlan_hdr *vlan_hdr;
+
+ *ipv4_hdr = NULL;
+ *ipv6_hdr = NULL;
+
+ switch (eth_hdr->ether_type) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_VLAN):
+ vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ switch (vlan_hdr->eth_proto) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
+ break;
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Calculate RSS hash for IPv4/6 */
+static inline uint32_t
+do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
+{
+ uint32_t input_len;
+ void *tuple;
+ struct rte_ipv4_tuple ipv4_tuple;
+ struct rte_ipv6_tuple ipv6_tuple;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+
+ mtoip(m, &ipv4_hdr, &ipv6_hdr);
+
+ if (ipv4_hdr) {
+ ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
+ ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+ tuple = &ipv4_tuple;
+ input_len = RTE_THASH_V4_L3_LEN;
+ } else if (ipv6_hdr) {
+ rte_thash_load_v6_addrs(ipv6_hdr,
+ (union rte_thash_tuple *)&ipv6_tuple);
+ tuple = &ipv6_tuple;
+ input_len = RTE_THASH_V6_L3_LEN;
+ } else
+ return 0;
+
+ return rte_softrss_be(tuple, input_len, rss_key_be);
+}
+
+static inline int
+rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return !!rx_adapter->enq_block_count;
+}
+
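+/* Mark the start of an enqueue blocked period; the timestamp is recorded
+ * only after BLOCK_CNT_THRESHOLD consecutive invocations that enqueued
+ * nothing, to filter out transient back pressure from the event device.
+ */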
+static inline void
+rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->rx_enq_block_start_ts)
+ return;
+
+ rx_adapter->enq_block_count++;
+ if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
+ return;
+
+ rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
+}
+
+static inline void
+rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ if (unlikely(!stats->rx_enq_start_ts))
+ stats->rx_enq_start_ts = rte_get_tsc_cycles();
+
+ if (likely(!rx_enq_blocked(rx_adapter)))
+ return;
+
+ rx_adapter->enq_block_count = 0;
+ if (rx_adapter->rx_enq_block_start_ts) {
+ stats->rx_enq_end_ts = rte_get_tsc_cycles();
+ stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
+ rx_adapter->rx_enq_block_start_ts;
+ rx_adapter->rx_enq_block_start_ts = 0;
+ }
+}
+
+/* Add event to buffer; the free space check is done prior to calling
+ * this function
+ */
+static inline void
+buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event *ev)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
+}
+
+/* Enqueue buffered events to event device */
+static inline uint16_t
+flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+
+ uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
+ rx_adapter->event_port_id,
+ buf->events,
+ buf->count);
+ if (n != buf->count) {
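+		/* Retain the events the event device did not accept;
+		 * move them to the front of the buffer so the next
+		 * flush retries them.
+		 */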
+ memmove(buf->events,
+ &buf->events[n],
+ (buf->count - n) * sizeof(struct rte_event));
+ stats->rx_enq_retry++;
+ }
+
+ n ? rx_enq_block_end_ts(rx_adapter, stats) :
+ rx_enq_block_start_ts(rx_adapter);
+
+ buf->count -= n;
+ stats->rx_enq_count += n;
+
+ return n;
+}
+
+static inline void
+fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint8_t dev_id,
+ uint16_t rx_queue_id,
+ struct rte_mbuf **mbufs,
+ uint16_t num)
+{
+ uint32_t i;
+ struct eth_device_info *eth_device_info =
+ &rx_adapter->eth_devices[dev_id];
+ struct eth_rx_queue_info *eth_rx_queue_info =
+ &eth_device_info->rx_queue[rx_queue_id];
+
+ int32_t qid = eth_rx_queue_info->event_queue_id;
+ uint8_t sched_type = eth_rx_queue_info->sched_type;
+ uint8_t priority = eth_rx_queue_info->priority;
+ uint32_t flow_id;
+ struct rte_event events[BATCH_SIZE];
+ struct rte_mbuf *m = mbufs[0];
+ uint32_t rss_mask;
+ uint32_t rss;
+ int do_rss;
+
+ /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
+ rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+ do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+
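+	/* The flow id below is composed as
+	 * (app flow_id & mask) | (RSS hash & ~mask): with flow_id_mask == ~0
+	 * the application-provided flow id is used verbatim, and with
+	 * flow_id_mask == 0 the flow id is the RSS hash (taken from the mbuf,
+	 * or computed in SW when the mbuf has none).
+	 */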
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+ struct rte_event *ev = &events[i];
+
+ rss = do_rss ?
+ do_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;
+ flow_id =
+ eth_rx_queue_info->flow_id &
+ eth_rx_queue_info->flow_id_mask;
+ flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
+
+ ev->flow_id = flow_id;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = qid;
+ ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+ ev->sub_event_type = 0;
+ ev->priority = priority;
+ ev->mbuf = m;
+
+ buf_event_enqueue(rx_adapter, ev);
+ }
+}
+
+/*
+ * Polls receive queues added to the event adapter and enqueues received
+ * packets to the event device.
+ *
+ * The receive code enqueues initially to a temporary buffer; the
+ * temporary buffer is drained any time it holds >= BATCH_SIZE packets.
+ *
+ * If there isn't space available in the temporary buffer, packets from the
+ * Rx queue aren't dequeued from the eth device; this back pressures the
+ * eth device. In virtual device environments this back pressure is relayed
+ * to the hypervisor's switching layer, where adjustments can be made to
+ * deal with it.
+ */
+static inline uint32_t
+eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t num_queue;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf;
+ uint32_t wrr_pos;
+ uint32_t max_nb_rx;
+
+ wrr_pos = rx_adapter->wrr_pos;
+ max_nb_rx = rx_adapter->max_nb_rx;
+ buf = &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+
+ /* Iterate through a WRR sequence */
+ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
+ unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
+ uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
+ uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ if (buf->count >= BATCH_SIZE)
+ flush_event_buffer(rx_adapter);
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
+ break;
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
+
+ if (n) {
+ stats->rx_packets += n;
+ /* The check before rte_eth_rx_burst() ensures that
+ * all n mbufs can be buffered
+ */
+ fill_event_buffer(rx_adapter, d, qid, mbufs, n);
+ nb_rx += n;
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
+ (wrr_pos + 1) % rx_adapter->wrr_len;
+ return nb_rx;
+ }
+ }
+
+ if (++wrr_pos == rx_adapter->wrr_len)
+ wrr_pos = 0;
+ }
+
+ return nb_rx;
+}
+
+static int
+event_eth_rx_adapter_service_func(void *args)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = args;
+ struct rte_eth_event_enqueue_buffer *buf;
+
+ buf = &rx_adapter->event_enqueue_buffer;
+ if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
+ return 0;
+ if (eth_rx_poll(rx_adapter) == 0 && buf->count)
+ flush_event_buffer(rx_adapter);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
+}
+
+static int
+rte_event_eth_rx_adapter_init(void)
+{
+ const char *name = "rte_event_eth_rx_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_eth_rx_adapter) *
+ RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_eth_rx_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_eth_rx_adapter *
+id_to_rx_adapter(uint8_t id)
+{
+ return event_eth_rx_adapter ?
+ event_eth_rx_adapter[id] : NULL;
+}
+
+static int
+default_conf_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+{
+ int ret;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ int started;
+ uint8_t port_id;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
+ dev_id);
+ if (started)
+ rte_event_dev_start(dev_id);
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
+ port_id);
+ return ret;
+ }
+
+ conf->event_port_id = port_id;
+ conf->max_nb_rx = 128;
+ if (started)
+ rte_event_dev_start(dev_id);
+ rx_adapter->default_cb_arg = 1;
+ return ret;
+}
+
+static int
+init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+{
+ int ret;
+ struct rte_service_spec service;
+ struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
+
+ if (rx_adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d", id);
+ service.socket_id = rx_adapter->socket_id;
+ service.callback = event_eth_rx_adapter_service_func;
+ service.callback_userdata = rx_adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &rx_adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
+ &rx_adapter_conf, rx_adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ goto err_done;
+ }
+ rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
+ rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
+ rx_adapter->service_inited = 1;
+ return 0;
+
+err_done:
+ rte_service_component_unregister(rx_adapter->service_id);
+ return ret;
+}
+
+
+static void
+update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ uint8_t add)
+{
+ struct eth_rx_queue_info *queue_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->rx_queue == NULL)
+ return;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ update_queue_info(rx_adapter, dev_info, i, add);
+ } else {
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ enabled = queue_info->queue_enabled;
+ if (add) {
+ rx_adapter->nb_queues += !enabled;
+ dev_info->nb_dev_queues += !enabled;
+ } else {
+ rx_adapter->nb_queues -= enabled;
+ dev_info->nb_dev_queues -= enabled;
+ }
+ queue_info->queue_enabled = !!add;
+ }
+}
+
+static int
+event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ if (rx_adapter->nb_queues == 0)
+ return 0;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ rx_adapter->num_rx_polled -= queue_info->queue_enabled;
+ update_queue_info(rx_adapter, dev_info, rx_queue_id, 0);
+ return 0;
+}
+
+static void
+event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf)
+
+{
+ struct eth_rx_queue_info *queue_info;
+ const struct rte_event *ev = &conf->ev;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ queue_info->event_queue_id = ev->queue_id;
+ queue_info->sched_type = ev->sched_type;
+ queue_info->priority = ev->priority;
+ queue_info->wt = conf->servicing_weight;
+
+ if (conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
+ queue_info->flow_id = ev->flow_id;
+ queue_info->flow_id_mask = ~0;
+ }
+
+ /* The same queue can be added more than once */
+ rx_adapter->num_rx_polled += !queue_info->queue_enabled;
+ update_queue_info(rx_adapter, dev_info, rx_queue_id, 1);
+}
+
+static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint8_t eth_dev_id,
+ int rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ struct rte_event_eth_rx_adapter_queue_conf temp_conf;
+ uint32_t i;
+ int ret;
+
+ if (queue_conf->servicing_weight == 0) {
+
+ struct rte_eth_dev_data *data = dev_info->dev->data;
+ if (data->dev_conf.intr_conf.rxq) {
+ RTE_EDEV_LOG_ERR("Interrupt driven queues"
+ " not supported");
+ return -ENOTSUP;
+ }
+ temp_conf = *queue_conf;
+
+ /* If Rx interrupts are disabled set wt = 1 */
+ temp_conf.servicing_weight = 1;
+ queue_conf = &temp_conf;
+ }
+
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ event_eth_rx_adapter_queue_add(rx_adapter,
+ dev_info, i,
+ queue_conf);
+ } else {
+ event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
+ (uint16_t)rx_queue_id,
+ queue_conf);
+ }
+
+ ret = eth_poll_wrr_calc(rx_adapter);
+ if (ret) {
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info, rx_queue_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rx_adapter_ctrl(uint8_t id, int start)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int use_service = 0;
+ int stop = !start;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ /* if start check for num dev queues */
+ if (start && !dev_info->nb_dev_queues)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_rx_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_rx_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+ start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
+ &rte_eth_devices[i]) :
+ (*dev->dev_ops->eth_rx_adapter_stop)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ if (use_service)
+ rte_service_runstate_set(rx_adapter->service_id, start);
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ int ret;
+ int socket_id;
+ uint8_t i;
+	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
+ const uint8_t default_rss_key[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
+ };
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_eth_rx_adapter == NULL) {
+ ret = rte_event_eth_rx_adapter_init();
+ if (ret)
+ return ret;
+ }
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d",
+ id);
+
+ rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rx_adapter == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
+ return -ENOMEM;
+ }
+
+ rx_adapter->eventdev_id = dev_id;
+ rx_adapter->socket_id = socket_id;
+ rx_adapter->conf_cb = conf_cb;
+ rx_adapter->conf_arg = conf_arg;
+ strcpy(rx_adapter->mem_name, mem_name);
+ rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
+ rte_eth_dev_count() *
+ sizeof(struct eth_device_info), 0,
+ socket_id);
+ rte_convert_rss_key((const uint32_t *)default_rss_key,
+ (uint32_t *)rx_adapter->rss_key_be,
+ RTE_DIM(default_rss_key));
+
+ if (rx_adapter->eth_devices == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
+ rte_free(rx_adapter);
+ return -ENOMEM;
+ }
+ rte_spinlock_init(&rx_adapter->rx_lock);
+ for (i = 0; i < rte_eth_dev_count(); i++)
+ rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
+
+ event_eth_rx_adapter[id] = rx_adapter;
+ if (conf_cb == default_conf_cb)
+ rx_adapter->default_cb_arg = 1;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
+ default_conf_cb,
+ pc);
+ if (ret)
+ rte_free(pc);
+ return ret;
+}
+
+int
+rte_event_eth_rx_adapter_free(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->nb_queues) {
+ RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
+ rx_adapter->nb_queues);
+ return -EBUSY;
+ }
+
+ if (rx_adapter->default_cb_arg)
+ rte_free(rx_adapter->conf_arg);
+ rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter);
+ event_eth_rx_adapter[id] = NULL;
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint8_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ uint32_t cap;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ int start_service;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if ((rx_adapter == NULL) || (queue_conf == NULL))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+ "eth port %" PRIu8, id, eth_dev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
+ && (queue_conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
+ RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
+ " eth port: %" PRIu8 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
+ (rx_queue_id != -1)) {
+ RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
+ "event queue id %u eth port %u", id, eth_dev_id);
+ return -EINVAL;
+ }
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ start_service = 0;
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
+ -ENOTSUP);
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id, queue_conf);
+ if (ret == 0) {
+ update_queue_info(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 1);
+ }
+ } else {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ ret = init_service(rx_adapter, id);
+ if (ret == 0)
+ ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
+ queue_conf);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ if (ret == 0)
+ start_service = !!sw_rx_adapter_queue_count(rx_adapter);
+ }
+
+ if (ret)
+ return ret;
+
+ if (start_service)
+ rte_service_component_runstate_set(rx_adapter->service_id, 1);
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+ int32_t rx_queue_id)
+{
+ int ret = 0;
+ struct rte_eventdev *dev;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ uint16_t i;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id);
+ if (ret == 0) {
+ update_queue_info(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 0);
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+ }
+ } else {
+ int rc;
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info,
+ i);
+ } else {
+ event_eth_rx_adapter_queue_del(rx_adapter,
+ dev_info,
+ (uint16_t)rx_queue_id);
+ }
+
+ rc = eth_poll_wrr_calc(rx_adapter);
+ if (rc)
+ RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
+ rc);
+
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ rte_service_component_runstate_set(rx_adapter->service_id,
+ sw_rx_adapter_queue_count(rx_adapter));
+ }
+
+ return ret;
+}
+
+
+int
+rte_event_eth_rx_adapter_start(uint8_t id)
+{
+ return rx_adapter_ctrl(id, 1);
+}
+
+int
+rte_event_eth_rx_adapter_stop(uint8_t id)
+{
+ return rx_adapter_ctrl(id, 0);
+}
+
+int
+rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_eth_rx_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
+ &rte_eth_devices[i],
+ &dev_stats);
+ if (ret)
+ continue;
+ dev_stats_sum.rx_packets += dev_stats.rx_packets;
+ dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
+ }
+
+ if (rx_adapter->service_inited)
+ *stats = rx_adapter->stats;
+
+ stats->rx_packets += dev_stats_sum.rx_packets;
+ stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = id_to_rx_adapter(id);
+ if (rx_adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->service_inited)
+ *service_id = rx_adapter->service_id;
+
+ return rx_adapter->service_inited ? 0 : -ESRCH;
+}
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
new file mode 100644
index 00000000..6a9e7edf
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENT_ETH_RX_ADAPTER_
+#define _RTE_EVENT_ETH_RX_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event Ethernet Rx Adapter
+ *
+ * An eventdev-based packet processing application enqueues/dequeues mbufs
+ * to/from the event device. Packet flow from the ethernet device to the event
+ * device can be accomplished using either HW or SW mechanisms depending on the
+ * platform and the particular combination of ethernet and event devices. The
+ * event ethernet Rx adapter provides common APIs to configure the packet flow
+ * from the ethernet devices to event devices across both these transfer
+ * mechanisms.
+ *
+ * The adapter uses an EAL service core function for SW based packet transfer
+ * and uses the eventdev PMD functions to configure HW based packet transfer
+ * between the ethernet device and the event device.
+ *
+ * The ethernet Rx event adapter's functions are:
+ * - rte_event_eth_rx_adapter_create_ext()
+ * - rte_event_eth_rx_adapter_create()
+ * - rte_event_eth_rx_adapter_free()
+ * - rte_event_eth_rx_adapter_queue_add()
+ * - rte_event_eth_rx_adapter_queue_del()
+ * - rte_event_eth_rx_adapter_start()
+ * - rte_event_eth_rx_adapter_stop()
+ * - rte_event_eth_rx_adapter_stats_get()
+ * - rte_event_eth_rx_adapter_stats_reset()
+ *
+ * The application creates an ethernet to event adapter using
+ * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
+ * functions.
+ * The adapter needs to know which ethernet rx queues to poll for mbufs as well
+ * as event device parameters such as the event queue identifier, event
+ * priority and scheduling type that the adapter should use when constructing
+ * events. The rte_event_eth_rx_adapter_queue_add() function is provided for
+ * this purpose.
+ * The servicing weight parameter in the rte_event_eth_rx_adapter_queue_conf
+ * is applicable when the Rx adapter uses a service core function. It gives
+ * the application control over how frequently the ethernet device receive
+ * queues are polled; for example, the application may want to poll higher
+ * priority queues more frequently while still not starving lower priority
+ * queues completely. If this parameter is zero and the receive interrupt is
+ * enabled when configuring the device, the receive queue is interrupt
+ * driven; otherwise, the queue is assigned a servicing weight of one.
+ *
+ * The application can start/stop the adapter using the
+ * rte_event_eth_rx_adapter_start() and the rte_event_eth_rx_adapter_stop()
+ * functions. If the adapter uses an rte_service function, then the application
+ * is also required to assign a core to the service function and control the
+ * service core using the rte_service APIs. The
+ * rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
+ * the service function ID of the adapter in this case.
+ *
+ * Note: Interrupt driven receive queues are currently unimplemented.
+ */
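+
+/*
+ * A minimal usage sketch, assuming an eventdev (dev_id 0) and an ethdev
+ * (port 0) that have already been configured, and a service_lcore that has
+ * already been added as a service core:
+ *
+ *	struct rte_event_port_conf port_conf = {
+ *		.new_event_threshold = 1024,
+ *		.dequeue_depth = 16,
+ *		.enqueue_depth = 16,
+ *	};
+ *	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ *		.servicing_weight = 1,
+ *		.ev = {
+ *			.queue_id = 0,
+ *			.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ *			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *		},
+ *	};
+ *	uint32_t service_id;
+ *
+ *	rte_event_eth_rx_adapter_create(0, 0, &port_conf);
+ *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &queue_conf);
+ *	if (rte_event_eth_rx_adapter_service_id_get(0, &service_id) == 0)
+ *		rte_service_map_lcore_set(service_id, service_lcore, 1);
+ *	rte_event_eth_rx_adapter_start(0);
+ */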
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_service.h>
+
+#include "rte_eventdev.h"
+
+#define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
+
+/* struct rte_event_eth_rx_adapter_queue_conf flags definitions */
+#define RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID 0x1
+/**< This flag indicates the flow identifier is valid
+ * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_eth_rx_adapter_conf_cb
+ */
+struct rte_event_eth_rx_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues mbuf events to this
+ * port.
+ */
+ uint32_t max_nb_rx;
+ /**< The adapter can return early if it has processed at least
+ * max_nb_rx mbufs. This isn't treated as a requirement; batching may
+ * cause the adapter to process more than max_nb_rx mbufs.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for adapter configuration callback. The callback is
+ * used to fill in members of the struct rte_event_eth_rx_adapter_conf; this
+ * callback is invoked when creating a SW service for packet transfer from
+ * ethdev queues to the event device. The SW service is created within the
+ * rte_event_eth_rx_adapter_queue_add() function if SW based packet transfers
+ * from ethdev queues to the event device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param [out] conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ * Argument to the callback. This is the same as the conf_arg passed to the
+ * rte_event_eth_rx_adapter_create_ext().
+ */
+typedef int (*rte_event_eth_rx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf,
+ void *arg);
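+
+/*
+ * A minimal sketch of such a callback, assuming the application has already
+ * set up an event port (the hypothetical app_rx_port_id) for the adapter's
+ * exclusive use:
+ *
+ *	static int
+ *	app_rx_adapter_conf_cb(uint8_t id, uint8_t dev_id,
+ *			struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+ *	{
+ *		RTE_SET_USED(id);
+ *		RTE_SET_USED(dev_id);
+ *		RTE_SET_USED(arg);
+ *		conf->event_port_id = app_rx_port_id;
+ *		conf->max_nb_rx = 128;
+ *		return 0;
+ *	}
+ */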
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Rx queue configuration structure
+ */
+struct rte_event_eth_rx_adapter_queue_conf {
+ uint32_t rx_queue_flags;
+ /**< Flags for handling received packets
+ * @see RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID
+ */
+ uint16_t servicing_weight;
+ /**< Relative polling frequency of ethernet receive queue when the
+ * adapter uses a service core function for ethernet to event device
+	 * transfers. If it is set to zero and Rx queue interrupts are enabled
+	 * for the ethernet device, the Rx queue is interrupt driven;
+	 * otherwise the queue is assigned a servicing weight of one.
+ */
+ struct rte_event ev;
+ /**<
+ * The values from the following event fields will be used when
+ * queuing mbuf events:
+ * - event_queue_id: Targeted event queue ID for received packets.
+ * - event_priority: Event priority of packets from this Rx queue in
+ * the event queue relative to other events.
+ * - sched_type: Scheduling type for packets from this Rx queue.
+	 *  - flow_id: If the RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID bit
+ * is set in rx_queue_flags, this flow_id is used for all
+ * packets received from this queue. Otherwise the flow ID
+ * is set to the RSS hash of the src and dst IPv4/6
+ * addresses.
+ *
+	 * The event adapter sets ev.event_type to
+	 * RTE_EVENT_TYPE_ETH_RX_ADAPTER in the enqueued event.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * A structure used to retrieve statistics for an eth rx adapter instance.
+ */
+struct rte_event_eth_rx_adapter_stats {
+ uint64_t rx_poll_count;
+ /**< Receive queue poll count */
+ uint64_t rx_packets;
+ /**< Received packet count */
+ uint64_t rx_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t rx_enq_retry;
+ /**< Eventdev enqueue retry count */
+ uint64_t rx_enq_start_ts;
+ /**< Rx enqueue start timestamp */
+ uint64_t rx_enq_block_cycles;
+ /**< Cycles for which the service is blocked by the event device,
+ * i.e., the service fails to enqueue to the event device.
+ */
+ uint64_t rx_enq_end_ts;
+ /**< Latest timestamp at which the service is unblocked
+ * by the event device. The start, end timestamps and
+ * block cycles can be used to compute the percentage of
+ * cycles the service is blocked by the event device.
+ */
+};
+
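+/*
+ * Editor's note: an illustrative use of the timestamp fields above to
+ * compute the percentage of cycles the service was blocked (a sketch;
+ * assumes the adapter identified by adapter_id has been running):
+ *
+ * \code{.c}
+ *     struct rte_event_eth_rx_adapter_stats stats;
+ *     double blocked_pct;
+ *
+ *     rte_event_eth_rx_adapter_stats_get(adapter_id, &stats);
+ *     blocked_pct = 100.0 * stats.rx_enq_block_cycles /
+ *                     (stats.rx_enq_end_ts - stats.rx_enq_start_ts);
+ * \endcode
+ */
+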
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param conf_cb
+ * Callback function that fills in members of the
+ * struct rte_event_eth_rx_adapter_conf passed into it.
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an event
+ * port. This default function reconfigures the event device with an
+ * additional event port and sets up the event port using the port_config
+ * parameter passed into this function. If the application needs more
+ * control over the configuration of the service, it should use
+ * rte_event_eth_rx_adapter_create_ext() instead.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config);
+
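+/*
+ * Editor's note: illustrative usage of the default create path (a
+ * sketch; adapter_id, dev_id and the port_config values below are
+ * hypothetical):
+ *
+ * \code{.c}
+ *     struct rte_event_port_conf port_config = {
+ *             .new_event_threshold = 1024,
+ *             .dequeue_depth = 16,
+ *             .enqueue_depth = 16,
+ *     };
+ *
+ *     if (rte_event_eth_rx_adapter_create(adapter_id, dev_id,
+ *                                         &port_config) < 0)
+ *             rte_exit(EXIT_FAILURE, "Rx adapter creation failed\n");
+ * \endcode
+ */
+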
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure. If the adapter still has Rx queues
+ * added to it, the function returns -EBUSY.
+ */
+int rte_event_eth_rx_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add a receive queue to an event adapter. After a queue has been
+ * added to the event adapter, the result of the application calling
+ * rte_eth_rx_burst(eth_dev_id, rx_queue_id, ..) is undefined.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are added. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @param conf
+ * Additional configuration structure of type
+ * *rte_event_eth_rx_adapter_queue_conf*
+ *
+ * @return
+ * - 0: Success, Receive queue added correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint8_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf);
+
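+/*
+ * Editor's note: a sketch of adding every configured Rx queue of an
+ * ethdev to the adapter (adapter_id, eth_port_id and ev_queue_id are
+ * hypothetical variables; -1 selects all Rx queues):
+ *
+ * \code{.c}
+ *     struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+ *             .rx_queue_flags = 0,
+ *             .servicing_weight = 1,
+ *             .ev = {
+ *                     .queue_id = ev_queue_id,
+ *                     .sched_type = RTE_SCHED_TYPE_ATOMIC,
+ *                     .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *             },
+ *     };
+ *
+ *     if (rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id,
+ *                                            -1, &queue_conf) < 0)
+ *             rte_exit(EXIT_FAILURE, "Rx queue add failed\n");
+ * \endcode
+ */
+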
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete a receive queue from an event adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are deleted. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @return
+ * - 0: Success, Receive queue deleted correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+ int32_t rx_queue_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, Adapter started correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, Adapter stopped correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, if the adapter doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+
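+/*
+ * Editor's note: a sketch of running the adapter's SW service on a
+ * service lcore via the rte_service API (the service_lcore variable is
+ * hypothetical and must already be a started service core):
+ *
+ * \code{.c}
+ *     uint32_t service_id;
+ *
+ *     if (rte_event_eth_rx_adapter_service_id_get(adapter_id,
+ *                                                 &service_id) == 0) {
+ *             rte_service_map_lcore_set(service_id, service_lcore, 1);
+ *             rte_service_runstate_set(service_id, 1);
+ *     }
+ * \endcode
+ */
+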
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_ETH_RX_ADAPTER_ */
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index bbb38050..ce6a5dc1 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -56,6 +56,7 @@
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
+#include <rte_ethdev.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -128,55 +129,77 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
return 0;
}
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->eth_rx_adapter_caps_get ?
+ (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
+ &rte_eth_devices[eth_port_id],
+ caps)
+ : 0;
+}
+
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
uint8_t old_nb_queues = dev->data->nb_queues;
- uint8_t *queues_prio;
+ struct rte_event_queue_conf *queues_cfg;
unsigned int i;
RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
dev->data->dev_id);
/* First time configuration */
- if (dev->data->queues_prio == NULL && nb_queues != 0) {
- /* Allocate memory to store queue priority */
- dev->data->queues_prio = rte_zmalloc_socket(
- "eventdev->data->queues_prio",
- sizeof(dev->data->queues_prio[0]) * nb_queues,
+ if (dev->data->queues_cfg == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue configuration */
+ dev->data->queues_cfg = rte_zmalloc_socket(
+ "eventdev->data->queues_cfg",
+ sizeof(dev->data->queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->queues_prio == NULL) {
+ if (dev->data->queues_cfg == NULL) {
dev->data->nb_queues = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+ RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
"nb_queues %u", nb_queues);
return -(ENOMEM);
}
/* Re-configure */
- } else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
- /* Re allocate memory to store queue priority */
- queues_prio = dev->data->queues_prio;
- queues_prio = rte_realloc(queues_prio,
- sizeof(queues_prio[0]) * nb_queues,
+ /* Re allocate memory to store queue configuration */
+ queues_cfg = dev->data->queues_cfg;
+ queues_cfg = rte_realloc(queues_cfg,
+ sizeof(queues_cfg[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
- if (queues_prio == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+ if (queues_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
" nb_queues %u", nb_queues);
return -(ENOMEM);
}
- dev->data->queues_prio = queues_prio;
+ dev->data->queues_cfg = queues_cfg;
if (nb_queues > old_nb_queues) {
uint8_t new_qs = nb_queues - old_nb_queues;
- memset(queues_prio + old_nb_queues, 0,
- sizeof(queues_prio[0]) * new_qs);
+ memset(queues_cfg + old_nb_queues, 0,
+ sizeof(queues_cfg[0]) * new_qs);
}
- } else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+ } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
for (i = nb_queues; i < old_nb_queues; i++)
@@ -195,8 +218,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
uint8_t old_nb_ports = dev->data->nb_ports;
void **ports;
uint16_t *links_map;
- uint8_t *ports_dequeue_depth;
- uint8_t *ports_enqueue_depth;
+ struct rte_event_port_conf *ports_cfg;
unsigned int i;
RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
@@ -214,26 +236,14 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
return -(ENOMEM);
}
- /* Allocate memory to store ports dequeue depth */
- dev->data->ports_dequeue_depth =
- rte_zmalloc_socket("eventdev->ports_dequeue_depth",
- sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
+ /* Allocate memory to store port configurations */
+ dev->data->ports_cfg =
+ rte_zmalloc_socket("eventdev->ports_cfg",
+ sizeof(dev->data->ports_cfg[0]) * nb_ports,
RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->ports_dequeue_depth == NULL) {
+ if (dev->data->ports_cfg == NULL) {
dev->data->nb_ports = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
- "nb_ports %u", nb_ports);
- return -(ENOMEM);
- }
-
- /* Allocate memory to store ports enqueue depth */
- dev->data->ports_enqueue_depth =
- rte_zmalloc_socket("eventdev->ports_enqueue_depth",
- sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
- RTE_CACHE_LINE_SIZE, dev->data->socket_id);
- if (dev->data->ports_enqueue_depth == NULL) {
- dev->data->nb_ports = 0;
- RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+ RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
"nb_ports %u", nb_ports);
return -(ENOMEM);
}
@@ -257,8 +267,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
ports = dev->data->ports;
- ports_dequeue_depth = dev->data->ports_dequeue_depth;
- ports_enqueue_depth = dev->data->ports_enqueue_depth;
+ ports_cfg = dev->data->ports_cfg;
links_map = dev->data->links_map;
for (i = nb_ports; i < old_nb_ports; i++)
@@ -273,22 +282,12 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
return -(ENOMEM);
}
- /* Realloc memory for ports_dequeue_depth */
- ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
- sizeof(ports_dequeue_depth[0]) * nb_ports,
+ /* Realloc memory for ports_cfg */
+ ports_cfg = rte_realloc(ports_cfg,
+ sizeof(ports_cfg[0]) * nb_ports,
RTE_CACHE_LINE_SIZE);
- if (ports_dequeue_depth == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
- " nb_ports %u", nb_ports);
- return -(ENOMEM);
- }
-
- /* Realloc memory for ports_enqueue_depth */
- ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
- sizeof(ports_enqueue_depth[0]) * nb_ports,
- RTE_CACHE_LINE_SIZE);
- if (ports_enqueue_depth == NULL) {
- RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+ if (ports_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
" nb_ports %u", nb_ports);
return -(ENOMEM);
}
@@ -314,18 +313,15 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
memset(ports + old_nb_ports, 0,
sizeof(ports[0]) * new_ps);
- memset(ports_dequeue_depth + old_nb_ports, 0,
- sizeof(ports_dequeue_depth[0]) * new_ps);
- memset(ports_enqueue_depth + old_nb_ports, 0,
- sizeof(ports_enqueue_depth[0]) * new_ps);
+ memset(ports_cfg + old_nb_ports, 0,
+ sizeof(ports_cfg[0]) * new_ps);
for (i = old_links_map_end; i < links_map_end; i++)
links_map[i] =
EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
dev->data->ports = ports;
- dev->data->ports_dequeue_depth = ports_dequeue_depth;
- dev->data->ports_enqueue_depth = ports_enqueue_depth;
+ dev->data->ports_cfg = ports_cfg;
dev->data->links_map = links_map;
} else if (dev->data->ports != NULL && nb_ports == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
@@ -519,13 +515,13 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ATOMIC)
))
return 1;
else
@@ -535,13 +531,13 @@ is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
- if (queue_conf && (
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ORDERED)
))
return 1;
else
@@ -605,31 +601,10 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
queue_conf = &def_conf;
}
- dev->data->queues_prio[queue_id] = queue_conf->priority;
+ dev->data->queues_cfg[queue_id] = *queue_conf;
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
-uint8_t
-rte_event_queue_count(uint8_t dev_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- return dev->data->nb_queues;
-}
-
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
-{
- struct rte_eventdev *dev;
-
- dev = &rte_eventdevs[dev_id];
- if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
- return dev->data->queues_prio[queue_id];
- else
- return RTE_EVENT_DEV_PRIORITY_NORMAL;
-}
-
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
@@ -726,10 +701,7 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
port_conf = &def_conf;
}
- dev->data->ports_dequeue_depth[port_id] =
- port_conf->dequeue_depth;
- dev->data->ports_enqueue_depth[port_id] =
- port_conf->enqueue_depth;
+ dev->data->ports_cfg[port_id] = *port_conf;
diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
@@ -743,31 +715,110 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return 0;
}
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_dequeue_depth[port_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_DEV_ATTR_PORT_COUNT:
+ *attr_value = dev->data->nb_ports;
+ break;
+ case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
+ *attr_value = dev->data->nb_queues;
+ break;
+ case RTE_EVENT_DEV_ATTR_STARTED:
+ *attr_value = dev->data->dev_started;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->ports_enqueue_depth[port_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ switch (attr_id) {
+ case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
+ *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
-uint8_t
-rte_event_port_count(uint8_t dev_id)
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value)
{
+ struct rte_event_queue_conf *conf;
struct rte_eventdev *dev;
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
- return dev->data->nb_ports;
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ conf = &dev->data->queues_cfg[queue_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+ *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ *attr_value = conf->priority;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+ *attr_value = conf->nb_atomic_flows;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+ *attr_value = conf->nb_atomic_order_sequences;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+ *attr_value = conf->event_queue_cfg;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
+ if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
+ return -EOVERFLOW;
+
+ *attr_value = conf->schedule_type;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
int
@@ -912,6 +963,23 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
}
int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (service_id == NULL)
+ return -EINVAL;
+
+ if (dev->data->service_inited)
+ *service_id = dev->data->service_id;
+
+ return dev->data->service_inited ? 0 : -ESRCH;
+}
+
+int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
struct rte_eventdev *dev;
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 128bc522..f1949ff7 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -218,10 +218,10 @@
* (each worker thread schedules events to its own port) or centralized
* (a dedicated thread schedules to all ports). Distributed software schedulers
* perform the scheduling in rte_event_dequeue_burst(), whereas centralized
- * scheduler logic is located in rte_event_schedule().
+ * scheduler logic needs a dedicated service core for scheduling.
* The RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set
* indicates the device is centralized and thus needs a dedicated scheduling
- * thread that repeatedly calls rte_event_schedule().
+ * thread that repeatedly calls a software-specific scheduling function.
*
* An event driven worker thread has following typical workflow on fastpath:
* \code{.c}
@@ -263,16 +263,16 @@ struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
* In distributed scheduling mode, event scheduling happens in HW or
* rte_event_dequeue_burst() or the combination of these two.
* If the flag is not set then eventdev is centralized and thus needs a
- * dedicated scheduling thread that repeatedly calls rte_event_schedule().
+ * dedicated service core that acts as a scheduling thread.
*
- * @see rte_event_schedule(), rte_event_dequeue_burst()
+ * @see rte_event_dequeue_burst()
*/
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
* If this capability is not set, the queue only supports events of the
- * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ * *RTE_SCHED_TYPE_* type that it was created with.
*
- * @see RTE_EVENT_QUEUE_CFG_* values
+ * @see RTE_SCHED_TYPE_* values
*/
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue(forward,
@@ -399,6 +399,36 @@ struct rte_event_dev_info {
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
+/**
+ * The count of ports.
+ */
+#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
+/**
+ * The count of queues.
+ */
+#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
+/**
+ * The status of the device, zero for stopped, non-zero for started.
+ */
+#define RTE_EVENT_DEV_ATTR_STARTED 2
+
+/**
+ * Get an attribute from a device.
+ *
+ * @param dev_id Eventdev id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful.
+ *
+ * @retval 0 Successfully retrieved attribute value
+ * -EINVAL Invalid device or *attr_id* provided, or *attr_value*
+ * is NULL
+ */
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value);
+
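+/*
+ * Editor's note: with rte_event_queue_count() and rte_event_port_count()
+ * removed in this release, the counts are queried through this attribute
+ * API; a sketch:
+ *
+ * \code{.c}
+ *     uint32_t nb_queues;
+ *
+ *     if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ *                                &nb_queues) < 0)
+ *             rte_exit(EXIT_FAILURE, "queue count query failed\n");
+ * \endcode
+ */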
+
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
@@ -485,39 +515,13 @@ rte_event_dev_configure(uint8_t dev_id,
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
-#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
-/**< Mask for event queue schedule type configuration request */
-#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
*
* @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
* @see rte_event_enqueue_burst()
*/
-#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)
-/**< Allow only ATOMIC schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ATOMIC only and sched_type != RTE_SCHED_TYPE_ATOMIC
- *
- * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)
-/**< Allow only ORDERED schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ORDERED only and sched_type != RTE_SCHED_TYPE_ORDERED
- *
- * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)
-/**< Allow only PARALLEL schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with PARALLEL only and sched_type != RTE_SCHED_TYPE_PARALLEL
- *
- * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port.
*
* @see rte_event_port_setup(), rte_event_port_link()
@@ -528,8 +532,8 @@ struct rte_event_queue_conf {
uint32_t nb_atomic_flows;
/**< The maximum number of active flows this queue can track at any
* given time. If the queue is configured for atomic scheduling (by
- * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES or
- * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flags to event_queue_cfg), then the
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
+ * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
* value must be in the range of [1, nb_event_queue_flows], which was
* previously provided in rte_event_dev_configure().
*/
@@ -542,12 +546,18 @@ struct rte_event_queue_conf {
* event will be returned from dequeue until one or more entries are
* freed up/released.
* If the queue is configured for ordered scheduling (by applying the
- * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_EVENT_QUEUE_CFG_ORDERED_ONLY
- * flags to event_queue_cfg), then the value must be in the range of
- * [1, nb_event_queue_flows], which was previously supplied to
- * rte_event_dev_configure().
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
+ * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
+ * be in the range of [1, nb_event_queue_flows], which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg;
+ /**< Queue config flags (RTE_EVENT_QUEUE_CFG_*) */
+ uint8_t schedule_type;
+ /**< Queue schedule type (RTE_SCHED_TYPE_*).
+ * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
+ * event_queue_cfg.
*/
- uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
uint8_t priority;
/**< Priority for this event queue relative to other event queues.
* The requested priority should be in the range of
@@ -607,31 +617,45 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf);
/**
- * Get the number of event queues on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @return
- * - The number of configured event queues
+ * The priority of the queue.
*/
-uint8_t
-rte_event_queue_count(uint8_t dev_id);
+#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
+/**
+ * The number of atomic flows configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
+/**
+ * The number of atomic order sequences configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
+/**
+ * The cfg flags for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
+/**
+ * The schedule type of the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/**
- * Get the priority of the event queue on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param queue_id
- * Event queue identifier.
- * @return
- * - If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then the
- * configured priority of the event queue in
- * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST] range
- * else the value RTE_EVENT_DEV_PRIORITY_NORMAL
+ * Get an attribute from a queue.
+ *
+ * @param dev_id Eventdev id
+ * @param queue_id Eventdev queue id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful
+ *
+ * @retval 0 Successfully returned value
+ * -EINVAL invalid device, queue or attr_id provided, or attr_value
+ * was NULL
+ * -EOVERFLOW returned when attr_id is set to
+ * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES
*/
-uint8_t
-rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value);
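+
+/*
+ * Editor's note: a sketch of the replacement for the removed
+ * rte_event_queue_priority(); -EOVERFLOW applies only to the
+ * SCHEDULE_TYPE attribute of an ALL_TYPES queue:
+ *
+ * \code{.c}
+ *     uint32_t priority;
+ *
+ *     if (rte_event_queue_attr_get(dev_id, queue_id,
+ *                                  RTE_EVENT_QUEUE_ATTR_PRIORITY,
+ *                                  &priority) < 0)
+ *             rte_exit(EXIT_FAILURE, "queue attr query failed\n");
+ * \endcode
+ */
+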
/* Event port specific APIs */
@@ -715,47 +739,33 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
const struct rte_event_port_conf *port_conf);
/**
- * Get the number of dequeue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param port_id
- * Event port identifier.
- * @return
- * - The number of configured dequeue queue depth
- *
- * @see rte_event_dequeue_burst()
+ * The queue depth of the port on the enqueue side
*/
-uint8_t
-rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);
-
+#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**
- * Get the number of enqueue queue depth configured for event port designated
- * by its *port_id* on a specific event device
- *
- * @param dev_id
- * Event device identifier.
- * @param port_id
- * Event port identifier.
- * @return
- * - The number of configured enqueue queue depth
- *
- * @see rte_event_enqueue_burst()
+ * The queue depth of the port on the dequeue side
*/
-uint8_t
-rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);
+#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
+/**
+ * The new event threshold of the port
+ */
+#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**
- * Get the number of ports on a specific event device
+ * Get an attribute from a port.
*
- * @param dev_id
- * Event device identifier.
- * @return
- * - The number of configured ports
+ * @param dev_id Eventdev id
+ * @param port_id Eventdev port id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful
+ *
+ * @retval 0 Successfully returned value
+ * -EINVAL Invalid device, port or attr_id, or attr_value was NULL
*/
-uint8_t
-rte_event_port_count(uint8_t dev_id);
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value);
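+
+/*
+ * Editor's note: a sketch of the replacement for the removed
+ * rte_event_port_dequeue_depth():
+ *
+ * \code{.c}
+ *     uint32_t deq_depth;
+ *
+ *     if (rte_event_port_attr_get(dev_id, port_id,
+ *                                 RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
+ *                                 &deq_depth) < 0)
+ *             rte_exit(EXIT_FAILURE, "port attr query failed\n");
+ * \endcode
+ */
+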
/**
* Start an event device.
@@ -871,6 +881,8 @@ rte_event_dev_close(uint8_t dev_id);
/**< The event generated from cpu for pipelining.
* Application may use *sub_event_type* to further classify the event
*/
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
+/**< The event generated from the event eth Rx adapter */
#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */
@@ -882,7 +894,10 @@ rte_event_dev_close(uint8_t dev_id);
#define RTE_EVENT_OP_FORWARD 1
/**< The CPU uses this operation to forward the event to a different event
 * queue or change to a new application specific flow or schedule type to enable
- * pipelining
+ * pipelining.
+ *
+ * This operation must only be enqueued to the same port that the
+ * event to be forwarded was dequeued from.
*/
#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow context associated with the schedule type.
@@ -912,6 +927,9 @@ rte_event_dev_close(uint8_t dev_id);
* or no scheduling context is held then this function may be an NOOP,
* depending on the implementation.
*
+ * This operation must only be enqueued to the same port that the
+ * event to be released was dequeued from.
+ *
*/
/**
@@ -990,14 +1008,50 @@ struct rte_event {
};
};
+/* Ethdev Rx adapter capability bitmap flags */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
+/**< This flag is set when the packet transfer mechanism is implemented in HW.
+ * Ethdev can send packets to the event device using internal event port.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
+/**< Adapter supports multiple event queues per ethdev. Every ethdev
+ * Rx queue can be connected to a unique event queue.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
+/**< The application can override the adapter generated flow ID in the
+ * event. This flow ID can be specified when adding an ethdev Rx queue
+ * to the adapter using the ev member of
+ * struct rte_event_eth_rx_adapter_queue_conf.
+ * @see struct rte_event_eth_rx_adapter_queue_conf::ev
+ * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param eth_port_id
+ * The identifier of the ethernet device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps);
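+
+/*
+ * Editor's note: an illustrative capability check before choosing
+ * between the HW internal-port path and the SW service path (a sketch;
+ * the setup_* helpers are hypothetical):
+ *
+ * \code{.c}
+ *     uint32_t caps = 0;
+ *
+ *     rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);
+ *     if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)
+ *             setup_internal_port_transfer();
+ *     else
+ *             setup_service_core_transfer();
+ * \endcode
+ */
+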
struct rte_eventdev_driver;
struct rte_eventdev_ops;
struct rte_eventdev;
-typedef void (*event_schedule_t)(struct rte_eventdev *dev);
-/**< @internal Schedule one or more events in the event dev. */
-
typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue event on port of a device */
@@ -1034,12 +1088,10 @@ struct rte_eventdev_data {
/**< Number of event ports. */
void **ports;
/**< Array of pointers to ports. */
- uint8_t *ports_dequeue_depth;
- /**< Array of port dequeue depth. */
- uint8_t *ports_enqueue_depth;
- /**< Array of port enqueue depth. */
- uint8_t *queues_prio;
- /**< Array of queue priority. */
+ struct rte_event_port_conf *ports_cfg;
+ /**< Array of port configuration structures. */
+ struct rte_event_queue_conf *queues_cfg;
+ /**< Array of queue configuration structures. */
uint16_t *links_map;
/**< Memory to store queues to port connections. */
void *dev_private;
@@ -1048,6 +1100,10 @@ struct rte_eventdev_data {
/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
struct rte_event_dev_config dev_conf;
/**< Configuration applied to device. */
+ uint8_t service_inited;
+ /* Service initialization state */
+ uint32_t service_id;
+ /* Service ID */
RTE_STD_C11
uint8_t dev_started : 1;
@@ -1059,8 +1115,6 @@ struct rte_eventdev_data {
/** @internal The data structure associated with each event device. */
struct rte_eventdev {
- event_schedule_t schedule;
- /**< Pointer to PMD schedule function. */
event_enqueue_t enqueue;
/**< Pointer to PMD enqueue function. */
event_enqueue_burst_t enqueue_burst;
@@ -1089,24 +1143,6 @@ struct rte_eventdev {
extern struct rte_eventdev *rte_eventdevs;
/** @internal The pool of rte_eventdev structures. */
-
-/**
- * Schedule one or more events in the event dev.
- *
- * An event dev implementation may define this is a NOOP, for instance if
- * the event dev performs its scheduling in hardware.
- *
- * @param dev_id
- * The identifier of the device.
- */
-static inline void
-rte_event_schedule(uint8_t dev_id)
-{
- struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- if (*dev->schedule)
- (*dev->schedule)(dev);
-}
-
static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
const struct rte_event ev[], uint16_t nb_events,
@@ -1144,6 +1180,9 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
* The *nb_events* parameter is the number of event objects to enqueue which are
* supplied in the *ev* array of *rte_event* structure.
*
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
* The rte_event_enqueue_burst() function returns the number of
* events objects it actually enqueued. A return value equal to *nb_events*
* means that all event objects have been enqueued.
@@ -1346,6 +1385,9 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
* with RTE_EVENT_OP_RELEASE operation can be used to release the
* contexts early.
*
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
* @param dev_id
* The identifier of the device.
* @param port_id
@@ -1545,6 +1587,24 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint8_t priorities[]);
/**
+ * Retrieve the service ID of the event dev. If the event dev doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, if the event dev doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
+
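+/*
+ * Editor's note: for centralized eventdevs this service takes over the
+ * role of the removed rte_event_schedule() loop; a sketch of driving it
+ * from a service lcore (sched_lcore is a hypothetical, already started
+ * service core):
+ *
+ * \code{.c}
+ *     uint32_t sched_id;
+ *
+ *     if (rte_event_dev_service_id_get(dev_id, &sched_id) == 0) {
+ *             rte_service_map_lcore_set(sched_id, sched_lcore, 1);
+ *             rte_service_runstate_set(sched_id, 1);
+ *     }
+ * \endcode
+ */
+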
+/**
* Dump internal information about *dev_id* to the FILE* provided in *f*.
*
* @param dev_id
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 3d72acf3..7a206c56 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -83,9 +83,19 @@ extern "C" {
} \
} while (0)
+#define RTE_EVENT_ETH_RX_ADAPTER_SW_CAP \
+ ((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))
+
+/**< Ethernet Rx adapter capabilities returned if the packet transfers
+ * from the ethdev to the eventdev use a SW service function
+ */
+
#define RTE_EVENTDEV_DETACHED (0)
#define RTE_EVENTDEV_ATTACHED (1)
+struct rte_eth_dev;
+
/** Global structure used for maintaining state of allocated event devices */
struct rte_eventdev_global {
uint8_t nb_devs; /**< Number of devices found */
@@ -429,6 +439,163 @@ typedef int (*eventdev_xstats_get_names_t)(const struct rte_eventdev *dev,
typedef uint64_t (*eventdev_xstats_get_by_name)(const struct rte_eventdev *dev,
const char *name, unsigned int *id);
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+
+struct rte_event_eth_rx_adapter_queue_conf;
+
+/**
+ * Add ethernet Rx queues to event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(.., eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @param queue_conf
+ * Additional configuration structure
+ *
+ * @return
+ * - 0: Success, ethernet receive queue added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_add_t)(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+/**
+ * Delete ethernet Rx queues from event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(.., eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @return
+ * - 0: Success, ethernet receive queue deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+
+/**
+ * Start ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(.., eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+/**
+ * Stop ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(.., eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_stats;
+
+/**
+ * Retrieve ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ struct rte_event_eth_rx_adapter_stats *stats);
+
+/**
+ * Reset ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
/** Event device operations function pointer table */
struct rte_eventdev_ops {
eventdev_info_get_t dev_infos_get; /**< Get device info. */
@@ -468,6 +635,21 @@ struct rte_eventdev_ops {
/**< Get one value by name. */
eventdev_xstats_reset_t xstats_reset;
/**< Reset the statistics values in xstats. */
+
+ eventdev_eth_rx_adapter_caps_get_t eth_rx_adapter_caps_get;
+ /**< Get ethernet Rx adapter capabilities */
+ eventdev_eth_rx_adapter_queue_add_t eth_rx_adapter_queue_add;
+ /**< Add Rx queues to ethernet Rx adapter */
+ eventdev_eth_rx_adapter_queue_del_t eth_rx_adapter_queue_del;
+ /**< Delete Rx queues from ethernet Rx adapter */
+ eventdev_eth_rx_adapter_start_t eth_rx_adapter_start;
+ /**< Start ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stop_t eth_rx_adapter_stop;
+ /**< Stop ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stats_get eth_rx_adapter_stats_get;
+ /**< Get ethernet Rx stats */
+ eventdev_eth_rx_adapter_stats_reset eth_rx_adapter_stats_reset;
+ /**< Reset ethernet Rx stats */
};
/**
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
index b6bd7319..ade32b5d 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -50,6 +50,7 @@ extern "C" {
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include "rte_eventdev_pmd.h"
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
index 135e8b80..56232dec 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
@@ -48,7 +48,7 @@ extern "C" {
#include <rte_debug.h>
#include <rte_eal.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include "rte_eventdev_pmd.h"
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 4c48e5f0..108ae61f 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -19,17 +19,12 @@ DPDK_17.05 {
rte_event_port_default_conf_get;
rte_event_port_setup;
- rte_event_port_dequeue_depth;
- rte_event_port_enqueue_depth;
- rte_event_port_count;
rte_event_port_link;
rte_event_port_unlink;
rte_event_port_links_get;
rte_event_queue_default_conf_get;
rte_event_queue_setup;
- rte_event_queue_count;
- rte_event_queue_priority;
rte_event_dequeue_timeout_ticks;
@@ -51,3 +46,25 @@ DPDK_17.08 {
rte_event_ring_init;
rte_event_ring_lookup;
} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_event_dev_attr_get;
+ rte_event_dev_service_id_get;
+ rte_event_port_attr_get;
+ rte_event_queue_attr_get;
+
+ rte_event_eth_rx_adapter_caps_get;
+ rte_event_eth_rx_adapter_create;
+ rte_event_eth_rx_adapter_create_ext;
+ rte_event_eth_rx_adapter_free;
+ rte_event_eth_rx_adapter_queue_add;
+ rte_event_eth_rx_adapter_queue_del;
+ rte_event_eth_rx_adapter_service_id_get;
+ rte_event_eth_rx_adapter_start;
+ rte_event_eth_rx_adapter_stats_get;
+ rte_event_eth_rx_adapter_stats_reset;
+ rte_event_eth_rx_adapter_stop;
+
+} DPDK_17.08;
diff --git a/lib/librte_flow_classify/Makefile b/lib/librte_flow_classify/Makefile
new file mode 100644
index 00000000..ea792f5d
--- /dev/null
+++ b/lib/librte_flow_classify/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_flow_classify.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_flow_classify_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_net -lrte_table -lrte_acl
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify.c
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify_parse.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)-include := rte_flow_classify.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c
new file mode 100644
index 00000000..e6f44864
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.c
@@ -0,0 +1,691 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+#include <rte_table_acl.h>
+#include <stdbool.h>
+
+int librte_flow_classify_logtype;
+
+static struct rte_eth_ntuple_filter ntuple_filter;
+static uint32_t unique_id = 1;
+
+
+struct rte_flow_classify_table_entry {
+ /* meta-data for classify rule */
+ uint32_t rule_id;
+};
+
+struct rte_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ uint32_t entry_size;
+ enum rte_flow_classify_table_type type;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+};
+
+#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
+
+struct rte_flow_classifier {
+ /* Input parameters */
+ char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
+ int socket_id;
+ enum rte_flow_classify_table_type type;
+
+ /* Internal tables */
+ struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
+ uint32_t num_tables;
+ uint16_t nb_pkts;
+ struct rte_flow_classify_table_entry
+ *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+} __rte_cache_aligned;
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+struct acl_keys {
+ struct rte_table_acl_rule_add_params key_add; /* add key */
+ struct rte_table_acl_rule_delete_params key_del; /* delete key */
+};
+
+struct classify_rules {
+ enum rte_flow_classify_rule_type type;
+ union {
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+ } u;
+};
+
+struct rte_flow_classify_rule {
+ uint32_t id; /* unique ID of classify rule */
+ struct rte_flow_action action; /* action when match found */
+ struct classify_rules rules; /* union of rules */
+ union {
+ struct acl_keys key;
+ } u;
+ int key_found; /* rule key found in table */
+ void *entry; /* pointer to buffer to hold rule meta data */
+ void *entry_ptr; /* handle to the table entry for rule meta data */
+};
+
+static int
+flow_classify_parse_flow(
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items;
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0;
+ uint32_t i = 0;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(ntuple_filter));
+
+ /* Count the non-void items in the pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = malloc(item_num * sizeof(struct rte_flow_item));
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for pattern items.");
+ return -ENOMEM;
+ }
+
+ memset(items, 0, item_num * sizeof(struct rte_flow_item));
+ classify_pattern_skip_void_item(items, pattern);
+
+ parse_filter = classify_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ free(items);
+ return -EINVAL;
+ }
+
+ ret = parse_filter(attr, items, actions, &ntuple_filter, error);
+ free(items);
+ return ret;
+}
+
+
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+ *a = (unsigned char)(ip >> 24 & 0xff);\
+ *b = (unsigned char)(ip >> 16 & 0xff);\
+ *c = (unsigned char)(ip >> 8 & 0xff);\
+ *d = (unsigned char)(ip & 0xff);\
+ } while (0)
+
+static inline void
+print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
+{
+ unsigned char a, b, c, d;
+
+ printf("%s: 0x%02hhx/0x%hhx ", __func__,
+ key->field_value[PROTO_FIELD_IPV4].value.u8,
+ key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+ uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+ uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+ printf("%hu : 0x%x %hu : 0x%x",
+ key->field_value[SRCP_FIELD_IPV4].value.u16,
+ key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+ key->field_value[DSTP_FIELD_IPV4].value.u16,
+ key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+
+ printf(" priority: 0x%x\n", key->priority);
+}
+
+static inline void
+print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
+{
+ unsigned char a, b, c, d;
+
+ printf("%s: 0x%02hhx/0x%hhx ", __func__,
+ key->field_value[PROTO_FIELD_IPV4].value.u8,
+ key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+ uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+ uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+ printf("%hu : 0x%x %hu : 0x%x\n",
+ key->field_value[SRCP_FIELD_IPV4].value.u16,
+ key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+ key->field_value[DSTP_FIELD_IPV4].value.u16,
+ key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+}
+
+static int
+rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
+{
+ if (params == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* name */
+ if (params->name == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter name\n", __func__);
+ return -EINVAL;
+ }
+
+ /* socket */
+ if ((params->socket_id < 0) ||
+ (params->socket_id >= RTE_MAX_NUMA_NODES)) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter socket_id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct rte_flow_classifier *
+rte_flow_classifier_create(struct rte_flow_classifier_params *params)
+{
+ struct rte_flow_classifier *cls;
+ int ret;
+
+ /* Check input parameters */
+ ret = rte_flow_classifier_check_params(params);
+ if (ret != 0) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier params check failed (%d)\n",
+ __func__, ret);
+ return NULL;
+ }
+
+ /* Allocate memory for the flow classifier */
+ cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
+ sizeof(struct rte_flow_classifier),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier memory allocation failed\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Save input parameters */
+ snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
+ params->name);
+ cls->socket_id = params->socket_id;
+ cls->type = params->type;
+
+ /* Initialize flow classifier internal data structure */
+ cls->num_tables = 0;
+
+ return cls;
+}
+
+static void
+rte_flow_classify_table_free(struct rte_table *table)
+{
+ if (table->ops.f_free != NULL)
+ table->ops.f_free(table->h_table);
+}
+
+int
+rte_flow_classifier_free(struct rte_flow_classifier *cls)
+{
+ uint32_t i;
+
+ /* Check input parameters */
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: rte_flow_classifier parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Free tables */
+ for (i = 0; i < cls->num_tables; i++) {
+ struct rte_table *table = &cls->tables[i];
+
+ rte_flow_classify_table_free(table);
+ }
+
+ /* Free flow classifier memory */
+ rte_free(cls);
+
+ return 0;
+}
+
+static int
+rte_table_check_params(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params,
+ uint32_t *table_id)
+{
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (table_id == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: table_id parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: f_create function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_lookup == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: f_lookup function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more table? */
+ if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for num_tables parameter\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params,
+ uint32_t *table_id)
+{
+ struct rte_table *table;
+ void *h_table;
+ uint32_t entry_size, id;
+ int ret;
+
+ /* Check input arguments */
+ ret = rte_table_check_params(cls, params, table_id);
+ if (ret != 0)
+ return ret;
+
+ id = cls->num_tables;
+ table = &cls->tables[id];
+
+ /* calculate table entry size */
+ entry_size = sizeof(struct rte_flow_classify_table_entry);
+
+ /* Create the table */
+ h_table = params->ops->f_create(params->arg_create, cls->socket_id,
+ entry_size);
+ if (h_table == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current table to the classifier */
+ cls->num_tables++;
+ *table_id = id;
+
+ /* Save input parameters */
+ memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+
+ /* Initialize table internal data structure */
+ table->entry_size = entry_size;
+ table->h_table = h_table;
+
+ return 0;
+}
+
+static struct rte_flow_classify_rule *
+allocate_acl_ipv4_5tuple_rule(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int log_level;
+
+ rule = malloc(sizeof(struct rte_flow_classify_rule));
+ if (!rule)
+ return rule;
+
+ memset(rule, 0, sizeof(struct rte_flow_classify_rule));
+ rule->id = unique_id++;
+ rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
+
+ memcpy(&rule->action, classify_get_flow_action(),
+ sizeof(struct rte_flow_action));
+
+ /* key add values */
+ rule->u.key.key_add.priority = ntuple_filter.priority;
+ rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
+ ntuple_filter.proto_mask;
+ rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
+ ntuple_filter.proto;
+ rule->rules.u.ipv4_5tuple.proto = ntuple_filter.proto;
+ rule->rules.u.ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
+
+ rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
+ ntuple_filter.src_ip_mask;
+ rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
+ ntuple_filter.src_ip;
+ rule->rules.u.ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
+ rule->rules.u.ipv4_5tuple.src_ip = ntuple_filter.src_ip;
+
+ rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
+ ntuple_filter.dst_ip_mask;
+ rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
+ ntuple_filter.dst_ip;
+ rule->rules.u.ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
+ rule->rules.u.ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
+
+ rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
+ ntuple_filter.src_port_mask;
+ rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
+ ntuple_filter.src_port;
+ rule->rules.u.ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
+ rule->rules.u.ipv4_5tuple.src_port = ntuple_filter.src_port;
+
+ rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
+ ntuple_filter.dst_port_mask;
+ rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
+ ntuple_filter.dst_port;
+ rule->rules.u.ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
+ rule->rules.u.ipv4_5tuple.dst_port = ntuple_filter.dst_port;
+
+ log_level = rte_log_get_level(librte_flow_classify_logtype);
+
+ if (log_level == RTE_LOG_DEBUG)
+ print_acl_ipv4_key_add(&rule->u.key.key_add);
+
+ /* key delete values */
+ memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
+ &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
+ NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
+
+ if (log_level == RTE_LOG_DEBUG)
+ print_acl_ipv4_key_delete(&rule->u.key.key_del);
+
+ return rule;
+}
+
+struct rte_flow_classify_rule *
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ int *key_found,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_classify_rule *rule;
+ struct rte_flow_classify_table_entry *table_entry;
+ int ret;
+
+ if (!error)
+ return NULL;
+
+ if (!cls) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "NULL classifier.");
+ return NULL;
+ }
+
+ if (table_id >= cls->num_tables) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid table_id.");
+ return NULL;
+ }
+
+ if (key_found == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "NULL key_found.");
+ return NULL;
+ }
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return NULL;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return NULL;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return NULL;
+ }
+
+ /* parse attr, pattern and actions */
+ ret = flow_classify_parse_flow(attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ switch (cls->type) {
+ case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
+ rule = allocate_acl_ipv4_5tuple_rule();
+ if (!rule)
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+
+ rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
+ if (!rule->entry) {
+ free(rule);
+ return NULL;
+ }
+
+ table_entry = rule->entry;
+ table_entry->rule_id = rule->id;
+
+ if (cls->tables[table_id].ops.f_add != NULL) {
+ ret = cls->tables[table_id].ops.f_add(
+ cls->tables[table_id].h_table,
+ &rule->u.key.key_add,
+ rule->entry,
+ &rule->key_found,
+ &rule->entry_ptr);
+ if (ret) {
+ free(rule->entry);
+ free(rule);
+ return NULL;
+ }
+ *key_found = rule->key_found;
+ }
+ return rule;
+}
+
+int
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_flow_classify_rule *rule)
+{
+ int ret = -EINVAL;
+
+ if (!cls || !rule || table_id >= cls->num_tables)
+ return ret;
+
+ if (cls->tables[table_id].ops.f_delete != NULL)
+ ret = cls->tables[table_id].ops.f_delete(
+ cls->tables[table_id].h_table,
+ &rule->u.key.key_del,
+ &rule->key_found,
+ &rule->entry);
+
+ return ret;
+}
+
+static int
+flow_classifier_lookup(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts)
+{
+ int ret = -EINVAL;
+ uint64_t pkts_mask;
+ uint64_t lookup_hit_mask;
+
+ pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ ret = cls->tables[table_id].ops.f_lookup(
+ cls->tables[table_id].h_table,
+ pkts, pkts_mask, &lookup_hit_mask,
+ (void **)cls->entries);
+
+ if (!ret && lookup_hit_mask)
+ cls->nb_pkts = nb_pkts;
+ else
+ cls->nb_pkts = 0;
+
+ return ret;
+}
+
+static int
+action_apply(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats)
+{
+ struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
+ uint64_t count = 0;
+ int i;
+ int ret = -EINVAL;
+
+ switch (rule->action.type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ for (i = 0; i < cls->nb_pkts; i++) {
+ if (rule->id == cls->entries[i]->rule_id)
+ count++;
+ }
+ if (count) {
+ ret = 0;
+ ntuple_stats =
+ (struct rte_flow_classify_ipv4_5tuple_stats *)
+ stats->stats;
+ ntuple_stats->counter1 = count;
+ ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
+ }
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+
+ return ret;
+}
+
+int
+rte_flow_classifier_query(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats)
+{
+ int ret = -EINVAL;
+
+ if (!cls || !rule || !stats || !pkts || nb_pkts == 0 ||
+ table_id >= cls->num_tables)
+ return ret;
+
+ ret = flow_classifier_lookup(cls, table_id, pkts, nb_pkts);
+ if (!ret)
+ ret = action_apply(cls, rule, stats);
+ return ret;
+}
+
+RTE_INIT(librte_flow_classify_init_log);
+
+static void
+librte_flow_classify_init_log(void)
+{
+ librte_flow_classify_logtype =
+ rte_log_register("librte.flow_classify");
+ if (librte_flow_classify_logtype >= 0)
+ rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify.h b/lib/librte_flow_classify/rte_flow_classify.h
new file mode 100644
index 00000000..1211873a
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify.h
@@ -0,0 +1,289 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_H_
+#define _RTE_FLOW_CLASSIFY_H_
+
+/**
+ * @file
+ *
+ * RTE Flow Classify Library
+ *
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This library provides flow record information with some measured properties.
+ *
+ * The application should define the flow and the measurement criteria
+ * (action) for it.
+ *
+ * The library doesn't maintain any flow records itself; instead, flow
+ * information is returned to the upper layer only for the given packets.
+ *
+ * It is the application's responsibility to call rte_flow_classifier_query()
+ * for a burst of packets, just after receiving them or before transmitting
+ * them.
+ * The application should specify the flow type it is interested in, and the
+ * measurement to apply to that flow, via the
+ * rte_flow_classify_table_entry_add() API, and should provide the
+ * rte_flow_classifier object and storage for the results to the
+ * rte_flow_classifier_query() API.
+ *
+ * Usage:
+ * - application calls rte_flow_classifier_create() to create an
+ * rte_flow_classifier object.
+ * - application calls rte_flow_classify_table_create() to create a table
+ * in the rte_flow_classifier object.
+ * - application calls rte_flow_classify_table_entry_add() to add a rule to
+ * the table in the rte_flow_classifier object.
+ * - application calls rte_flow_classifier_query() in a polling manner,
+ * preferably after rte_eth_rx_burst(). This will cause the library to
+ * match packet information to flow information with some measurements.
+ * - rte_flow_classifier object can be destroyed when it is no longer needed
+ * with rte_flow_classifier_free()
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_acl.h>
+#include <rte_table_acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int librte_flow_classify_logtype;
+
+#define RTE_FLOW_CLASSIFY_LOG(level, fmt, args...) \
+rte_log(RTE_LOG_ ## level, librte_flow_classify_logtype, "%s(): " fmt, \
+ __func__, ## args)
+
+/** Opaque data type for flow classifier */
+struct rte_flow_classifier;
+
+/** Opaque data type for flow classify rule */
+struct rte_flow_classify_rule;
+
+/** Flow classify rule type */
+enum rte_flow_classify_rule_type {
+ /** no type */
+ RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,
+ /** IPv4 5tuple type */
+ RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE,
+};
+
+/** Flow classify table type */
+enum rte_flow_classify_table_type {
+ /** no type */
+ RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE,
+ /** ACL type */
+ RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,
+};
+
+/**
+ * Maximum number of tables allowed for any Flow Classifier instance.
+ * The value of this parameter cannot be changed.
+ */
+#define RTE_FLOW_CLASSIFY_TABLE_MAX 64
+
+/** Parameters for flow classifier creation */
+struct rte_flow_classifier_params {
+ /** flow classifier name */
+ const char *name;
+
+ /** CPU socket ID where memory for the flow classifier and its
+ * elements (tables) should be allocated
+ */
+ int socket_id;
+
+ /** Table type */
+ enum rte_flow_classify_table_type type;
+};
+
+/** Parameters for table creation */
+struct rte_flow_classify_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+
+ /** Opaque param to be passed to the table create operation */
+ void *arg_create;
+};
+
+/** IPv4 5-tuple data */
+struct rte_flow_classify_ipv4_5tuple {
+ uint32_t dst_ip; /**< Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /**< Mask of destination IP address. */
+ uint32_t src_ip; /**< Source IP address in big endian. */
+ uint32_t src_ip_mask; /**< Mask of source IP address. */
+ uint16_t dst_port; /**< Destination port in big endian. */
+ uint16_t dst_port_mask; /**< Mask of destination port. */
+ uint16_t src_port; /**< Source Port in big endian. */
+ uint16_t src_port_mask; /**< Mask of source port. */
+ uint8_t proto; /**< L4 protocol. */
+ uint8_t proto_mask; /**< Mask of L4 protocol. */
+};
+
+/**
+ * Flow stats
+ *
+ * For the count action, stats can be returned by the query API.
+ *
+ * Storage for stats is provided by application.
+ */
+struct rte_flow_classify_stats {
+ void *stats;
+};
+
+struct rte_flow_classify_ipv4_5tuple_stats {
+ /** count of packets that match IPv4 5tuple pattern */
+ uint64_t counter1;
+ /** IPv4 5tuple data */
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+};
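+
+/*
+ * Example (illustrative): for the count action, the application itself
+ * provides the storage that the query API fills in, e.g.
+ *
+ *	struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
+ *	struct rte_flow_classify_stats stats = { .stats = &ntuple_stats };
+ *
+ * and passes &stats to rte_flow_classifier_query(); on a hit, counter1
+ * and ipv4_5tuple are filled in.
+ */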
+
+/**
+ * Flow classifier create
+ *
+ * @param params
+ * Parameters for flow classifier creation
+ * @return
+ * Handle to flow classifier instance on success or NULL otherwise
+ */
+struct rte_flow_classifier *
+rte_flow_classifier_create(struct rte_flow_classifier_params *params);
+
+/**
+ * Flow classifier free
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int
+rte_flow_classifier_free(struct rte_flow_classifier *cls);
+
+/**
+ * Flow classify table create
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @param params
+ * Parameters for flow_classify table creation
+ * @param table_id
+ * Table ID. Valid only within the scope of table IDs of the current
+ * classifier. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int
+rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params,
+ uint32_t *table_id);
+
+/**
+ * Add a flow classify rule to the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * ID of the table
+ * @param[out] key_found
+ * returns 1 if the key is already present, 0 otherwise.
+ * @param[in] attr
+ * Flow rule attributes
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Structure
+ * initialised in case of error only.
+ * @return
+ * A valid handle in case of success, NULL otherwise.
+ */
+struct rte_flow_classify_rule *
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ int *key_found,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+/**
+ * Delete a flow classify rule from the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * ID of the table
+ * @param[in] rule
+ * Flow classify rule
+ * @return
+ * 0 on success, error code otherwise.
+ */
+int
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_flow_classify_rule *rule);
+
+/**
+ * Query flow classifier for given rule.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] table_id
+ * ID of the table
+ * @param[in] pkts
+ * Pointer to packets to process
+ * @param[in] nb_pkts
+ * Number of packets to process
+ * @param[in] rule
+ * Flow classify rule
+ * @param[out] stats
+ * Flow classify stats (storage provided by the application, filled in
+ * by the library on success)
+ *
+ * @return
+ * 0 on success, error code otherwise.
+ */
+int
+rte_flow_classifier_query(struct rte_flow_classifier *cls,
+ uint32_t table_id,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats);
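+
+/*
+ * For example (a sketch; rx_pkts and nb_rx are assumed to come from a
+ * preceding rte_eth_rx_burst(), and rule from a prior
+ * rte_flow_classify_table_entry_add()):
+ *
+ *	ret = rte_flow_classifier_query(cls, table_id, rx_pkts, nb_rx,
+ *			rule, &stats);
+ */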
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.c b/lib/librte_flow_classify/rte_flow_classify_parse.c
new file mode 100644
index 00000000..dbfa1115
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -0,0 +1,546 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+
+struct classify_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
+static struct rte_flow_action action;
+
+/* Pattern for IPv4 5-tuple UDP filter */
+static enum rte_flow_item_type pattern_ntuple_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern for IPv4 5-tuple TCP filter */
+static enum rte_flow_item_type pattern_ntuple_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern for IPv4 5-tuple SCTP filter */
+static enum rte_flow_item_type pattern_ntuple_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error);
+
+static struct classify_valid_pattern classify_supported_patterns[] = {
+ /* ntuple */
+ { pattern_ntuple_1, classify_parse_ntuple_filter },
+ { pattern_ntuple_2, classify_parse_ntuple_filter },
+ { pattern_ntuple_3, classify_parse_ntuple_filter },
+};
+
+struct rte_flow_action *
+classify_get_flow_action(void)
+{
+ return &action;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = classify_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = classify_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+classify_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find a parse filter function matching the pattern, if any */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(classify_supported_patterns); i++) {
+ if (classify_match_pattern(classify_supported_patterns[i].items,
+ pattern)) {
+ parse_filter =
+ classify_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+#define FLOW_RULE_MIN_PRIORITY 8
+#define FLOW_RULE_MAX_PRIORITY 0
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+ do {\
+ item = pattern + index;\
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+ index++;\
+ item = pattern + index;\
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+ do {\
+ act = actions + index;\
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ index++;\
+ act = actions + index;\
+ } \
+ } while (0)
+
+/**
+ * Please be aware of the assumption made by all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe packets, the packets
+ * should normally use network byte order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and extract the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP, TCP or SCTP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action must be COUNT.
+ * The next not void action must be END.
+ * pattern example:
+ * ITEM		Spec				Mask
+ * ETH		NULL				NULL
+ * IPV4		src_addr 192.168.1.20		0xFFFFFFFF
+ *		dst_addr 192.167.3.50		0xFFFFFFFF
+ *		next_proto_id	17		0xFF
+ * UDP/TCP/	src_port	80		0xFFFF
+ * SCTP		dst_port	80		0xFFFF
+ * END
+ * All other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
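+ *
+ * An item array matching the layout above might look as follows (a
+ * sketch; ipv4_spec/ipv4_mask and udp_spec/udp_mask are assumed to be
+ * filled with the network-order values and masks from the table):
+ *
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ *		  .spec = &udp_spec, .mask = &udp_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};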
+ */
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -EINVAL;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -EINVAL;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -EINVAL;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item,
+ "Not supported last point for range");
+ return -EINVAL;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -EINVAL;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -EINVAL;
+
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ /* get the TCP/UDP info */
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -EINVAL;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -EINVAL;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports count,
+ * check if the first not void action is COUNT.
+ */
+ memset(&action, 0, sizeof(action));
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -EINVAL;
+ }
+ action.type = RTE_FLOW_ACTION_TYPE_COUNT;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -EINVAL;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -EINVAL;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -EINVAL;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -EINVAL;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority > FLOW_RULE_MIN_PRIORITY)
+ filter->priority = FLOW_RULE_MAX_PRIORITY;
+
+ return 0;
+}
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.h b/lib/librte_flow_classify/rte_flow_classify_parse.h
new file mode 100644
index 00000000..1d4708a7
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
+#define _RTE_FLOW_CLASSIFY_PARSE_H_
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error);
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern);
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void);
+
+
+/* Find a parse filter function matching the pattern, if any */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern);
+
+/* get action data */
+struct rte_flow_action *
+classify_get_flow_action(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */
diff --git a/lib/librte_flow_classify/rte_flow_classify_version.map b/lib/librte_flow_classify/rte_flow_classify_version.map
new file mode 100644
index 00000000..f7695cbf
--- /dev/null
+++ b/lib/librte_flow_classify/rte_flow_classify_version.map
@@ -0,0 +1,12 @@
+EXPERIMENTAL {
+ global:
+
+ rte_flow_classifier_create;
+ rte_flow_classifier_free;
+ rte_flow_classifier_query;
+ rte_flow_classify_table_create;
+ rte_flow_classify_table_entry_add;
+ rte_flow_classify_table_entry_delete;
+
+ local: *;
+};
diff --git a/lib/librte_gro/Makefile b/lib/librte_gro/Makefile
index 747eeec9..eb423ccb 100644
--- a/lib/librte_gro/Makefile
+++ b/lib/librte_gro/Makefile
@@ -36,6 +36,7 @@ LIB = librte_gro.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_net
EXPORT_MAP := rte_gro_version.map
diff --git a/lib/librte_gro/rte_gro_version.map b/lib/librte_gro/rte_gro_version.map
index bb40bb41..1606b6dc 100644
--- a/lib/librte_gro/rte_gro_version.map
+++ b/lib/librte_gro/rte_gro_version.map
@@ -1,8 +1,8 @@
DPDK_17.08 {
global:
- rte_gro_ctrl_create;
- rte_gro_ctrl_destroy;
+ rte_gro_ctx_create;
+ rte_gro_ctx_destroy;
rte_gro_get_pkt_count;
rte_gro_reassemble;
rte_gro_reassemble_burst;
diff --git a/lib/librte_gso/Makefile b/lib/librte_gso/Makefile
new file mode 100644
index 00000000..ea5ad742
--- /dev/null
+++ b/lib/librte_gso/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_gso.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_net
+LDLIBS += -lrte_mempool
+
+EXPORT_MAP := rte_gso_version.map
+
+LIBABIVER := 1
+
+#source files
+SRCS-$(CONFIG_RTE_LIBRTE_GSO) += rte_gso.c
+SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_tcp4.c
+SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_tunnel_tcp4.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_GSO)-include += rte_gso.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_gso/gso_common.c b/lib/librte_gso/gso_common.c
new file mode 100644
index 00000000..ee75d4cd
--- /dev/null
+++ b/lib/librte_gso/gso_common.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+#include <errno.h>
+
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+
+#include "gso_common.h"
+
+static inline void
+hdr_segment_init(struct rte_mbuf *hdr_segment, struct rte_mbuf *pkt,
+ uint16_t pkt_hdr_offset)
+{
+ /* Copy MBUF metadata */
+ hdr_segment->nb_segs = 1;
+ hdr_segment->port = pkt->port;
+ hdr_segment->ol_flags = pkt->ol_flags;
+ hdr_segment->packet_type = pkt->packet_type;
+ hdr_segment->pkt_len = pkt_hdr_offset;
+ hdr_segment->data_len = pkt_hdr_offset;
+ hdr_segment->tx_offload = pkt->tx_offload;
+
+ /* Copy the packet header */
+ rte_memcpy(rte_pktmbuf_mtod(hdr_segment, char *),
+ rte_pktmbuf_mtod(pkt, char *),
+ pkt_hdr_offset);
+}
+
+static inline void
+free_gso_segment(struct rte_mbuf **pkts, uint16_t nb_pkts)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+}
+
+int
+gso_do_segment(struct rte_mbuf *pkt,
+ uint16_t pkt_hdr_offset,
+ uint16_t pyld_unit_size,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out)
+{
+ struct rte_mbuf *pkt_in;
+ struct rte_mbuf *hdr_segment, *pyld_segment, *prev_segment;
+ uint16_t pkt_in_data_pos, segment_bytes_remaining;
+ uint16_t pyld_len, nb_segs;
+ bool more_in_pkt, more_out_segs;
+
+ pkt_in = pkt;
+ nb_segs = 0;
+ more_in_pkt = 1;
+ pkt_in_data_pos = pkt_hdr_offset;
+
+ while (more_in_pkt) {
+ if (unlikely(nb_segs >= nb_pkts_out)) {
+ free_gso_segment(pkts_out, nb_segs);
+ return -EINVAL;
+ }
+
+ /* Allocate a direct MBUF */
+ hdr_segment = rte_pktmbuf_alloc(direct_pool);
+ if (unlikely(hdr_segment == NULL)) {
+ free_gso_segment(pkts_out, nb_segs);
+ return -ENOMEM;
+ }
+ /* Fill the packet header */
+ hdr_segment_init(hdr_segment, pkt, pkt_hdr_offset);
+
+ prev_segment = hdr_segment;
+ segment_bytes_remaining = pyld_unit_size;
+ more_out_segs = 1;
+
+ while (more_out_segs && more_in_pkt) {
+ /* Allocate an indirect MBUF */
+ pyld_segment = rte_pktmbuf_alloc(indirect_pool);
+ if (unlikely(pyld_segment == NULL)) {
+ rte_pktmbuf_free(hdr_segment);
+ free_gso_segment(pkts_out, nb_segs);
+ return -ENOMEM;
+ }
+ /* Attach to current MBUF segment of pkt */
+ rte_pktmbuf_attach(pyld_segment, pkt_in);
+
+ prev_segment->next = pyld_segment;
+ prev_segment = pyld_segment;
+
+ pyld_len = segment_bytes_remaining;
+ if (pyld_len + pkt_in_data_pos > pkt_in->data_len)
+ pyld_len = pkt_in->data_len - pkt_in_data_pos;
+
+ pyld_segment->data_off = pkt_in_data_pos +
+ pkt_in->data_off;
+ pyld_segment->data_len = pyld_len;
+
+ /* Update header segment */
+ hdr_segment->pkt_len += pyld_len;
+ hdr_segment->nb_segs++;
+
+ pkt_in_data_pos += pyld_len;
+ segment_bytes_remaining -= pyld_len;
+
+ /* Finish processing a MBUF segment of pkt */
+ if (pkt_in_data_pos == pkt_in->data_len) {
+ pkt_in = pkt_in->next;
+ pkt_in_data_pos = 0;
+ if (pkt_in == NULL)
+ more_in_pkt = 0;
+ }
+
+ /* Finish generating a GSO segment */
+ if (segment_bytes_remaining == 0)
+ more_out_segs = 0;
+ }
+ pkts_out[nb_segs++] = hdr_segment;
+ }
+ return nb_segs;
+}
diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h
new file mode 100644
index 00000000..145ea495
--- /dev/null
+++ b/lib/librte_gso/gso_common.h
@@ -0,0 +1,171 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _GSO_COMMON_H_
+#define _GSO_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#define IS_FRAGMENTED(frag_off) (((frag_off) & IPV4_HDR_OFFSET_MASK) != 0 \
+ || ((frag_off) & IPV4_HDR_MF_FLAG) == IPV4_HDR_MF_FLAG)
+
+#define TCP_HDR_PSH_MASK ((uint8_t)0x08)
+#define TCP_HDR_FIN_MASK ((uint8_t)0x01)
+
+#define IS_IPV4_TCP(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4)) == \
+ (PKT_TX_TCP_SEG | PKT_TX_IPV4))
+
+#define IS_IPV4_VXLAN_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
+ PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_VXLAN)) == \
+ (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
+ PKT_TX_TUNNEL_VXLAN))
+
+#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
+ PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
+ (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
+ PKT_TX_TUNNEL_GRE))
+
+/**
+ * Internal function which updates the UDP header of a packet, following
+ * segmentation. This is required to update the header's datagram length field.
+ *
+ * @param pkt
+ * The packet containing the UDP header.
+ * @param udp_offset
+ * The offset of the UDP header from the start of the packet.
+ */
+static inline void
+update_udp_header(struct rte_mbuf *pkt, uint16_t udp_offset)
+{
+ struct udp_hdr *udp_hdr;
+
+ udp_hdr = (struct udp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ udp_offset);
+ udp_hdr->dgram_len = rte_cpu_to_be_16(pkt->pkt_len - udp_offset);
+}
+
+/**
+ * Internal function which updates the TCP header of a packet, following
+ * segmentation. This is required to update the header's 'sent' sequence
+ * number, and also to clear 'PSH' and 'FIN' flags for non-tail segments.
+ *
+ * @param pkt
+ * The packet containing the TCP header.
+ * @param l4_offset
+ * The offset of the TCP header from the start of the packet.
+ * @param sent_seq
+ * The sent sequence number.
+ * @param non_tail
+ * Non-zero indicates a non-tail segment; zero indicates the tail segment.
+ */
+static inline void
+update_tcp_header(struct rte_mbuf *pkt, uint16_t l4_offset, uint32_t sent_seq,
+ uint8_t non_tail)
+{
+ struct tcp_hdr *tcp_hdr;
+
+ tcp_hdr = (struct tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ l4_offset);
+ tcp_hdr->sent_seq = rte_cpu_to_be_32(sent_seq);
+ if (likely(non_tail))
+ tcp_hdr->tcp_flags &= (~(TCP_HDR_PSH_MASK |
+ TCP_HDR_FIN_MASK));
+}
+
+/**
+ * Internal function which updates the IPv4 header of a packet, following
+ * segmentation. This is required to update the header's 'total_length' field,
+ * to reflect the reduced length of the now-segmented packet. Furthermore, the
+ * header's 'packet_id' field must be updated to reflect the new ID of the
+ * now-segmented packet.
+ *
+ * @param pkt
+ * The packet containing the IPv4 header.
+ * @param l3_offset
+ * The offset of the IPv4 header from the start of the packet.
+ * @param id
+ * The new ID of the packet.
+ */
+static inline void
+update_ipv4_header(struct rte_mbuf *pkt, uint16_t l3_offset, uint16_t id)
+{
+ struct ipv4_hdr *ipv4_hdr;
+
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ l3_offset);
+ ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len - l3_offset);
+ ipv4_hdr->packet_id = rte_cpu_to_be_16(id);
+}
+
+/**
+ * Internal function which divides the input packet into small segments.
+ * Each of the newly-created segments is organized as a two-segment MBUF,
+ * where the first segment is a standard mbuf, which stores a copy of
+ * packet header, and the second is an indirect mbuf which points to a
+ * section of data in the input packet.
+ *
+ * @param pkt
+ * Packet to segment.
+ * @param pkt_hdr_offset
+ * Packet header offset, measured in bytes.
+ * @param pyld_unit_size
+ * The max payload length of a GSO segment.
+ * @param direct_pool
+ * MBUF pool used for allocating direct buffers for output segments.
+ * @param indirect_pool
+ * MBUF pool used for allocating indirect buffers for output segments.
+ * @param pkts_out
+ * Pointer array used to keep the mbuf addresses of output segments. If
+ * the memory space in pkts_out is insufficient, gso_do_segment() fails
+ * and returns -EINVAL.
+ * @param nb_pkts_out
+ * The max number of items that pkts_out can keep.
+ *
+ * @return
+ * - The number of segments created in the event of success.
+ * - Return -ENOMEM if run out of memory in MBUF pools.
+ * - Return -EINVAL for invalid parameters.
+ */
+int gso_do_segment(struct rte_mbuf *pkt,
+ uint16_t pkt_hdr_offset,
+ uint16_t pyld_unit_size,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out);
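+
+/*
+ * A hypothetical caller sketch (the pools, mss and the bound of 8
+ * output segments are assumptions, not part of this API):
+ *
+ *	struct rte_mbuf *segs[8];
+ *	uint16_t hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
+ *	int n = gso_do_segment(pkt, hdr_len, mss, direct_pool,
+ *			indirect_pool, segs, RTE_DIM(segs));
+ *	if (n < 0)
+ *		return n;
+ */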
+#endif
diff --git a/lib/librte_gso/gso_tcp4.c b/lib/librte_gso/gso_tcp4.c
new file mode 100644
index 00000000..0c628cb1
--- /dev/null
+++ b/lib/librte_gso/gso_tcp4.c
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "gso_common.h"
+#include "gso_tcp4.h"
+
+static void
+update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
+ struct rte_mbuf **segs, uint16_t nb_segs)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct tcp_hdr *tcp_hdr;
+ uint32_t sent_seq;
+ uint16_t id, tail_idx, i;
+ uint16_t l3_offset = pkt->l2_len;
+ uint16_t l4_offset = l3_offset + pkt->l3_len;
+
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
+ l3_offset);
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
+ tail_idx = nb_segs - 1;
+
+ for (i = 0; i < nb_segs; i++) {
+ update_ipv4_header(segs[i], l3_offset, id);
+ update_tcp_header(segs[i], l4_offset, sent_seq, i < tail_idx);
+ id += ipid_delta;
+ sent_seq += (segs[i]->pkt_len - segs[i]->data_len);
+ }
+}
+
+int
+gso_tcp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+ uint8_t ipid_delta,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ uint16_t pyld_unit_size, hdr_offset;
+ uint16_t frag_off;
+ int ret;
+
+ /* Don't process the fragmented packet */
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ pkt->l2_len);
+ frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
+ if (unlikely(IS_FRAGMENTED(frag_off))) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ /* Don't process the packet without data */
+ hdr_offset = pkt->l2_len + pkt->l3_len + pkt->l4_len;
+ if (unlikely(hdr_offset >= pkt->pkt_len)) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ pyld_unit_size = gso_size - hdr_offset;
+
+ /* Segment the payload */
+ ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
+ indirect_pool, pkts_out, nb_pkts_out);
+ if (ret > 1)
+ update_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);
+
+ return ret;
+}
diff --git a/lib/librte_gso/gso_tcp4.h b/lib/librte_gso/gso_tcp4.h
new file mode 100644
index 00000000..1c574412
--- /dev/null
+++ b/lib/librte_gso/gso_tcp4.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _GSO_TCP4_H_
+#define _GSO_TCP4_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/**
+ * Segment an IPv4/TCP packet. This function doesn't check if the input
+ * packet has correct checksums, and doesn't update checksums for output
+ * GSO segments. Furthermore, it doesn't process IP fragment packets.
+ *
+ * @param pkt
+ * The packet mbuf to segment.
+ * @param gso_size
+ * The max length of a GSO segment, measured in bytes.
+ * @param ipid_delta
+ *  The increment between the IP ids of two consecutive output segments.
+ * @param direct_pool
+ * MBUF pool used for allocating direct buffers for output segments.
+ * @param indirect_pool
+ * MBUF pool used for allocating indirect buffers for output segments.
+ * @param pkts_out
+ *  Pointer array used to store the MBUF addresses of output GSO
+ *  segments, when the function succeeds. If pkts_out is too small to
+ *  hold all the output segments, the function fails and returns -EINVAL.
+ * @param nb_pkts_out
+ * The max number of items that 'pkts_out' can keep.
+ *
+ * @return
+ * - The number of GSO segments filled in pkts_out on success.
+ *  - Return -ENOMEM if the MBUF pools run out of memory.
+ * - Return -EINVAL for invalid parameters.
+ */
+int gso_tcp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+		uint8_t ipid_delta,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out);
+#endif
diff --git a/lib/librte_gso/gso_tunnel_tcp4.c b/lib/librte_gso/gso_tunnel_tcp4.c
new file mode 100644
index 00000000..8d0cfd7a
--- /dev/null
+++ b/lib/librte_gso/gso_tunnel_tcp4.c
@@ -0,0 +1,126 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "gso_common.h"
+#include "gso_tunnel_tcp4.h"
+
+static void
+update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
+ struct rte_mbuf **segs, uint16_t nb_segs)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct tcp_hdr *tcp_hdr;
+ uint32_t sent_seq;
+ uint16_t outer_id, inner_id, tail_idx, i;
+ uint16_t outer_ipv4_offset, inner_ipv4_offset;
+ uint16_t udp_gre_offset, tcp_offset;
+ uint8_t update_udp_hdr;
+
+ outer_ipv4_offset = pkt->outer_l2_len;
+ udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
+ inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
+ tcp_offset = inner_ipv4_offset + pkt->l3_len;
+
+ /* Outer IPv4 header. */
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ outer_ipv4_offset);
+ outer_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
+
+ /* Inner IPv4 header. */
+ ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ inner_ipv4_offset);
+ inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
+
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
+ tail_idx = nb_segs - 1;
+
+ /* Only update UDP header for VxLAN packets. */
+ update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
+
+ for (i = 0; i < nb_segs; i++) {
+ update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
+ if (update_udp_hdr)
+ update_udp_header(segs[i], udp_gre_offset);
+ update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
+ update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
+ outer_id++;
+ inner_id += ipid_delta;
+ sent_seq += (segs[i]->pkt_len - segs[i]->data_len);
+ }
+}
+
+int
+gso_tunnel_tcp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+ uint8_t ipid_delta,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out)
+{
+ struct ipv4_hdr *inner_ipv4_hdr;
+ uint16_t pyld_unit_size, hdr_offset, frag_off;
+ int ret = 1;
+
+ hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len;
+ inner_ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ hdr_offset);
+ /*
+	 * Don't process packets whose MF bit or fragment offset in the
+	 * inner IPv4 header is non-zero.
+ */
+ frag_off = rte_be_to_cpu_16(inner_ipv4_hdr->fragment_offset);
+ if (unlikely(IS_FRAGMENTED(frag_off))) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ hdr_offset += pkt->l3_len + pkt->l4_len;
+ /* Don't process the packet without data */
+ if (hdr_offset >= pkt->pkt_len) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+ pyld_unit_size = gso_size - hdr_offset;
+
+ /* Segment the payload */
+ ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
+ indirect_pool, pkts_out, nb_pkts_out);
+ if (ret <= 1)
+ return ret;
+
+ update_tunnel_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);
+
+ return ret;
+}
diff --git a/lib/librte_gso/gso_tunnel_tcp4.h b/lib/librte_gso/gso_tunnel_tcp4.h
new file mode 100644
index 00000000..3c67f0cd
--- /dev/null
+++ b/lib/librte_gso/gso_tunnel_tcp4.h
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _GSO_TUNNEL_TCP4_H_
+#define _GSO_TUNNEL_TCP4_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/**
+ * Segment a tunneling packet with inner TCP/IPv4 headers. This function
+ * doesn't check if the input packet has correct checksums, and doesn't
+ * update checksums for output GSO segments. Furthermore, it doesn't
+ * process IP fragment packets.
+ *
+ * @param pkt
+ * The packet mbuf to segment.
+ * @param gso_size
+ * The max length of a GSO segment, measured in bytes.
+ * @param ipid_delta
+ *  The increment between the IP ids of two consecutive output segments.
+ * @param direct_pool
+ * MBUF pool used for allocating direct buffers for output segments.
+ * @param indirect_pool
+ * MBUF pool used for allocating indirect buffers for output segments.
+ * @param pkts_out
+ *  Pointer array used to store the MBUF addresses of output GSO
+ *  segments, when the function succeeds. If pkts_out is too small to
+ *  hold all the output segments, the function fails and returns -EINVAL.
+ * @param nb_pkts_out
+ * The max number of items that 'pkts_out' can keep.
+ *
+ * @return
+ * - The number of GSO segments filled in pkts_out on success.
+ *  - Return -ENOMEM if the MBUF pools run out of memory.
+ * - Return -EINVAL for invalid parameters.
+ */
+int gso_tunnel_tcp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+ uint8_t ipid_delta,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out);
+#endif
diff --git a/lib/librte_gso/rte_gso.c b/lib/librte_gso/rte_gso.c
new file mode 100644
index 00000000..f86e6541
--- /dev/null
+++ b/lib/librte_gso/rte_gso.c
@@ -0,0 +1,110 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+
+#include <rte_log.h>
+#include <rte_ethdev.h>
+
+#include "rte_gso.h"
+#include "gso_common.h"
+#include "gso_tcp4.h"
+#include "gso_tunnel_tcp4.h"
+
+int
+rte_gso_segment(struct rte_mbuf *pkt,
+ const struct rte_gso_ctx *gso_ctx,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out)
+{
+ struct rte_mempool *direct_pool, *indirect_pool;
+ struct rte_mbuf *pkt_seg;
+ uint64_t ol_flags;
+ uint16_t gso_size;
+ uint8_t ipid_delta;
+ int ret = 1;
+
+ if (pkt == NULL || pkts_out == NULL || gso_ctx == NULL ||
+ nb_pkts_out < 1 ||
+ gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
+ ((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
+ return -EINVAL;
+
+ if (gso_ctx->gso_size >= pkt->pkt_len) {
+ pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ direct_pool = gso_ctx->direct_pool;
+ indirect_pool = gso_ctx->indirect_pool;
+ gso_size = gso_ctx->gso_size;
+ ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
+ ol_flags = pkt->ol_flags;
+
+ if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+ ((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+ pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
+ direct_pool, indirect_pool,
+ pkts_out, nb_pkts_out);
+ } else if (IS_IPV4_TCP(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+ pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
+ direct_pool, indirect_pool,
+ pkts_out, nb_pkts_out);
+ } else {
+ /* unsupported packet, skip */
+ pkts_out[0] = pkt;
+ RTE_LOG(DEBUG, GSO, "Unsupported packet type\n");
+ return 1;
+ }
+
+ if (ret > 1) {
+ pkt_seg = pkt;
+ while (pkt_seg) {
+ rte_mbuf_refcnt_update(pkt_seg, -1);
+ pkt_seg = pkt_seg->next;
+ }
+ } else if (ret < 0) {
+ /* Revert the ol_flags in the event of failure. */
+ pkt->ol_flags = ol_flags;
+ }
+
+ return ret;
+}
diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h
new file mode 100644
index 00000000..4b77176f
--- /dev/null
+++ b/lib/librte_gso/rte_gso.h
@@ -0,0 +1,148 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_GSO_H_
+#define _RTE_GSO_H_
+
+/**
+ * @file
+ * Interface to GSO library
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/* Minimum GSO segment size. */
+#define RTE_GSO_SEG_SIZE_MIN (sizeof(struct ether_hdr) + \
+ sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
+
+/* GSO flags for rte_gso_ctx. */
+#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)
+/**< Use fixed IP ids for output GSO segments. When this flag is
+ * not set, incremental IP ids are used.
+ */
+
+/**
+ * GSO context structure.
+ */
+struct rte_gso_ctx {
+ struct rte_mempool *direct_pool;
+ /**< MBUF pool for allocating direct buffers, which are used
+ * to store packet headers for GSO segments.
+ */
+ struct rte_mempool *indirect_pool;
+ /**< MBUF pool for allocating indirect buffers, which are used
+ * to locate packet payloads for GSO segments. The indirect
+ * buffer doesn't contain any data, but simply points to an
+ * offset within the packet to segment.
+ */
+ uint64_t flag;
+ /**< flag that controls specific attributes of output segments,
+ * such as the type of IP ID generated (i.e. fixed or incremental).
+ */
+ uint32_t gso_types;
+ /**< the bit mask of required GSO types. The GSO library
+	 * uses the same macros as those used to describe device TX
+ * offloading capabilities (i.e. DEV_TX_OFFLOAD_*_TSO) for
+ * gso_types.
+ *
+ * For example, if applications want to segment TCP/IPv4
+ * packets, set DEV_TX_OFFLOAD_TCP_TSO in gso_types.
+ */
+ uint16_t gso_size;
+ /**< maximum size of an output GSO segment, including packet
+ * header and payload, measured in bytes. Must exceed
+ * RTE_GSO_SEG_SIZE_MIN.
+ */
+};
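
For reference, a minimal sketch of filling the context described above;
the pool variables and the 1400-byte segment size are hypothetical, and
the pools are assumed to have been created beforehand (e.g. with
rte_pktmbuf_pool_create()).

struct rte_gso_ctx gso_ctx = {
	.direct_pool   = direct_pool,   /* holds copied packet headers */
	.indirect_pool = indirect_pool, /* only references payload data */
	.flag          = 0,             /* 0 selects incremental IP ids */
	.gso_types     = DEV_TX_OFFLOAD_TCP_TSO,
	.gso_size      = 1400,          /* must be >= RTE_GSO_SEG_SIZE_MIN */
};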
+
+/**
+ * Segmentation function, which supports processing of both single- and
+ * multi-MBUF packets.
+ *
+ * Note that we refer to the packets that are segmented from the input
+ * packet as 'GSO segments'. rte_gso_segment() doesn't check if the
+ * input packet has correct checksums, and doesn't update checksums for
+ * output GSO segments. Additionally, it doesn't process IP fragment
+ * packets.
+ *
+ * Before calling rte_gso_segment(), applications must set proper ol_flags
+ * for the packet. The GSO library uses the same macros as TSO does.
+ * For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
+ * flag is removed for all GSO segments and the input packet.
+ *
+ * Each of the newly-created GSO segments is organized as a two-segment
+ * MBUF, where the first segment is a standard MBUF, which stores a copy
+ * of packet header, and the second is an indirect MBUF which points to
+ * a section of data in the input packet. Since each GSO segment is
+ * made of multiple MBUFs (typically two), the driver of the interface
+ * to which the GSO segments are sent must support transmission of
+ * multi-segment packets.
+ *
+ * If the input packet is GSO'd, its mbuf refcnt is reduced by 1. Therefore,
+ * when all GSO segments are freed, the input packet is freed automatically.
+ *
+ * If the memory space in pkts_out or the MBUF pools is insufficient,
+ * this function fails and returns a negative errno value. Otherwise,
+ * GSO succeeds, and this function returns the number of output GSO
+ * segments filled in pkts_out.
+ *
+ * @param pkt
+ * The packet mbuf to segment.
+ * @param ctx
+ * GSO context object pointer.
+ * @param pkts_out
+ * Pointer array used to store the MBUF addresses of output GSO
+ * segments, when rte_gso_segment() succeeds.
+ * @param nb_pkts_out
+ * The max number of items that pkts_out can keep.
+ *
+ * @return
+ * - The number of GSO segments filled in pkts_out on success.
+ *  - Return -ENOMEM if the MBUF pools run out of memory.
+ * - Return -EINVAL for invalid parameters.
+ */
+int rte_gso_segment(struct rte_mbuf *pkt,
+ const struct rte_gso_ctx *ctx,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_GSO_H_ */
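
To make the calling convention above concrete, a hedged usage sketch
follows. It assumes 'pkt' is a TCP/IPv4 packet with l2_len/l3_len/l4_len
already set and 'gso_ctx' initialized as in the header; GSO_MAX_SEGS,
port_id and queue_id are hypothetical.

#define GSO_MAX_SEGS 64

struct rte_mbuf *segs[GSO_MAX_SEGS];
int i, nb;

pkt->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_SEG;
nb = rte_gso_segment(pkt, &gso_ctx, segs, GSO_MAX_SEGS);
if (nb < 0) {
	/* -EINVAL or -ENOMEM; ol_flags of pkt have been restored */
	rte_pktmbuf_free(pkt);
} else {
	/* each output entry is a multi-segment mbuf; the TX driver
	 * must support multi-segment packets
	 */
	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, segs,
					(uint16_t)nb);
	for (i = sent; i < nb; i++)
		rte_pktmbuf_free(segs[i]);
}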
diff --git a/lib/librte_gso/rte_gso_version.map b/lib/librte_gso/rte_gso_version.map
new file mode 100644
index 00000000..e1fd453e
--- /dev/null
+++ b/lib/librte_gso/rte_gso_version.map
@@ -0,0 +1,7 @@
+DPDK_17.11 {
+ global:
+
+ rte_gso_segment;
+
+ local: *;
+};
diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
index 9cf13a04..1655b601 100644
--- a/lib/librte_hash/Makefile
+++ b/lib/librte_hash/Makefile
@@ -36,6 +36,7 @@ LIB = librte_hash.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_ring
EXPORT_MAP := rte_hash_version.map
diff --git a/lib/librte_hash/rte_crc_arm64.h b/lib/librte_hash/rte_crc_arm64.h
index 774428be..a3c216bb 100644
--- a/lib/librte_hash/rte_crc_arm64.h
+++ b/lib/librte_hash/rte_crc_arm64.h
@@ -116,8 +116,7 @@ rte_hash_crc_set_alg(uint8_t alg)
}
/* Setting the best available algorithm */
-static inline void __attribute__((constructor))
-rte_hash_crc_init_alg(void)
+RTE_INIT(rte_hash_crc_init_alg)
{
rte_hash_crc_set_alg(CRC32_ARM64);
}
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 87b25c01..55fd7bdc 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -44,7 +44,6 @@
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
-#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
@@ -417,9 +416,9 @@ rte_hash_reset(struct rte_hash *h)
/* Search for an entry that can be pushed to its alternative location */
static inline int
-make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
+make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt,
+ unsigned int *nr_pushes)
{
- static unsigned int nr_pushes;
unsigned i, j;
int ret;
uint32_t next_bucket_idx;
@@ -456,15 +455,14 @@ make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
break;
/* All entries have been pushed, so entry cannot be added */
- if (i == RTE_HASH_BUCKET_ENTRIES || nr_pushes > RTE_HASH_MAX_PUSHES)
+ if (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES)
return -ENOSPC;
/* Set flag to indicate that this entry is going to be pushed */
bkt->flag[i] = 1;
- nr_pushes++;
/* Need room in alternative bucket to insert the pushed entry */
- ret = make_space_bucket(h, next_bkt[i]);
+ ret = make_space_bucket(h, next_bkt[i], nr_pushes);
/*
* After recursive function.
* Clear flags and insert the pushed entry
@@ -472,7 +470,6 @@ make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
* or return error
*/
bkt->flag[i] = 0;
- nr_pushes = 0;
if (ret >= 0) {
next_bkt[i]->sig_alt[ret] = bkt->sig_current[i];
next_bkt[i]->sig_current[ret] = bkt->sig_alt[i];
@@ -515,6 +512,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
unsigned n_slots;
unsigned lcore_id;
struct lcore_cache *cached_free_slots = NULL;
+ unsigned int nr_pushes = 0;
if (h->add_key == ADD_KEY_MULTIWRITER)
rte_spinlock_lock(h->multiwriter_lock);
@@ -648,7 +646,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
* if successful or return error and
* store the new slot back in the ring
*/
- ret = make_space_bucket(h, prim_bkt);
+ ret = make_space_bucket(h, prim_bkt, &nr_pushes);
if (ret >= 0) {
prim_bkt->sig_current[ret] = sig;
prim_bkt->sig_alt[ret] = alt_hash;
diff --git a/lib/librte_hash/rte_fbk_hash.c b/lib/librte_hash/rte_fbk_hash.c
index 55c9f358..c87719fb 100644
--- a/lib/librte_hash/rte_fbk_hash.c
+++ b/lib/librte_hash/rte_fbk_hash.c
@@ -39,7 +39,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
index ea6be522..4f815aea 100644
--- a/lib/librte_hash/rte_hash_crc.h
+++ b/lib/librte_hash/rte_hash_crc.h
@@ -480,8 +480,7 @@ rte_hash_crc_set_alg(uint8_t alg)
}
/* Setting the best available algorithm */
-static inline void __attribute__((constructor))
-rte_hash_crc_init_alg(void)
+RTE_INIT(rte_hash_crc_init_alg)
{
rte_hash_crc_set_alg(CRC32_SSE42_x64);
}
diff --git a/lib/librte_hash/rte_jhash.h b/lib/librte_hash/rte_jhash.h
index 207478c2..3eca1385 100644
--- a/lib/librte_hash/rte_jhash.h
+++ b/lib/librte_hash/rte_jhash.h
@@ -290,7 +290,10 @@ rte_jhash_32b_2hashes(const uint32_t *k, uint32_t length, uint32_t *pc, uint32_t
/**
* The most generic version, hashes an arbitrary sequence
* of bytes. No alignment or length assumptions are made about
- * the input key.
+ * the input key. For keys not aligned to four-byte boundaries, or
+ * whose length is not a multiple of four bytes, the memory region
+ * just after the key may be read (but not used in the computation).
+ * This may cross a page boundary.
*
* @param key
* Key to calculate hash of.
diff --git a/lib/librte_hash/rte_thash.h b/lib/librte_hash/rte_thash.h
index 2fffd61d..4fa5e07a 100644
--- a/lib/librte_hash/rte_thash.h
+++ b/lib/librte_hash/rte_thash.h
@@ -207,15 +207,14 @@ static inline uint32_t
rte_softrss(uint32_t *input_tuple, uint32_t input_len,
const uint8_t *rss_key)
{
- uint32_t i, j, ret = 0;
+ uint32_t i, j, map, ret = 0;
for (j = 0; j < input_len; j++) {
- for (i = 0; i < 32; i++) {
- if (input_tuple[j] & (1 << (31 - i))) {
- ret ^= rte_cpu_to_be_32(((const uint32_t *)rss_key)[j]) << i |
+ for (map = input_tuple[j]; map; map &= (map - 1)) {
+ i = rte_bsf32(map);
+ ret ^= rte_cpu_to_be_32(((const uint32_t *)rss_key)[j]) << (31 - i) |
(uint32_t)((uint64_t)(rte_cpu_to_be_32(((const uint32_t *)rss_key)[j + 1])) >>
- (32 - i));
- }
+ (i + 1));
}
}
return ret;
@@ -238,14 +237,13 @@ static inline uint32_t
rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
const uint8_t *rss_key)
{
- uint32_t i, j, ret = 0;
+ uint32_t i, j, map, ret = 0;
for (j = 0; j < input_len; j++) {
- for (i = 0; i < 32; i++) {
- if (input_tuple[j] & (1 << (31 - i))) {
- ret ^= ((const uint32_t *)rss_key)[j] << i |
- (uint32_t)((uint64_t)(((const uint32_t *)rss_key)[j + 1]) >> (32 - i));
- }
+ for (map = input_tuple[j]; map; map &= (map - 1)) {
+ i = rte_bsf32(map);
+ ret ^= ((const uint32_t *)rss_key)[j] << (31 - i) |
+ (uint32_t)((uint64_t)(((const uint32_t *)rss_key)[j + 1]) >> (i + 1));
}
}
return ret;
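
The rte_softrss/rte_softrss_be rewrite above replaces a fixed 32-iteration
bit test with a loop over only the set bits of each tuple word, using
'map &= map - 1' to clear the lowest set bit and rte_bsf32() to find its
index. A standalone sketch of that idiom (with __builtin_ctz standing in
for rte_bsf32) is shown below.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t map = 0x80000005u; /* hypothetical tuple word */
	uint32_t i;

	for (; map; map &= (map - 1)) {
		i = __builtin_ctz(map); /* index of the lowest set bit */
		printf("bit %u is set (MSB-first position %u)\n", i, 31 - i);
	}
	return 0;
}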
diff --git a/lib/librte_ip_frag/Makefile b/lib/librte_ip_frag/Makefile
index 4e693bf8..aff94b8c 100644
--- a/lib/librte_ip_frag/Makefile
+++ b/lib/librte_ip_frag/Makefile
@@ -36,8 +36,10 @@ LIB = librte_ip_frag.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev
+LDLIBS += -lrte_hash
-EXPORT_MAP := rte_ipfrag_version.map
+EXPORT_MAP := rte_ip_frag_version.map
LIBABIVER := 1
diff --git a/lib/librte_ip_frag/rte_ipfrag_version.map b/lib/librte_ip_frag/rte_ip_frag_version.map
index d1acf07c..d1acf07c 100644
--- a/lib/librte_ip_frag/rte_ipfrag_version.map
+++ b/lib/librte_ip_frag/rte_ip_frag_version.map
diff --git a/lib/librte_jobstats/Makefile b/lib/librte_jobstats/Makefile
index 561a0678..d0bddd12 100644
--- a/lib/librte_jobstats/Makefile
+++ b/lib/librte_jobstats/Makefile
@@ -36,6 +36,7 @@ LIB = librte_jobstats.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_jobstats_version.map
diff --git a/lib/librte_jobstats/rte_jobstats.h b/lib/librte_jobstats/rte_jobstats.h
index 7e76fd50..70e034ca 100644
--- a/lib/librte_jobstats/rte_jobstats.h
+++ b/lib/librte_jobstats/rte_jobstats.h
@@ -117,7 +117,7 @@ struct rte_jobstats_context {
/**< Minimum loop execute time. */
uint64_t max_exec_time;
- /**< Minimum loop execute time. */
+ /**< Maximum loop execute time. */
/**
* Sum of time that is not the execute time (ex: from job finish to next
diff --git a/lib/librte_kni/Makefile b/lib/librte_kni/Makefile
index 70f1ca8f..56b19760 100644
--- a/lib/librte_kni/Makefile
+++ b/lib/librte_kni/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_kni.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev
EXPORT_MAP := rte_kni_version.map
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 8c483c1f..5ee38e9a 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -456,7 +456,7 @@ va2pa(struct rte_mbuf *m)
{
return (void *)((unsigned long)m -
((unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr));
+ (unsigned long)m->buf_iova));
}
static void
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 37deb472..d1950791 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -63,13 +63,13 @@ struct rte_mbuf;
* Structure which has the function pointers for KNI interface.
*/
struct rte_kni_ops {
- uint8_t port_id; /* Port ID */
+ uint16_t port_id; /* Port ID */
/* Pointer to function of changing MTU */
- int (*change_mtu)(uint8_t port_id, unsigned new_mtu);
+ int (*change_mtu)(uint16_t port_id, unsigned int new_mtu);
/* Pointer to function of configuring network interface */
- int (*config_network_if)(uint8_t port_id, uint8_t if_up);
+ int (*config_network_if)(uint16_t port_id, uint8_t if_up);
};
/**
@@ -118,7 +118,7 @@ void rte_kni_init(unsigned int max_kni_ifaces);
* elements for each KNI interface allocated.
*
* @param pktmbuf_pool
- * The mempool for allocting mbufs for packets.
+ * The mempool for allocating mbufs for packets.
* @param conf
* The pointer to the configurations of the KNI device.
* @param ops
diff --git a/lib/librte_kvargs/Makefile b/lib/librte_kvargs/Makefile
index 564dd310..4eaa9334 100644
--- a/lib/librte_kvargs/Makefile
+++ b/lib/librte_kvargs/Makefile
@@ -37,6 +37,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_kvargs.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_kvargs_version.map
diff --git a/lib/librte_latencystats/Makefile b/lib/librte_latencystats/Makefile
index eaacbb73..665c7b41 100644
--- a/lib/librte_latencystats/Makefile
+++ b/lib/librte_latencystats/Makefile
@@ -36,6 +36,7 @@ LIB = librte_latencystats.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
LDLIBS += -lm
LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_metrics -lrte_ethdev -lrte_mbuf
EXPORT_MAP := rte_latencystats_version.map
diff --git a/lib/librte_latencystats/rte_latencystats.c b/lib/librte_latencystats/rte_latencystats.c
index ce029a12..d6ad13c4 100644
--- a/lib/librte_latencystats/rte_latencystats.c
+++ b/lib/librte_latencystats/rte_latencystats.c
@@ -135,7 +135,7 @@ rte_latencystats_fill_values(struct rte_metric_value *values)
}
static uint16_t
-add_time_stamps(uint8_t pid __rte_unused,
+add_time_stamps(uint16_t pid __rte_unused,
uint16_t qid __rte_unused,
struct rte_mbuf **pkts,
uint16_t nb_pkts,
@@ -165,7 +165,7 @@ add_time_stamps(uint8_t pid __rte_unused,
}
static uint16_t
-calc_latency(uint8_t pid __rte_unused,
+calc_latency(uint16_t pid __rte_unused,
uint16_t qid __rte_unused,
struct rte_mbuf **pkts,
uint16_t nb_pkts,
@@ -226,10 +226,10 @@ rte_latencystats_init(uint64_t app_samp_intvl,
rte_latency_stats_flow_type_fn user_cb)
{
unsigned int i;
- uint8_t pid;
+ uint16_t pid;
uint16_t qid;
struct rxtx_cbs *cbs = NULL;
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const char *ptr_strings[NUM_LATENCY_STATS] = {0};
const struct rte_memzone *mz = NULL;
const unsigned int flags = 0;
@@ -290,11 +290,11 @@ rte_latencystats_init(uint64_t app_samp_intvl,
int
rte_latencystats_uninit(void)
{
- uint8_t pid;
+ uint16_t pid;
uint16_t qid;
int ret = 0;
struct rxtx_cbs *cbs = NULL;
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
 /** Deregister Rx/Tx callbacks */
for (pid = 0; pid < nb_ports; pid++) {
diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile
index 32be46b3..2e8749e8 100644
--- a/lib/librte_lpm/Makefile
+++ b/lib/librte_lpm/Makefile
@@ -36,6 +36,7 @@ LIB = librte_lpm.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_lpm_version.map
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 64c074e9..e1f1fad5 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -43,7 +43,6 @@
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
@@ -218,6 +217,7 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+ rte_errno = ENOMEM;
goto exit;
}
@@ -227,6 +227,7 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
if (lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
@@ -292,6 +293,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+ rte_errno = ENOMEM;
goto exit;
}
@@ -301,6 +303,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
if (lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
@@ -312,6 +315,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
rte_free(lpm);
lpm = NULL;
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
@@ -324,6 +328,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
rte_free(lpm);
lpm = NULL;
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c
index b4a7df34..03668d9e 100644
--- a/lib/librte_lpm/rte_lpm6.c
+++ b/lib/librte_lpm/rte_lpm6.c
@@ -42,7 +42,6 @@
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
-#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
@@ -191,6 +190,7 @@ rte_lpm6_create(const char *name, int socket_id,
te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
+ rte_errno = ENOMEM;
goto exit;
}
@@ -201,6 +201,7 @@ rte_lpm6_create(const char *name, int socket_id,
if (lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
@@ -212,6 +213,7 @@ rte_lpm6_create(const char *name, int socket_id,
rte_free(lpm);
lpm = NULL;
rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
@@ -518,7 +520,7 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
uint32_t next_hop)
{
struct rte_lpm6_tbl_entry *tbl;
- struct rte_lpm6_tbl_entry *tbl_next;
+ struct rte_lpm6_tbl_entry *tbl_next = NULL;
int32_t rule_index;
int status;
uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
index 54827305..f6be3536 100644
--- a/lib/librte_mbuf/Makefile
+++ b/lib/librte_mbuf/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mbuf.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal -lrte_mempool
EXPORT_MAP := rte_mbuf_version.map
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 26a62b8e..2e08b9e9 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -46,7 +46,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -135,7 +134,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
/* start of buffer is after mbuf structure and priv data */
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
@@ -144,7 +143,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
/* init some constant fields */
m->pool = mp;
m->nb_segs = 1;
- m->port = 0xff;
+ m->port = MBUF_INVALID_PORT;
rte_mbuf_refcnt_set(m, 1);
m->next = NULL;
}
@@ -157,6 +156,7 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
{
struct rte_mempool *mp;
struct rte_pktmbuf_pool_private mbp_priv;
+ const char *mp_ops_name;
unsigned elt_size;
int ret;
@@ -176,8 +176,8 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
if (mp == NULL)
return NULL;
- ret = rte_mempool_set_ops_byname(mp,
- RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ mp_ops_name = rte_eal_mbuf_default_mempool_ops();
+ ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
if (ret != 0) {
RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
rte_mempool_free(mp);
@@ -211,8 +211,8 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
/* generic checks */
if (m->pool == NULL)
rte_panic("bad mbuf pool\n");
- if (m->buf_physaddr == 0)
- rte_panic("bad phys addr\n");
+ if (m->buf_iova == 0)
+ rte_panic("bad IO addr\n");
if (m->buf_addr == NULL)
rte_panic("bad virt addr\n");
@@ -243,8 +243,8 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
__rte_mbuf_sanity_check(m, 1);
- fprintf(f, "dump mbuf at %p, phys=%"PRIx64", buf_len=%u\n",
- m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
+ fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
+ m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
"in_port=%u\n", m->pkt_len, m->ol_flags,
(unsigned)m->nb_segs, (unsigned)m->port);
@@ -307,7 +307,7 @@ const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
switch (mask) {
- case PKT_RX_VLAN_PKT: return "PKT_RX_VLAN_PKT";
+ case PKT_RX_VLAN: return "PKT_RX_VLAN";
case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
case PKT_RX_FDIR: return "PKT_RX_FDIR";
case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
@@ -323,6 +323,8 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask)
case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
case PKT_RX_LRO: return "PKT_RX_LRO";
case PKT_RX_TIMESTAMP: return "PKT_RX_TIMESTAMP";
+ case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
+ case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
default: return NULL;
}
}
@@ -338,7 +340,7 @@ int
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
const struct flag_mask rx_flags[] = {
- { PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, NULL },
+ { PKT_RX_VLAN, PKT_RX_VLAN, NULL },
{ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
{ PKT_RX_FDIR, PKT_RX_FDIR, NULL },
{ PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
@@ -358,6 +360,9 @@ rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{ PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
{ PKT_RX_LRO, PKT_RX_LRO, NULL },
{ PKT_RX_TIMESTAMP, PKT_RX_TIMESTAMP, NULL },
+ { PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
+ { PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
+ { PKT_RX_QINQ, PKT_RX_QINQ, NULL },
};
const char *name;
unsigned int i;
@@ -410,6 +415,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
+ case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
default: return NULL;
}
}
@@ -443,6 +449,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{ PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK,
"PKT_TX_TUNNEL_NONE" },
{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
+ { PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
};
const char *name;
unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index eaed7eee..6d91f7d3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -89,12 +89,13 @@ extern "C" {
*/
/**
- * RX packet is a 802.1q VLAN packet. This flag was set by PMDs when
- * the packet is recognized as a VLAN, but the behavior between PMDs
- * was not the same. This flag is kept for some time to avoid breaking
- * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ * The RX packet is an 802.1q VLAN packet, and the tci has been
+ * saved in mbuf->vlan_tci.
+ * If the flag PKT_RX_VLAN_STRIPPED is also present, the VLAN
+ * header has been stripped from mbuf data, else it is still
+ * present.
*/
-#define PKT_RX_VLAN_PKT (1ULL << 0)
+#define PKT_RX_VLAN (1ULL << 0)
#define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicate. */
@@ -123,6 +124,7 @@ extern "C" {
* A vlan has been stripped by the hardware and its tci is saved in
* mbuf->vlan_tci. This can only happen if vlan stripping is enabled
* in the RX configuration of the PMD.
+ * When PKT_RX_VLAN_STRIPPED is set, PKT_RX_VLAN must also be set.
*/
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
@@ -165,19 +167,13 @@ extern "C" {
* The 2 vlans have been stripped by the hardware and their tci are
* saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
* This can only happen if vlan stripping is enabled in the RX
- * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
- * must also be set.
+ * configuration of the PMD.
+ * When PKT_RX_QINQ_STRIPPED is set, the flags (PKT_RX_VLAN |
+ * PKT_RX_VLAN_STRIPPED | PKT_RX_QINQ) must also be set.
*/
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
/**
- * Deprecated.
- * RX packet with double VLAN stripped.
- * This flag is replaced by PKT_RX_QINQ_STRIPPED.
- */
-#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
-
-/**
* When packets are coalesced by a hardware or virtual driver, this flag
* can be set in the RX mbuf, meaning that the m->tso_segsz field is
* valid and is set to the segment size of original packets.
@@ -189,11 +185,35 @@ extern "C" {
*/
#define PKT_RX_TIMESTAMP (1ULL << 17)
+/**
+ * Indicate that security offload processing was applied on the RX packet.
+ */
+#define PKT_RX_SEC_OFFLOAD (1ULL << 18)
+
+/**
+ * Indicate that security offload processing failed on the RX packet.
+ */
+#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
+
+/**
+ * The RX packet is a double VLAN packet, and the outer tci has been
+ * saved in mbuf->vlan_tci_outer.
+ * If the flag PKT_RX_QINQ_STRIPPED is also present, both VLAN
+ * headers have been stripped from mbuf data, else they are still
+ * present.
+ */
+#define PKT_RX_QINQ (1ULL << 20)
+
/* add new RX flags here */
/* add new TX flags here */
/**
+ * Request security offload processing on the TX packet.
+ */
+#define PKT_TX_SEC_OFFLOAD (1ULL << 43)
+
+/**
* Offload the MACsec. This flag must be set by the application to enable
* this offload feature for a packet to be transmitted.
*/
@@ -316,7 +336,8 @@ extern "C" {
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_MASK | \
- PKT_TX_MACSEC)
+ PKT_TX_MACSEC | \
+ PKT_TX_SEC_OFFLOAD)
#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
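
A minimal sketch of interpreting the reworked RX VLAN flags, following
the semantics documented above (PKT_RX_VLAN means vlan_tci is valid;
PKT_RX_VLAN_STRIPPED additionally means the header was removed from the
packet data); the helper is illustrative, not part of the library.

static inline void
handle_rx_vlan(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_VLAN) {
		uint16_t tci = m->vlan_tci; /* valid when PKT_RX_VLAN is set */

		if (m->ol_flags & PKT_RX_VLAN_STRIPPED) {
			/* VLAN header already stripped from the data */
		} else {
			/* VLAN header still present in the packet data */
		}
		(void)tci;
	}
}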
@@ -411,7 +432,11 @@ struct rte_mbuf {
* same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
* working on vector drivers easier.
*/
- phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr; /**< deprecated */
+ } __rte_aligned(sizeof(rte_iova_t));
/* next 8 bytes are initialised on RX descriptor rearm */
MARKER64 rearm_data;
@@ -456,8 +481,21 @@ struct rte_mbuf {
uint32_t l3_type:4; /**< (Outer) L3 type. */
uint32_t l4_type:4; /**< (Outer) L4 type. */
uint32_t tun_type:4; /**< Tunnel type. */
- uint32_t inner_l2_type:4; /**< Inner L2 type. */
- uint32_t inner_l3_type:4; /**< Inner L3 type. */
+ RTE_STD_C11
+ union {
+ uint8_t inner_esp_next_proto;
+ /**< ESP next protocol type, valid if
+ * RTE_PTYPE_TUNNEL_ESP tunnel type is set
+ * on both Tx and Rx.
+ */
+ __extension__
+ struct {
+ uint8_t inner_l2_type:4;
+ /**< Inner L2 type. */
+ uint8_t inner_l3_type:4;
+ /**< Inner L3 type. */
+ };
+ };
uint32_t inner_l4_type:4; /**< Inner L4 type. */
};
};
@@ -587,21 +625,28 @@ rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
/**
- * Return the DMA address of the beginning of the mbuf data
+ * Return the IO address of the beginning of the mbuf data
*
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + mb->data_off;
+ return rte_mbuf_data_iova(mb);
}
/**
- * Return the default DMA address of the beginning of the mbuf data
+ * Return the default IO address of the beginning of the mbuf data
*
* This function is used by drivers in their receive function, as it
* returns the location where data should be written by the NIC, taking
@@ -610,12 +655,19 @@ rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
- return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ return rte_mbuf_data_iova_default(mb);
}
/**
@@ -806,7 +858,7 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
* For standard needs, prefer rte_pktmbuf_alloc().
*
* The caller can expect that the following fields of the mbuf structure
- * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
* next=NULL, pool, priv_size. The other fields must be initialized
* by the caller.
*
@@ -1087,6 +1139,8 @@ static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
* @param m
 * The packet mbuf to be reset.
*/
+#define MBUF_INVALID_PORT UINT16_MAX
+
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
m->next = NULL;
@@ -1095,7 +1149,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
m->vlan_tci = 0;
m->vlan_tci_outer = 0;
m->nb_segs = 1;
- m->port = 0xff;
+ m->port = MBUF_INVALID_PORT;
m->ol_flags = 0;
m->packet_type = 0;
@@ -1214,7 +1268,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
rte_mbuf_refcnt_update(md, 1);
mi->priv_size = m->priv_size;
- mi->buf_physaddr = m->buf_physaddr;
+ mi->buf_iova = m->buf_iova;
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
@@ -1262,7 +1316,7 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
m->priv_size = priv_size;
m->buf_addr = (char *)m + mbuf_size;
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
m->buf_len = (uint16_t)buf_len;
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
@@ -1524,7 +1578,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
/**
- * A macro that returns the physical address that points to an offset of the
+ * A macro that returns the IO address that points to an offset of the
* start of the data in the mbuf
*
* @param m
@@ -1532,17 +1586,24 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
* @param o
* The offset into the data to calculate address from.
*/
+#define rte_pktmbuf_iova_offset(m, o) \
+ (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
+
+/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
- (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+ rte_pktmbuf_iova_offset(m, o)
/**
- * A macro that returns the physical address that points to the start of the
+ * A macro that returns the IO address that points to the start of the
* data in the mbuf
*
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
/**
* A macro that returns the length of the packet.
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index e5c4fae3..a623226c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -89,6 +89,9 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
case RTE_PTYPE_TUNNEL_NVGRE: return "TUNNEL_NVGRE";
case RTE_PTYPE_TUNNEL_GENEVE: return "TUNNEL_GENEVE";
case RTE_PTYPE_TUNNEL_GRENAT: return "TUNNEL_GRENAT";
+ case RTE_PTYPE_TUNNEL_GTPC: return "TUNNEL_GTPC";
+ case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
+ case RTE_PTYPE_TUNNEL_ESP: return "TUNNEL_ESP";
default: return "TUNNEL_UNKNOWN";
}
}
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index acd70bb6..5c62435c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -383,6 +383,49 @@ extern "C" {
*/
#define RTE_PTYPE_TUNNEL_GRENAT 0x00006000
/**
+ * GTP-C (GPRS Tunnelling Protocol) control tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2123>
+ * or,
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'source port'=2123>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'source port'=2123>
+ */
+#define RTE_PTYPE_TUNNEL_GTPC 0x00007000
+/**
+ * GTP-U (GPRS Tunnelling Protocol) user data tunneling packet type.
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=2152>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=2152>
+ */
+#define RTE_PTYPE_TUNNEL_GTPU 0x00008000
+/**
+ * ESP (IP Encapsulating Security Payload) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=51>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=51>
+ */
+#define RTE_PTYPE_TUNNEL_ESP 0x00009000
+/**
* Mask of tunneling packet types.
*/
#define RTE_PTYPE_TUNNEL_MASK 0x0000f000
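
A hedged sketch of classifying the new tunnel packet types with the mask
defined above; the helper name is hypothetical.

static inline const char *
tunnel_kind(uint32_t packet_type)
{
	switch (packet_type & RTE_PTYPE_TUNNEL_MASK) {
	case RTE_PTYPE_TUNNEL_GTPC:
		return "GTP-C";
	case RTE_PTYPE_TUNNEL_GTPU:
		return "GTP-U";
	case RTE_PTYPE_TUNNEL_ESP:
		return "ESP";
	default:
		return "other/none";
	}
}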
diff --git a/lib/librte_eal/linuxapp/xen_dom0/Makefile b/lib/librte_member/Makefile
index be51a82a..f4cf101e 100644
--- a/lib/librte_eal/linuxapp/xen_dom0/Makefile
+++ b/lib/librte_member/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -31,23 +31,22 @@
include $(RTE_SDK)/mk/rte.vars.mk
-#
-# module name and path
-#
-MODULE = rte_dom0_mm
+# library name
+LIB = librte_member.a
-#
-# CFLAGS
-#
-MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
-MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-MODULE_CFLAGS += -Wall -Werror
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += $(WERROR_FLAGS) -O3
-#
-# all source are stored in SRCS-y
-#
+LDLIBS += -lm
+LDLIBS += -lrte_eal -lrte_hash
+
+EXPORT_MAP := rte_member_version.map
-SRCS-y += dom0_mm_misc.c
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MEMBER) += rte_member.c rte_member_ht.c rte_member_vbf.c
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMBER)-include := rte_member.h
-include $(RTE_SDK)/mk/rte.module.mk
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_member/rte_member.c b/lib/librte_member/rte_member.c
new file mode 100644
index 00000000..cc9ea84a
--- /dev/null
+++ b/lib/librte_member/rte_member.c
@@ -0,0 +1,336 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "rte_member.h"
+#include "rte_member_ht.h"
+#include "rte_member_vbf.h"
+
+int librte_member_logtype;
+
+TAILQ_HEAD(rte_member_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_member_tailq = {
+ .name = "RTE_MEMBER",
+};
+EAL_REGISTER_TAILQ(rte_member_tailq)
+
+struct rte_member_setsum *
+rte_member_find_existing(const char *name)
+{
+ struct rte_member_setsum *setsum = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_member_list *member_list;
+
+ member_list = RTE_TAILQ_CAST(rte_member_tailq.head, rte_member_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, member_list, next) {
+ setsum = (struct rte_member_setsum *) te->data;
+ if (strncmp(name, setsum->name, RTE_MEMBER_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return setsum;
+}
+
+void
+rte_member_free(struct rte_member_setsum *setsum)
+{
+ struct rte_member_list *member_list;
+ struct rte_tailq_entry *te;
+
+ if (setsum == NULL)
+ return;
+ member_list = RTE_TAILQ_CAST(rte_member_tailq.head, rte_member_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, member_list, next) {
+ if (te->data == (void *)setsum)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+ TAILQ_REMOVE(member_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ rte_member_free_ht(setsum);
+ break;
+ case RTE_MEMBER_TYPE_VBF:
+ rte_member_free_vbf(setsum);
+ break;
+ default:
+ break;
+ }
+ rte_free(setsum);
+ rte_free(te);
+}
+
+struct rte_member_setsum *
+rte_member_create(const struct rte_member_parameters *params)
+{
+ struct rte_tailq_entry *te;
+ struct rte_member_list *member_list;
+ struct rte_member_setsum *setsum;
+ int ret;
+
+ if (params == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ if (params->key_len == 0 ||
+ params->prim_hash_seed == params->sec_hash_seed) {
+ rte_errno = EINVAL;
+ RTE_MEMBER_LOG(ERR, "Create setsummary with "
+ "invalid parameters\n");
+ return NULL;
+ }
+
+ member_list = RTE_TAILQ_CAST(rte_member_tailq.head, rte_member_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, member_list, next) {
+ setsum = (struct rte_member_setsum *) te->data;
+ if (strncmp(params->name, setsum->name,
+ RTE_MEMBER_NAMESIZE) == 0)
+ break;
+ }
+ setsum = NULL;
+ if (te != NULL) {
+ rte_errno = EEXIST;
+ te = NULL;
+ goto error_unlock_exit;
+ }
+ te = rte_zmalloc("MEMBER_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_MEMBER_LOG(ERR, "tailq entry allocation failed\n");
+ goto error_unlock_exit;
+ }
+
+ /* Create a new setsum structure */
+ setsum = (struct rte_member_setsum *) rte_zmalloc_socket(params->name,
+ sizeof(struct rte_member_setsum), RTE_CACHE_LINE_SIZE,
+ params->socket_id);
+ if (setsum == NULL) {
+ RTE_MEMBER_LOG(ERR, "Create setsummary failed\n");
+ goto error_unlock_exit;
+ }
+ snprintf(setsum->name, sizeof(setsum->name), "%s", params->name);
+ setsum->type = params->type;
+ setsum->socket_id = params->socket_id;
+ setsum->key_len = params->key_len;
+ setsum->num_set = params->num_set;
+ setsum->prim_hash_seed = params->prim_hash_seed;
+ setsum->sec_hash_seed = params->sec_hash_seed;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ ret = rte_member_create_ht(setsum, params);
+ break;
+ case RTE_MEMBER_TYPE_VBF:
+ ret = rte_member_create_vbf(setsum, params);
+ break;
+ default:
+ goto error_unlock_exit;
+ }
+ if (ret < 0)
+ goto error_unlock_exit;
+
+ RTE_MEMBER_LOG(DEBUG, "Creating a setsummary table with "
+ "mode %u\n", setsum->type);
+
+ te->data = (void *)setsum;
+ TAILQ_INSERT_TAIL(member_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return setsum;
+
+error_unlock_exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ /*
+ * On this path the setsum was never inserted into the tailq, so
+ * rte_member_free() would not find it; free te and setsum directly
+ * to avoid leaking them.
+ */
+ rte_free(te);
+ rte_free(setsum);
+ return NULL;
+}
+
+int
+rte_member_add(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t set_id)
+{
+ if (setsum == NULL || key == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_add_ht(setsum, key, set_id);
+ case RTE_MEMBER_TYPE_VBF:
+ return rte_member_add_vbf(setsum, key, set_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_member_lookup(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t *set_id)
+{
+ if (setsum == NULL || key == NULL || set_id == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_lookup_ht(setsum, key, set_id);
+ case RTE_MEMBER_TYPE_VBF:
+ return rte_member_lookup_vbf(setsum, key, set_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_member_lookup_bulk(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ member_set_t *set_ids)
+{
+ if (setsum == NULL || keys == NULL || set_ids == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_lookup_bulk_ht(setsum, keys, num_keys,
+ set_ids);
+ case RTE_MEMBER_TYPE_VBF:
+ return rte_member_lookup_bulk_vbf(setsum, keys, num_keys,
+ set_ids);
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_member_lookup_multi(const struct rte_member_setsum *setsum, const void *key,
+ uint32_t match_per_key, member_set_t *set_id)
+{
+ if (setsum == NULL || key == NULL || set_id == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_lookup_multi_ht(setsum, key, match_per_key,
+ set_id);
+ case RTE_MEMBER_TYPE_VBF:
+ return rte_member_lookup_multi_vbf(setsum, key, match_per_key,
+ set_id);
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_member_lookup_multi_bulk(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ uint32_t max_match_per_key, uint32_t *match_count,
+ member_set_t *set_ids)
+{
+ if (setsum == NULL || keys == NULL || set_ids == NULL ||
+ match_count == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_lookup_multi_bulk_ht(setsum, keys, num_keys,
+ max_match_per_key, match_count, set_ids);
+ case RTE_MEMBER_TYPE_VBF:
+ return rte_member_lookup_multi_bulk_vbf(setsum, keys, num_keys,
+ max_match_per_key, match_count, set_ids);
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_member_delete(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t set_id)
+{
+ if (setsum == NULL || key == NULL)
+ return -EINVAL;
+
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ return rte_member_delete_ht(setsum, key, set_id);
+ /* The current vBF implementation does not support deletion */
+ case RTE_MEMBER_TYPE_VBF:
+ default:
+ return -EINVAL;
+ }
+}
+
+void
+rte_member_reset(const struct rte_member_setsum *setsum)
+{
+ if (setsum == NULL)
+ return;
+ switch (setsum->type) {
+ case RTE_MEMBER_TYPE_HT:
+ rte_member_reset_ht(setsum);
+ return;
+ case RTE_MEMBER_TYPE_VBF:
+ rte_member_reset_vbf(setsum);
+ return;
+ default:
+ return;
+ }
+}
+
+RTE_INIT(librte_member_init_log);
+
+static void
+librte_member_init_log(void)
+{
+ librte_member_logtype = rte_log_register("librte.member");
+ if (librte_member_logtype >= 0)
+ rte_log_set_level(librte_member_logtype, RTE_LOG_DEBUG);
+}
diff --git a/lib/librte_member/rte_member.h b/lib/librte_member/rte_member.h
new file mode 100644
index 00000000..9b0c8f99
--- /dev/null
+++ b/lib/librte_member/rte_member.h
@@ -0,0 +1,513 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * RTE Membership Library
+ *
+ * The Membership Library is an extension and generalization of traditional
+ * filter structures (for example the Bloom filter and the cuckoo filter)
+ * that has multiple uses in a variety of workloads and applications. The
+ * library is used to test whether a key belongs to certain sets. Two types
+ * of such "set-summary" structures are implemented: hash-table based (HT)
+ * and vector bloom filter (vBF). The HT setsummary has two subtypes or
+ * modes: cache and non-cache. The table below summarizes some properties
+ * of the different implementations.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+/**
+ * <!--
+ * +==========+=====================+================+=========================+
+ * |   type   |        vbf          |    HT-cache    |      HT-non-cache       |
+ * +==========+=====================+================+=========================+
+ * |structure | bloom-filter array  | hash-table like, without storing the key |
+ * +----------+---------------------+------------------------------------------+
+ * |set id    | limited by bf count |               [1, 0x7fff]                |
+ * |          | up to 32.           |                                          |
+ * +----------+---------------------+----------------+-------------------------+
+ * |usages &  | small set range,    | cache most     | can delete,             |
+ * |properties| user-specified      | recent keys,   | big set range,          |
+ * |          | false-positive rate,| both false-    | small false-positive    |
+ * |          | no deletion support.| positive and   | rate depending on       |
+ * |          |                     | false-negative,| table size,             |
+ * |          |                     | automatic      | new key does not        |
+ * |          |                     | overwrite.     | overwrite existing key. |
+ * +----------+---------------------+----------------+-------------------------+
+ * -->
+ */
+
+#ifndef _RTE_MEMBER_H_
+#define _RTE_MEMBER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** The set ID type that is stored internally in the hash table based set summary. */
+typedef uint16_t member_set_t;
+/** Invalid set ID used to mean no match found. */
+#define RTE_MEMBER_NO_MATCH 0
+/** Maximum size of hash table that can be created. */
+#define RTE_MEMBER_ENTRIES_MAX (1 << 30)
+/** Maximum number of keys that can be searched as a bulk. */
+#define RTE_MEMBER_LOOKUP_BULK_MAX 64
+/** Entry count per bucket in hash table based mode. */
+#define RTE_MEMBER_BUCKET_ENTRIES 16
+/** Maximum number of characters in setsum name. */
+#define RTE_MEMBER_NAMESIZE 32
+
+/** @internal Hash function used by membership library. */
+#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#include <rte_hash_crc.h>
+#define MEMBER_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define MEMBER_HASH_FUNC rte_jhash
+#endif
+
+extern int librte_member_logtype;
+
+#define RTE_MEMBER_LOG(level, fmt, args...) \
+rte_log(RTE_LOG_ ## level, librte_member_logtype, "%s(): " fmt, \
+ __func__, ## args)
+
+/** @internal setsummary structure. */
+struct rte_member_setsum;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Parameter struct used to create set summary
+ */
+struct rte_member_parameters;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Define different set summary types
+ */
+enum rte_member_setsum_type {
+ RTE_MEMBER_TYPE_HT = 0, /**< Hash table based set summary. */
+ RTE_MEMBER_TYPE_VBF, /**< Vector of bloom filters. */
+ RTE_MEMBER_NUM_TYPE
+};
+
+/** @internal compare function for different arch. */
+enum rte_member_sig_compare_function {
+ RTE_MEMBER_COMPARE_SCALAR = 0,
+ RTE_MEMBER_COMPARE_AVX2,
+ RTE_MEMBER_COMPARE_NUM
+};
+
+/** @internal setsummary structure. */
+struct rte_member_setsum {
+ enum rte_member_setsum_type type; /* Type of the set summary. */
+ uint32_t key_len; /* Length of key. */
+ uint32_t prim_hash_seed; /* Primary hash function seed. */
+ uint32_t sec_hash_seed; /* Secondary hash function seed. */
+
+ /* Hash table based. */
+ uint32_t bucket_cnt; /* Number of buckets. */
+ uint32_t bucket_mask; /* Bit mask to get bucket index. */
+ /* For runtime selecting AVX, scalar, etc for signature comparison. */
+ enum rte_member_sig_compare_function sig_cmp_fn;
+ uint8_t cache; /* If it is cache mode for ht based. */
+
+ /* Vector bloom filter. */
+ uint32_t num_set; /* Number of set (bf) in vbf. */
+ uint32_t bits; /* Number of bits in each bf. */
+ uint32_t bit_mask; /* Bit mask to get bit location in bf. */
+ uint32_t num_hashes; /* Number of hash values to index bf. */
+
+ uint32_t mul_shift; /* vbf internal variable used during bit test. */
+ uint32_t div_shift; /* vbf internal variable used during bit test. */
+
+ void *table; /* This is the handler of hash table or vBF array. */
+
+
+ /* Second cache line should start here. */
+ uint32_t socket_id; /* NUMA Socket ID for memory. */
+ char name[RTE_MEMBER_NAMESIZE]; /* Name of this set summary. */
+} __rte_cache_aligned;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Parameters used when creating the set summary table. Currently the user
+ * can choose between two types of setsummary: HT based and vBF. For the HT
+ * based type, the user can additionally select cache or non-cache mode. The
+ * field comments below describe the differences; an illustrative
+ * initializer is sketched after the struct definition.
+ *
+ */
+struct rte_member_parameters {
+ const char *name; /**< Name of the hash. */
+
+ /**
+ * Type of the setsummary, one of rte_member_setsum_type.
+ *
+ * The HT based setsummary is implemented like a hash table. Use this
+ * type when there are many sets.
+ *
+ * The vBF setsummary is a vector of bloom filters. It is used when the
+ * number of sets is small (no more than 32 in the current
+ * implementation).
+ */
+ enum rte_member_setsum_type type;
+
+ /**
+ * is_cache is only used for the HT based setsummary.
+ *
+ * For an HT based setsummary, the user selects the subtype or mode
+ * here: cache or non-cache. Set is_cache to 1 for cache mode.
+ *
+ * In cache mode, keys can be evicted from the HT setsummary. Keys with
+ * the same signature that map to the same bucket will overwrite each
+ * other in the setsummary table. This mode is useful when the
+ * set-summary only needs to keep a record of the recently inserted
+ * keys. Both false negatives and false positives can happen.
+ *
+ * In non-cache mode, keys are never evicted from the table, so the
+ * setsummary will eventually become full. Keys with the same signature
+ * that map to the same bucket occupy separate entries. This mode never
+ * gives false-negative results.
+ */
+ uint8_t is_cache;
+
+ /**
+ * For an HT setsummary, num_keys equals the number of entries in the
+ * table. As the number of inserted keys approaches this number,
+ * eviction can happen. In cache mode, keys can be evicted from the
+ * table. In non-cache mode, keys are displaced to other buckets, as in
+ * a cuckoo hash, and the table is likely to become full before the
+ * number of inserted keys reaches the total number of entries.
+ *
+ * For vBF, num_keys equals the expected number of keys that will be
+ * inserted into the vBF. The implementation assumes the keys are
+ * evenly distributed across the BFs in the vBF. This is used to
+ * calculate the number of bits needed by each BF. The user does not
+ * specify the size of each BF directly, because the optimal size
+ * depends on num_keys and the false positive rate.
+ */
+ uint32_t num_keys;
+
+ /**
+ * Length of the key, used for hash calculation. Since keys are not
+ * stored in the set-summary, a longer key does not require more
+ * memory.
+ */
+ uint32_t key_len;
+
+ /**
+ * num_set is only used for vBF; it is ignored for the HT setsummary.
+ *
+ * num_set equals the number of BFs in the vBF. The current
+ * implementation supports only 1, 2, 4, 8, 16 or 32 BFs in one vBF
+ * setsummary. If another set count is needed, for example 5, the user
+ * should request the smallest supported value larger than 5, which
+ * is 8.
+ */
+ uint32_t num_set;
+
+ /**
+ * false_positive_rate is only used for vBF; it is ignored for the HT
+ * setsummary.
+ *
+ * For vBF, false_positive_rate is the user-defined false positive rate
+ * given the expected number of inserted keys (num_keys). It is used to
+ * calculate the total number of bits of each BF and the number of hash
+ * values used during lookup and insertion. For details please refer to
+ * the vBF implementation and the membership library documentation.
+ *
+ * For HT, this parameter is not set by the user. The HT setsummary's
+ * false positive rate is on the order of:
+ * false_pos = (1/bucket_count) * (1/2^16), since a 16-bit signature is
+ * used. This is because two keys need to map to the same bucket with
+ * the same signature to cause a collision (false positive).
+ * bucket_count equals the number of entries (num_keys) divided by the
+ * entry count per bucket (RTE_MEMBER_BUCKET_ENTRIES).
+ */
+ float false_positive_rate;
+
+ /**
+ * Two seeds are used to calculate two independent hashes for each key.
+ *
+ * For the HT type, one hash is used as the signature and the other for
+ * the bucket location.
+ * For the vBF type, these two hashes and their combinations are used
+ * as hash locations to index the bit array.
+ */
+ uint32_t prim_hash_seed;
+
+ /**
+ * The secondary seed should be a different value from the primary seed.
+ */
+ uint32_t sec_hash_seed;
+
+ int socket_id; /**< NUMA Socket ID for memory. */
+};
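+
+/*
+ * Illustrative initializer (a sketch, not part of the API): parameters for
+ * a vBF setsummary tracking up to 8 sets of roughly one million 16-byte
+ * keys with a 3% false positive target. All concrete values here are
+ * hypothetical.
+ *
+ *	const struct rte_member_parameters vbf_params = {
+ *		.name = "example_vbf",
+ *		.type = RTE_MEMBER_TYPE_VBF,
+ *		.num_keys = 1000000,
+ *		.key_len = 16,
+ *		.num_set = 8,
+ *		.false_positive_rate = 0.03,
+ *		.prim_hash_seed = 0x12345678,
+ *		.sec_hash_seed = 0x87654321,
+ *		.socket_id = SOCKET_ID_ANY,
+ *	};
+ */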
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Find an existing set-summary and return a pointer to it.
+ *
+ * @param name
+ * Name of the set-summary.
+ * @return
+ * Pointer to the set-summary, or NULL if it is not found, in which case
+ * rte_errno is set appropriately. Possible rte_errno values include:
+ * - ENOENT - no set-summary with this name exists
+ */
+struct rte_member_setsum *
+rte_member_find_existing(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create set-summary (SS).
+ *
+ * @param params
+ * Parameters to initialize the setsummary.
+ * @return
+ * Return the pointer to the setsummary.
+ * Return value is NULL if the creation failed.
+ */
+struct rte_member_setsum *
+rte_member_create(const struct rte_member_parameters *params);
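+
+/*
+ * Usage sketch (illustrative only), assuming a populated parameters struct
+ * named "params" such as the hypothetical initializer above:
+ *
+ *	struct rte_member_setsum *ss = rte_member_create(&params);
+ *	if (ss == NULL && rte_errno == EEXIST)
+ *		ss = rte_member_find_existing(params.name);
+ *	if (ss == NULL)
+ *		rte_exit(EXIT_FAILURE, "cannot obtain setsummary\n");
+ *	...
+ *	rte_member_free(ss);
+ */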
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup key in set-summary (SS).
+ * Single key lookup; it returns as soon as the first match is found.
+ *
+ * @param setsum
+ * Pointer of a setsummary.
+ * @param key
+ * Pointer of the key to be looked up.
+ * @param set_id
+ * Output argument set to the set id that matches the key.
+ * @return
+ * Return 1 if a match is found, 0 otherwise.
+ */
+int
+rte_member_lookup(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t *set_id);
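+
+/*
+ * Minimal lookup sketch (illustrative; "ss" and "key" are assumed to exist
+ * and "key" points to key_len bytes):
+ *
+ *	member_set_t set = RTE_MEMBER_NO_MATCH;
+ *	if (rte_member_lookup(ss, key, &set) == 1)
+ *		printf("key belongs to set %u\n", set);
+ */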
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup a bulk of keys in set-summary (SS).
+ * Each key lookup returns as soon as its first match is found.
+ *
+ * @param setsum
+ * Pointer of a setsummary.
+ * @param keys
+ * Pointer of the bulk of keys to be looked up.
+ * @param num_keys
+ * Number of keys to look up.
+ * @param set_ids
+ * Output set ids for all the keys, written into this array.
+ * The user should preallocate an array that can hold num_keys results.
+ * @return
+ * The number of keys that found a match.
+ */
+int
+rte_member_lookup_bulk(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ member_set_t *set_ids);
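+
+/*
+ * Bulk lookup sketch (illustrative; "keys" is assumed to hold num_keys
+ * pointers, with num_keys <= RTE_MEMBER_LOOKUP_BULK_MAX):
+ *
+ *	member_set_t set_ids[RTE_MEMBER_LOOKUP_BULK_MAX];
+ *	int hits = rte_member_lookup_bulk(ss, keys, num_keys, set_ids);
+ *
+ * Keys without a match have their set_ids entry set to RTE_MEMBER_NO_MATCH.
+ */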
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup a key in set-summary (SS) for multiple matches.
+ * The key lookup will find all matched entries (multiple match).
+ * Note that for the cache mode of HT, each key can have at most one match.
+ * This is because keys with the same signature that map to the same bucket
+ * overwrite each other, so multi-match lookup is meant for vBF and
+ * non-cache HT.
+ *
+ * @param setsum
+ * Pointer of a set-summary.
+ * @param key
+ * Pointer of the key to be looked up.
+ * @param max_match_per_key
+ * User-specified maximum number of matches for each key. The function
+ * returns as soon as this number of matches is found for the key.
+ * @param set_id
+ * Output set ids for all the matches of the key. The user needs to
+ * preallocate an array that can hold max_match_per_key results.
+ * @return
+ * The number of matches found for the key.
+ * For a cache mode HT set-summary, the number is at most 1.
+ */
+int
+rte_member_lookup_multi(const struct rte_member_setsum *setsum,
+ const void *key, uint32_t max_match_per_key,
+ member_set_t *set_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup a bulk of keys in set-summary (SS) for multiple matches per key.
+ * Each key lookup will find all matched entries (multiple match).
+ * Note that for cache mode HT, each key can have at most one match, so the
+ * multi-match functions are mainly used for vBF and non-cache mode HT.
+ *
+ * @param setsum
+ * Pointer of a setsummary.
+ * @param keys
+ * Pointer of the keys to be looked up.
+ * @param num_keys
+ * The number of keys to look up.
+ * @param max_match_per_key
+ * The maximum possible number of matches for each key.
+ * @param match_count
+ * Output array with the number of matches for each key.
+ * @param set_ids
+ * Output set ids for all the matches of all keys. The user passes in a
+ * preallocated 2D array with the first dimension as the key index and the
+ * second dimension as the match index, for example
+ * set_ids[num_keys][max_match_per_key].
+ * @return
+ * The number of keys that found one or more matches in the set-summary.
+ */
+int
+rte_member_lookup_multi_bulk(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ uint32_t max_match_per_key,
+ uint32_t *match_count,
+ member_set_t *set_ids);
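+
+/*
+ * Indexing sketch for the flattened 2D result layout (illustrative; N_KEYS,
+ * MAX_MATCH and use_match() are hypothetical): match j of key i is found at
+ * set_ids[i * MAX_MATCH + j].
+ *
+ *	uint32_t i, j;
+ *	uint32_t match_count[N_KEYS];
+ *	member_set_t set_ids[N_KEYS * MAX_MATCH];
+ *	rte_member_lookup_multi_bulk(ss, keys, N_KEYS, MAX_MATCH,
+ *			match_count, set_ids);
+ *	for (i = 0; i < N_KEYS; i++)
+ *		for (j = 0; j < match_count[i]; j++)
+ *			use_match(i, set_ids[i * MAX_MATCH + j]);
+ */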
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Insert a key into the set-summary (SS).
+ *
+ * @param setsum
+ * Pointer of a set-summary.
+ * @param key
+ * Pointer of the key to be added.
+ * @param set_id
+ * The set id associated with the key that needs to be added. Different
+ * modes support different set_id ranges. 0 cannot be used as a set_id
+ * since RTE_MEMBER_NO_MATCH is defined as 0.
+ * For HT mode, the set_id range is [1, 0x7FFF]; the MSB is reserved.
+ * For vBF mode, the set id is limited by the num_set parameter used when
+ * creating the set-summary.
+ * @return
+ * HT (cache mode) and vBF should never fail unless set_id is out of the
+ * valid range, in which case -EINVAL is returned.
+ * HT (non-cache mode) can fail with -ENOSPC when the table is full.
+ * On success, different modes return different values to provide extra
+ * information:
+ * HT (cache mode) returns 0 if the add causes no eviction, 1 otherwise.
+ * HT (non-cache mode) returns 0 on plain success and 1 if a cuckoo
+ * displacement happens.
+ * vBF mode always returns 0.
+ */
+int
+rte_member_add(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t set_id);
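+
+/*
+ * Insertion sketch (illustrative); the meaning of the return value depends
+ * on the mode, as described above. handle_error() and note_eviction() are
+ * hypothetical application callbacks:
+ *
+ *	int ret = rte_member_add(ss, key, 5);
+ *	if (ret < 0)
+ *		handle_error(ret);	-EINVAL, or -ENOSPC for non-cache HT
+ *	else if (ret == 1)
+ *		note_eviction();	cache-mode eviction or displacement
+ */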
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * De-allocate memory used by set-summary.
+ *
+ * @param setsum
+ * Pointer to the set summary.
+ */
+void
+rte_member_free(struct rte_member_setsum *setsum);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset the set-summary tables, e.g. reset all bits to 0 in the BFs of a
+ * vBF, or reset the set_id of each entry to RTE_MEMBER_NO_MATCH in an HT
+ * based SS.
+ *
+ * @param setsum
+ * Pointer to the set-summary.
+ */
+void
+rte_member_reset(const struct rte_member_setsum *setsum);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete items from the set-summary. Note that the current vBF
+ * implementation does not support deletion; for vBF an error code of
+ * -EINVAL is returned.
+ *
+ * @param setsum
+ * Pointer to the set-summary.
+ * @param key
+ * Pointer of the key to be deleted.
+ * @param set_id
+ * For HT mode, both the key and its corresponding set_id are needed to
+ * properly delete the key. Without the set_id, other keys with the same
+ * signature could be deleted by mistake.
+ * @return
+ * 0 on success; -ENOENT if no matching entry is found.
+ */
+int
+rte_member_delete(const struct rte_member_setsum *setsum, const void *key,
+ member_set_t set_id);
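+
+/*
+ * Deletion sketch (illustrative, HT mode only); the same key/set_id pair
+ * that was passed to rte_member_add() must be supplied:
+ *
+ *	if (rte_member_delete(ss, key, 5) < 0)
+ *		printf("key not present (or deletion unsupported)\n");
+ */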
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMBER_H_ */
diff --git a/lib/librte_member/rte_member_ht.c b/lib/librte_member/rte_member_ht.c
new file mode 100644
index 00000000..59332d56
--- /dev/null
+++ b/lib/librte_member/rte_member_ht.c
@@ -0,0 +1,586 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_prefetch.h>
+#include <rte_random.h>
+#include <rte_log.h>
+
+#include "rte_member.h"
+#include "rte_member_ht.h"
+
+#if defined(RTE_ARCH_X86)
+#include "rte_member_x86.h"
+#endif
+
+/* Search bucket for entry with tmp_sig and update set_id */
+static inline int
+update_entry_search(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ member_set_t set_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ if (buckets[bucket_id].sigs[i] == tmp_sig) {
+ buckets[bucket_id].sets[i] = set_id;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline int
+search_bucket_single(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ member_set_t *set_id)
+{
+ uint32_t iter;
+
+ for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
+ if (tmp_sig == buckets[bucket_id].sigs[iter] &&
+ buckets[bucket_id].sets[iter] !=
+ RTE_MEMBER_NO_MATCH) {
+ *set_id = buckets[bucket_id].sets[iter];
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline void
+search_bucket_multi(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ uint32_t *counter,
+ uint32_t matches_per_key,
+ member_set_t *set_id)
+{
+ uint32_t iter;
+
+ for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
+ if (tmp_sig == buckets[bucket_id].sigs[iter] &&
+ buckets[bucket_id].sets[iter] !=
+ RTE_MEMBER_NO_MATCH) {
+ set_id[*counter] = buckets[bucket_id].sets[iter];
+ (*counter)++;
+ if (*counter >= matches_per_key)
+ return;
+ }
+ }
+}
+
+int
+rte_member_create_ht(struct rte_member_setsum *ss,
+ const struct rte_member_parameters *params)
+{
+ uint32_t i, j;
+ uint32_t size_bucket_t;
+ uint32_t num_entries = rte_align32pow2(params->num_keys);
+
+ if ((num_entries > RTE_MEMBER_ENTRIES_MAX) ||
+ !rte_is_power_of_2(RTE_MEMBER_BUCKET_ENTRIES) ||
+ num_entries < RTE_MEMBER_BUCKET_ENTRIES) {
+ rte_errno = EINVAL;
+ RTE_MEMBER_LOG(ERR,
+ "Membership HT create with invalid parameters\n");
+ return -EINVAL;
+ }
+
+ uint32_t num_buckets = num_entries / RTE_MEMBER_BUCKET_ENTRIES;
+
+ size_bucket_t = sizeof(struct member_ht_bucket);
+
+ struct member_ht_bucket *buckets = rte_zmalloc_socket(NULL,
+ num_buckets * size_bucket_t,
+ RTE_CACHE_LINE_SIZE, ss->socket_id);
+
+ if (buckets == NULL) {
+ RTE_MEMBER_LOG(ERR, "memory allocation failed for HT "
+ "setsummary\n");
+ return -ENOMEM;
+ }
+
+ ss->table = buckets;
+ ss->bucket_cnt = num_buckets;
+ ss->bucket_mask = num_buckets - 1;
+ ss->cache = params->is_cache;
+
+ for (i = 0; i < num_buckets; i++) {
+ for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
+ buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
+ }
+#if defined(RTE_ARCH_X86)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
+ RTE_MEMBER_BUCKET_ENTRIES == 16)
+ ss->sig_cmp_fn = RTE_MEMBER_COMPARE_AVX2;
+ else
+#endif
+ ss->sig_cmp_fn = RTE_MEMBER_COMPARE_SCALAR;
+
+ RTE_MEMBER_LOG(DEBUG, "Hash table based filter created, "
+ "the table has %u entries, %u buckets\n",
+ num_entries, num_buckets);
+ return 0;
+}
+
+static inline void
+get_buckets_index(const struct rte_member_setsum *ss, const void *key,
+ uint32_t *prim_bkt, uint32_t *sec_bkt, member_sig_t *sig)
+{
+ uint32_t first_hash = MEMBER_HASH_FUNC(key, ss->key_len,
+ ss->prim_hash_seed);
+ uint32_t sec_hash = MEMBER_HASH_FUNC(&first_hash, sizeof(uint32_t),
+ ss->sec_hash_seed);
+ /*
+ * We use the first hash value for the signature, and the second hash
+ * value to derive the primary and secondary bucket locations.
+ *
+ * For non-cache mode, we use the lower bits for the primary bucket
+ * location, then xor the primary bucket location and the signature
+ * to get the secondary bucket location. This is the "partial-key
+ * cuckoo hashing" proposed in B. Fan et al.'s paper
+ * "Cuckoo Filter: Practically Better Than Bloom". The benefit of
+ * using xor is that the alternative bucket location can be derived
+ * from only the current bucket location and the signature, which
+ * non-cache mode's displacement and deletion process requires,
+ * without storing the alternative hash value or the full key.
+ *
+ * For cache mode, we use the lower bits for the primary bucket
+ * location and the higher bits for the secondary bucket location. In
+ * cache mode, keys are simply overwritten if a bucket is full. We do
+ * not use xor since the lower/higher bits are more independent hash
+ * values and thus should provide a slightly better table load.
+ */
+ *sig = first_hash;
+ if (ss->cache) {
+ *prim_bkt = sec_hash & ss->bucket_mask;
+ *sec_bkt = (sec_hash >> 16) & ss->bucket_mask;
+ } else {
+ *prim_bkt = sec_hash & ss->bucket_mask;
+ *sec_bkt = (*prim_bkt ^ *sig) & ss->bucket_mask;
+ }
+}
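+
+/*
+ * A note on the xor trick above (non-cache mode): since
+ * sec = (prim ^ sig) & bucket_mask, the mapping is an involution, so for
+ * any entry the alternative bucket is always (bucket ^ sig) & bucket_mask,
+ * no matter which of its two buckets the entry currently occupies.
+ * make_space_bucket() below relies on exactly this identity when it
+ * displaces entries.
+ */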
+
+int
+rte_member_lookup_ht(const struct rte_member_setsum *ss,
+ const void *key, member_set_t *set_id)
+{
+ uint32_t prim_bucket, sec_bucket;
+ member_sig_t tmp_sig;
+ struct member_ht_bucket *buckets = ss->table;
+
+ *set_id = RTE_MEMBER_NO_MATCH;
+ get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);
+
+ switch (ss->sig_cmp_fn) {
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX2)
+ case RTE_MEMBER_COMPARE_AVX2:
+ if (search_bucket_single_avx(prim_bucket, tmp_sig, buckets,
+ set_id) ||
+ search_bucket_single_avx(sec_bucket, tmp_sig,
+ buckets, set_id))
+ return 1;
+ break;
+#endif
+ default:
+ if (search_bucket_single(prim_bucket, tmp_sig, buckets,
+ set_id) ||
+ search_bucket_single(sec_bucket, tmp_sig,
+ buckets, set_id))
+ return 1;
+ }
+
+ return 0;
+}
+
+uint32_t
+rte_member_lookup_bulk_ht(const struct rte_member_setsum *ss,
+ const void **keys, uint32_t num_keys, member_set_t *set_id)
+{
+ uint32_t i;
+ uint32_t num_matches = 0;
+ struct member_ht_bucket *buckets = ss->table;
+ member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
+
+ for (i = 0; i < num_keys; i++) {
+ get_buckets_index(ss, keys[i], &prim_buckets[i],
+ &sec_buckets[i], &tmp_sig[i]);
+ rte_prefetch0(&buckets[prim_buckets[i]]);
+ rte_prefetch0(&buckets[sec_buckets[i]]);
+ }
+
+ for (i = 0; i < num_keys; i++) {
+ switch (ss->sig_cmp_fn) {
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX2)
+ case RTE_MEMBER_COMPARE_AVX2:
+ if (search_bucket_single_avx(prim_buckets[i],
+ tmp_sig[i], buckets, &set_id[i]) ||
+ search_bucket_single_avx(sec_buckets[i],
+ tmp_sig[i], buckets, &set_id[i]))
+ num_matches++;
+ else
+ set_id[i] = RTE_MEMBER_NO_MATCH;
+ break;
+#endif
+ default:
+ if (search_bucket_single(prim_buckets[i], tmp_sig[i],
+ buckets, &set_id[i]) ||
+ search_bucket_single(sec_buckets[i],
+ tmp_sig[i], buckets, &set_id[i]))
+ num_matches++;
+ else
+ set_id[i] = RTE_MEMBER_NO_MATCH;
+ }
+ }
+ return num_matches;
+}
+
+uint32_t
+rte_member_lookup_multi_ht(const struct rte_member_setsum *ss,
+ const void *key, uint32_t match_per_key,
+ member_set_t *set_id)
+{
+ uint32_t num_matches = 0;
+ uint32_t prim_bucket, sec_bucket;
+ member_sig_t tmp_sig;
+ struct member_ht_bucket *buckets = ss->table;
+
+ get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);
+
+ switch (ss->sig_cmp_fn) {
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX2)
+ case RTE_MEMBER_COMPARE_AVX2:
+ search_bucket_multi_avx(prim_bucket, tmp_sig, buckets,
+ &num_matches, match_per_key, set_id);
+ if (num_matches < match_per_key)
+ search_bucket_multi_avx(sec_bucket, tmp_sig,
+ buckets, &num_matches, match_per_key, set_id);
+ return num_matches;
+#endif
+ default:
+ search_bucket_multi(prim_bucket, tmp_sig, buckets, &num_matches,
+ match_per_key, set_id);
+ if (num_matches < match_per_key)
+ search_bucket_multi(sec_bucket, tmp_sig,
+ buckets, &num_matches, match_per_key, set_id);
+ return num_matches;
+ }
+}
+
+uint32_t
+rte_member_lookup_multi_bulk_ht(const struct rte_member_setsum *ss,
+ const void **keys, uint32_t num_keys, uint32_t match_per_key,
+ uint32_t *match_count,
+ member_set_t *set_ids)
+{
+ uint32_t i;
+ uint32_t num_matches = 0;
+ struct member_ht_bucket *buckets = ss->table;
+ uint32_t match_cnt_tmp;
+ member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
+
+ for (i = 0; i < num_keys; i++) {
+ get_buckets_index(ss, keys[i], &prim_buckets[i],
+ &sec_buckets[i], &tmp_sig[i]);
+ rte_prefetch0(&buckets[prim_buckets[i]]);
+ rte_prefetch0(&buckets[sec_buckets[i]]);
+ }
+ for (i = 0; i < num_keys; i++) {
+ match_cnt_tmp = 0;
+
+ switch (ss->sig_cmp_fn) {
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX2)
+ case RTE_MEMBER_COMPARE_AVX2:
+ search_bucket_multi_avx(prim_buckets[i], tmp_sig[i],
+ buckets, &match_cnt_tmp, match_per_key,
+ &set_ids[i*match_per_key]);
+ if (match_cnt_tmp < match_per_key)
+ search_bucket_multi_avx(sec_buckets[i],
+ tmp_sig[i], buckets, &match_cnt_tmp,
+ match_per_key,
+ &set_ids[i*match_per_key]);
+ match_count[i] = match_cnt_tmp;
+ if (match_cnt_tmp != 0)
+ num_matches++;
+ break;
+#endif
+ default:
+ search_bucket_multi(prim_buckets[i], tmp_sig[i],
+ buckets, &match_cnt_tmp, match_per_key,
+ &set_ids[i*match_per_key]);
+ if (match_cnt_tmp < match_per_key)
+ search_bucket_multi(sec_buckets[i], tmp_sig[i],
+ buckets, &match_cnt_tmp, match_per_key,
+ &set_ids[i*match_per_key]);
+ match_count[i] = match_cnt_tmp;
+ if (match_cnt_tmp != 0)
+ num_matches++;
+ }
+ }
+ return num_matches;
+}
+
+static inline int
+try_insert(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
+ member_sig_t sig, member_set_t set_id)
+{
+ int i;
+ /* If not full then insert into one slot */
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ if (buckets[prim].sets[i] == RTE_MEMBER_NO_MATCH) {
+ buckets[prim].sigs[i] = sig;
+ buckets[prim].sets[i] = set_id;
+ return 0;
+ }
+ }
+ /* If prim failed, we need to access second bucket */
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ if (buckets[sec].sets[i] == RTE_MEMBER_NO_MATCH) {
+ buckets[sec].sigs[i] = sig;
+ buckets[sec].sets[i] = set_id;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static inline int
+try_update(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
+ member_sig_t sig, member_set_t set_id,
+ enum rte_member_sig_compare_function cmp_fn)
+{
+ switch (cmp_fn) {
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX2)
+ case RTE_MEMBER_COMPARE_AVX2:
+ if (update_entry_search_avx(prim, sig, buckets, set_id) ||
+ update_entry_search_avx(sec, sig, buckets,
+ set_id))
+ return 0;
+ break;
+#endif
+ default:
+ if (update_entry_search(prim, sig, buckets, set_id) ||
+ update_entry_search(sec, sig, buckets,
+ set_id))
+ return 0;
+ }
+ return -1;
+}
+
+static inline int
+evict_from_bucket(void)
+{
+ /* For now, we randomly pick one entry to evict */
+ return rte_rand() & (RTE_MEMBER_BUCKET_ENTRIES - 1);
+}
+
+/*
+ * This function is similar to the cuckoo hash make_space function in the
+ * hash library.
+ */
+static inline int
+make_space_bucket(const struct rte_member_setsum *ss, uint32_t bkt_idx,
+ unsigned int *nr_pushes)
+{
+ unsigned int i, j;
+ int ret;
+ struct member_ht_bucket *buckets = ss->table;
+ uint32_t next_bucket_idx;
+ struct member_ht_bucket *next_bkt[RTE_MEMBER_BUCKET_ENTRIES];
+ struct member_ht_bucket *bkt = &buckets[bkt_idx];
+ /* MSB is set to indicate if an entry has been already pushed */
+ member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);
+
+ /*
+ * Push existing item (search for bucket with space in
+ * alternative locations) to its alternative location
+ */
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ /* Search for space in alternative locations */
+ next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
+ next_bkt[i] = &buckets[next_bucket_idx];
+ for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++) {
+ if (next_bkt[i]->sets[j] == RTE_MEMBER_NO_MATCH)
+ break;
+ }
+
+ if (j != RTE_MEMBER_BUCKET_ENTRIES)
+ break;
+ }
+
+ /* Alternative location has spare room (end of recursive function) */
+ if (i != RTE_MEMBER_BUCKET_ENTRIES) {
+ next_bkt[i]->sigs[j] = bkt->sigs[i];
+ next_bkt[i]->sets[j] = bkt->sets[i];
+ return i;
+ }
+
+ /* Pick entry that has not been pushed yet */
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++)
+ if ((bkt->sets[i] & flag_mask) == 0)
+ break;
+
+ /* All entries have been pushed, so entry cannot be added */
+ if (i == RTE_MEMBER_BUCKET_ENTRIES ||
+ ++(*nr_pushes) > RTE_MEMBER_MAX_PUSHES)
+ return -ENOSPC;
+
+ next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
+ /* Set flag to indicate that this entry is going to be pushed */
+ bkt->sets[i] |= flag_mask;
+
+ /* Need room in alternative bucket to insert the pushed entry */
+ ret = make_space_bucket(ss, next_bucket_idx, nr_pushes);
+ /*
+ * After recursive function.
+ * Clear flags and insert the pushed entry
+ * in its alternative location if successful,
+ * or return error
+ */
+ bkt->sets[i] &= ~flag_mask;
+ if (ret >= 0) {
+ next_bkt[i]->sigs[ret] = bkt->sigs[i];
+ next_bkt[i]->sets[ret] = bkt->sets[i];
+ return i;
+ } else
+ return ret;
+}
+
+int
+rte_member_add_ht(const struct rte_member_setsum *ss,
+ const void *key, member_set_t set_id)
+{
+ int ret;
+ unsigned int nr_pushes = 0;
+ uint32_t prim_bucket, sec_bucket;
+ member_sig_t tmp_sig;
+ struct member_ht_bucket *buckets = ss->table;
+ member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);
+
+ if (set_id == RTE_MEMBER_NO_MATCH || (set_id & flag_mask) != 0)
+ return -EINVAL;
+
+ get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);
+
+ /*
+ * If it is cache based setsummary, we try overwriting (updating)
+ * existing entry with the same signature first. In cache mode, we allow
+ * false negatives and only cache the most recent keys.
+ *
+ * For non-cache mode, we do not update existing entry with the same
+ * signature. This is because if two keys with same signature update
+ * each other, false negative may happen, which is not the expected
+ * behavior for non-cache setsummary.
+ */
+ if (ss->cache) {
+ ret = try_update(buckets, prim_bucket, sec_bucket, tmp_sig,
+ set_id, ss->sig_cmp_fn);
+ if (ret != -1)
+ return ret;
+ }
+ /* If not full then insert into one slot */
+ ret = try_insert(buckets, prim_bucket, sec_bucket, tmp_sig, set_id);
+ if (ret != -1)
+ return ret;
+
+ /* Pick prim or sec based on the signature LSB for recursive displacement */
+ uint32_t select_bucket = (tmp_sig & 1U) ? prim_bucket : sec_bucket;
+ if (ss->cache) {
+ ret = evict_from_bucket();
+ buckets[select_bucket].sigs[ret] = tmp_sig;
+ buckets[select_bucket].sets[ret] = set_id;
+ return 1;
+ }
+
+ ret = make_space_bucket(ss, select_bucket, &nr_pushes);
+ if (ret >= 0) {
+ buckets[select_bucket].sigs[ret] = tmp_sig;
+ buckets[select_bucket].sets[ret] = set_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+void
+rte_member_free_ht(struct rte_member_setsum *ss)
+{
+ rte_free(ss->table);
+}
+
+int
+rte_member_delete_ht(const struct rte_member_setsum *ss, const void *key,
+ member_set_t set_id)
+{
+ int i;
+ uint32_t prim_bucket, sec_bucket;
+ member_sig_t tmp_sig;
+ struct member_ht_bucket *buckets = ss->table;
+
+ get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);
+
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ if (tmp_sig == buckets[prim_bucket].sigs[i] &&
+ set_id == buckets[prim_bucket].sets[i]) {
+ buckets[prim_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
+ if (tmp_sig == buckets[sec_bucket].sigs[i] &&
+ set_id == buckets[sec_bucket].sets[i]) {
+ buckets[sec_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+void
+rte_member_reset_ht(const struct rte_member_setsum *ss)
+{
+ uint32_t i, j;
+ struct member_ht_bucket *buckets = ss->table;
+
+ for (i = 0; i < ss->bucket_cnt; i++) {
+ for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
+ buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
+ }
+}
diff --git a/lib/librte_member/rte_member_ht.h b/lib/librte_member/rte_member_ht.h
new file mode 100644
index 00000000..3148a492
--- /dev/null
+++ b/lib/librte_member/rte_member_ht.h
@@ -0,0 +1,94 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMBER_HT_H_
+#define _RTE_MEMBER_HT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Maximum number of pushes for cuckoo path in HT mode. */
+#define RTE_MEMBER_MAX_PUSHES 50
+
+typedef uint16_t member_sig_t; /* signature size is 16 bit */
+
+/* The bucket struct for ht setsum */
+struct member_ht_bucket {
+ member_sig_t sigs[RTE_MEMBER_BUCKET_ENTRIES]; /* 2-byte signature */
+ member_set_t sets[RTE_MEMBER_BUCKET_ENTRIES]; /* 2-byte set */
+} __rte_cache_aligned;
+
+int
+rte_member_create_ht(struct rte_member_setsum *ss,
+ const struct rte_member_parameters *params);
+
+int
+rte_member_lookup_ht(const struct rte_member_setsum *setsum,
+ const void *key, member_set_t *set_id);
+
+uint32_t
+rte_member_lookup_bulk_ht(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ member_set_t *set_ids);
+
+uint32_t
+rte_member_lookup_multi_ht(const struct rte_member_setsum *setsum,
+ const void *key, uint32_t match_per_key,
+ member_set_t *set_id);
+
+uint32_t
+rte_member_lookup_multi_bulk_ht(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys, uint32_t match_per_key,
+ uint32_t *match_count,
+ member_set_t *set_ids);
+
+int
+rte_member_add_ht(const struct rte_member_setsum *setsum,
+ const void *key, member_set_t set_id);
+
+void
+rte_member_free_ht(struct rte_member_setsum *setsum);
+
+int
+rte_member_delete_ht(const struct rte_member_setsum *ss, const void *key,
+ member_set_t set_id);
+
+void
+rte_member_reset_ht(const struct rte_member_setsum *setsum);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMBER_HT_H_ */
diff --git a/lib/librte_member/rte_member_vbf.c b/lib/librte_member/rte_member_vbf.c
new file mode 100644
index 00000000..1a98ac84
--- /dev/null
+++ b/lib/librte_member/rte_member_vbf.c
@@ -0,0 +1,350 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <math.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+
+#include "rte_member.h"
+#include "rte_member_vbf.h"
+
+/*
+ * The vBF is currently implemented as one big array.
+ * The BFs have a vertical layout: bits at the same location of all BFs stay
+ * in the same cache line.
+ * For example, if we have 32 bloom filters, we use a uint32_t array to
+ * represent all of them. array[0] represents the first bit location of all
+ * the bloom filters, array[1] the second bit location, and so on. The
+ * advantage of this layout is to minimize the average number of memory
+ * accesses needed to test all bloom filters.
+ *
+ * Currently the implementation supports vBFs containing 1, 2, 4, 8, 16 or
+ * 32 BFs.
+ */
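+
+/*
+ * Worked example of the vertical layout (illustrative), assuming
+ * num_set = 32: each uint32_t word packs one bit location of all 32 BFs,
+ * so testing location L of every BF is a single 32-bit load:
+ *
+ *	uint32_t *vbf = ss->table;
+ *	uint32_t all_bfs_at_loc = vbf[L];	bit b set => BF b+1 set at L
+ *
+ * With fewer BFs, e.g. num_set = 8, one word holds 4 consecutive bit
+ * locations of all 8 BFs; mul_shift and div_shift encode this packing.
+ */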
+int
+rte_member_create_vbf(struct rte_member_setsum *ss,
+ const struct rte_member_parameters *params)
+{
+
+ if (params->num_set > RTE_MEMBER_MAX_BF ||
+ !rte_is_power_of_2(params->num_set) ||
+ params->num_keys == 0 ||
+ params->false_positive_rate == 0 ||
+ params->false_positive_rate > 1) {
+ rte_errno = EINVAL;
+ RTE_MEMBER_LOG(ERR, "Membership vBF create with invalid parameters\n");
+ return -EINVAL;
+ }
+
+ /* We assume the expected keys are evenly distributed across all BFs */
+ uint32_t num_keys_per_bf = 1 + (params->num_keys - 1) / ss->num_set;
+
+ /*
+ * Note that the given false positive rate is for the whole vBF, so
+ * each single BF's false positive rate needs to be derived from it.
+ * Assume each BF's false positive rate is fp_one_bf; then the total
+ * false positive rate is fp = 1 - (1 - fp_one_bf)^n,
+ * hence fp_one_bf = 1 - (1 - fp)^(1/n).
+ */
+
+ float fp_one_bf = 1 - pow((1 - params->false_positive_rate),
+ 1.0 / ss->num_set);
+
+ if (fp_one_bf == 0) {
+ rte_errno = EINVAL;
+ RTE_MEMBER_LOG(ERR, "Membership BF false positive rate is too small\n");
+ return -EINVAL;
+ }
+
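+ /*
+ * Standard Bloom filter sizing: bits per BF is
+ * m = -n * ln(p) / (ln 2)^2. The denominator below,
+ * log(1.0 / pow(2.0, log(2.0))) = -(ln 2)^2, keeps the quotient
+ * positive because log(fp_one_bf) is negative.
+ */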
+ uint32_t bits = ceil((num_keys_per_bf *
+ log(fp_one_bf)) /
+ log(1.0 / (pow(2.0, log(2.0)))));
+
+ /* We round to power of 2 for performance during lookup */
+ ss->bits = rte_align32pow2(bits);
+
+ ss->num_hashes = (uint32_t)(log(2.0) * bits / num_keys_per_bf);
+ ss->bit_mask = ss->bits - 1;
+
+ /*
+ * Since we round the bits up to a power of 2, the final false
+ * positive rate will likely differ from the user-specified one. We
+ * log the new value as a debug message.
+ */
+ float new_fp = pow((1 - pow((1 - 1.0 / ss->bits), num_keys_per_bf *
+ ss->num_hashes)), ss->num_hashes);
+ new_fp = 1 - pow((1 - new_fp), ss->num_set);
+
+ /*
+ * Reduce the hash function count until we approach the user-specified
+ * false-positive rate; otherwise the setting is too conservative.
+ */
+ int tmp_num_hash = ss->num_hashes;
+
+ while (tmp_num_hash > 1) {
+ float tmp_fp = new_fp;
+
+ tmp_num_hash--;
+ new_fp = pow((1 - pow((1 - 1.0 / ss->bits), num_keys_per_bf *
+ tmp_num_hash)), tmp_num_hash);
+ new_fp = 1 - pow((1 - new_fp), ss->num_set);
+
+ if (new_fp > params->false_positive_rate) {
+ new_fp = tmp_fp;
+ tmp_num_hash++;
+ break;
+ }
+ }
+
+ ss->num_hashes = tmp_num_hash;
+
+ /*
+ * To avoid multiplication and division:
+ * mul_shift is the multiplication shift used during the bit test;
+ * div_shift is the division shift, dividing bit locations by the
+ * number of bit locations held by one uint32_t variable.
+ */
+ ss->mul_shift = __builtin_ctzl(ss->num_set);
+ ss->div_shift = __builtin_ctzl(32 >> ss->mul_shift);
+
+ RTE_MEMBER_LOG(DEBUG, "vector bloom filter created, "
+ "each bloom filter expects %u keys, needs %u bits, %u hashes, "
+ "with false positive rate set as %.5f, "
+ "The new calculated vBF false positive rate is %.5f\n",
+ num_keys_per_bf, ss->bits, ss->num_hashes, fp_one_bf, new_fp);
+
+ ss->table = rte_zmalloc_socket(NULL, ss->num_set * (ss->bits >> 3),
+ RTE_CACHE_LINE_SIZE, ss->socket_id);
+ if (ss->table == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline uint32_t
+test_bit(uint32_t bit_loc, const struct rte_member_setsum *ss)
+{
+ uint32_t *vbf = ss->table;
+ uint32_t n = ss->num_set;
+ uint32_t div_shift = ss->div_shift;
+ uint32_t mul_shift = ss->mul_shift;
+ /*
+ * a is the number of bit locations of one BF that are held by one
+ * 32-bit variable.
+ */
+ uint32_t a = 32 >> mul_shift;
+ /*
+ * x >> div_shift is the divide; x & (a - 1) is the mod; the final
+ * & ((1 << n) - 1) masks out the bits we do not need.
+ */
+ return (vbf[bit_loc >> div_shift] >>
+ ((bit_loc & (a - 1)) << mul_shift)) & ((1ULL << n) - 1);
+}
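+
+/*
+ * Worked example for test_bit() (illustrative), with num_set = 8, so
+ * mul_shift = 3, div_shift = 2 and a = 4: for bit_loc = 10 we read word
+ * vbf[10 >> 2] = vbf[2], shift right by (10 & 3) << 3 = 16 bits, and mask
+ * with (1 << 8) - 1 = 0xff, yielding the 8 per-BF bits of location 10.
+ */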
+
+static inline void
+set_bit(uint32_t bit_loc, const struct rte_member_setsum *ss, int32_t set)
+{
+ uint32_t *vbf = ss->table;
+ uint32_t div_shift = ss->div_shift;
+ uint32_t mul_shift = ss->mul_shift;
+ uint32_t a = 32 >> mul_shift;
+
+ vbf[bit_loc >> div_shift] |=
+ 1UL << (((bit_loc & (a - 1)) << mul_shift) + set - 1);
+}
+
+int
+rte_member_lookup_vbf(const struct rte_member_setsum *ss, const void *key,
+ member_set_t *set_id)
+{
+ uint32_t j;
+ uint32_t h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
+ uint32_t h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t),
+ ss->sec_hash_seed);
+ uint32_t mask = ~0;
+ uint32_t bit_loc;
+
+ for (j = 0; j < ss->num_hashes; j++) {
+ bit_loc = (h1 + j * h2) & ss->bit_mask;
+ mask &= test_bit(bit_loc, ss);
+ }
+
+ if (mask) {
+ *set_id = __builtin_ctzl(mask) + 1;
+ return 1;
+ }
+
+ *set_id = RTE_MEMBER_NO_MATCH;
+ return 0;
+}
+
+uint32_t
+rte_member_lookup_bulk_vbf(const struct rte_member_setsum *ss,
+ const void **keys, uint32_t num_keys, member_set_t *set_ids)
+{
+ uint32_t i, k;
+ uint32_t num_matches = 0;
+ uint32_t mask[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t h1[RTE_MEMBER_LOOKUP_BULK_MAX], h2[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t bit_loc;
+
+ for (i = 0; i < num_keys; i++)
+ h1[i] = MEMBER_HASH_FUNC(keys[i], ss->key_len,
+ ss->prim_hash_seed);
+ for (i = 0; i < num_keys; i++)
+ h2[i] = MEMBER_HASH_FUNC(&h1[i], sizeof(uint32_t),
+ ss->sec_hash_seed);
+ for (i = 0; i < num_keys; i++) {
+ mask[i] = ~0;
+ for (k = 0; k < ss->num_hashes; k++) {
+ bit_loc = (h1[i] + k * h2[i]) & ss->bit_mask;
+ mask[i] &= test_bit(bit_loc, ss);
+ }
+ }
+ for (i = 0; i < num_keys; i++) {
+ if (mask[i]) {
+ set_ids[i] = __builtin_ctzl(mask[i]) + 1;
+ num_matches++;
+ } else
+ set_ids[i] = RTE_MEMBER_NO_MATCH;
+ }
+ return num_matches;
+}
+
+uint32_t
+rte_member_lookup_multi_vbf(const struct rte_member_setsum *ss,
+ const void *key, uint32_t match_per_key,
+ member_set_t *set_id)
+{
+ uint32_t num_matches = 0;
+ uint32_t j;
+ uint32_t h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
+ uint32_t h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t),
+ ss->sec_hash_seed);
+ uint32_t mask = ~0;
+ uint32_t bit_loc;
+
+ for (j = 0; j < ss->num_hashes; j++) {
+ bit_loc = (h1 + j * h2) & ss->bit_mask;
+ mask &= test_bit(bit_loc, ss);
+ }
+ while (mask) {
+ uint32_t loc = __builtin_ctzl(mask);
+ set_id[num_matches] = loc + 1;
+ num_matches++;
+ if (num_matches >= match_per_key)
+ return num_matches;
+ mask &= ~(1UL << loc);
+ }
+ return num_matches;
+}
+
+uint32_t
+rte_member_lookup_multi_bulk_vbf(const struct rte_member_setsum *ss,
+ const void **keys, uint32_t num_keys, uint32_t match_per_key,
+ uint32_t *match_count,
+ member_set_t *set_ids)
+{
+ uint32_t i, k;
+ uint32_t num_matches = 0;
+ uint32_t match_cnt_t;
+ uint32_t mask[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t h1[RTE_MEMBER_LOOKUP_BULK_MAX], h2[RTE_MEMBER_LOOKUP_BULK_MAX];
+ uint32_t bit_loc;
+
+ for (i = 0; i < num_keys; i++)
+ h1[i] = MEMBER_HASH_FUNC(keys[i], ss->key_len,
+ ss->prim_hash_seed);
+ for (i = 0; i < num_keys; i++)
+ h2[i] = MEMBER_HASH_FUNC(&h1[i], sizeof(uint32_t),
+ ss->sec_hash_seed);
+ for (i = 0; i < num_keys; i++) {
+ mask[i] = ~0;
+ for (k = 0; k < ss->num_hashes; k++) {
+ bit_loc = (h1[i] + k * h2[i]) & ss->bit_mask;
+ mask[i] &= test_bit(bit_loc, ss);
+ }
+ }
+ for (i = 0; i < num_keys; i++) {
+ match_cnt_t = 0;
+ while (mask[i]) {
+ uint32_t loc = __builtin_ctzl(mask[i]);
+ set_ids[i * match_per_key + match_cnt_t] = loc + 1;
+ match_cnt_t++;
+ if (match_cnt_t >= match_per_key)
+ break;
+ mask[i] &= ~(1UL << loc);
+ }
+ match_count[i] = match_cnt_t;
+ if (match_cnt_t != 0)
+ num_matches++;
+ }
+ return num_matches;
+}
+
+int
+rte_member_add_vbf(const struct rte_member_setsum *ss,
+ const void *key, member_set_t set_id)
+{
+ uint32_t i, h1, h2;
+ uint32_t bit_loc;
+
+ if (set_id > ss->num_set || set_id == RTE_MEMBER_NO_MATCH)
+ return -EINVAL;
+
+ h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
+ h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t), ss->sec_hash_seed);
+
+ for (i = 0; i < ss->num_hashes; i++) {
+ bit_loc = (h1 + i * h2) & ss->bit_mask;
+ set_bit(bit_loc, ss, set_id);
+ }
+ return 0;
+}
+
+void
+rte_member_free_vbf(struct rte_member_setsum *ss)
+{
+ rte_free(ss->table);
+}
+
+void
+rte_member_reset_vbf(const struct rte_member_setsum *ss)
+{
+ uint32_t *vbf = ss->table;
+ memset(vbf, 0, (ss->num_set * ss->bits) >> 3);
+}
diff --git a/lib/librte_member/rte_member_vbf.h b/lib/librte_member/rte_member_vbf.h
new file mode 100644
index 00000000..5bc158b9
--- /dev/null
+++ b/lib/librte_member/rte_member_vbf.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMBER_VBF_H_
+#define _RTE_MEMBER_VBF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Currently we only support up to 32 sets in vBF */
+#define RTE_MEMBER_MAX_BF 32
+
+int
+rte_member_create_vbf(struct rte_member_setsum *ss,
+ const struct rte_member_parameters *params);
+
+int
+rte_member_lookup_vbf(const struct rte_member_setsum *setsum,
+ const void *key, member_set_t *set_id);
+
+uint32_t
+rte_member_lookup_bulk_vbf(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys,
+ member_set_t *set_ids);
+
+uint32_t
+rte_member_lookup_multi_vbf(const struct rte_member_setsum *setsum,
+ const void *key, uint32_t match_per_key,
+ member_set_t *set_id);
+
+uint32_t
+rte_member_lookup_multi_bulk_vbf(const struct rte_member_setsum *setsum,
+ const void **keys, uint32_t num_keys, uint32_t match_per_key,
+ uint32_t *match_count,
+ member_set_t *set_ids);
+
+int
+rte_member_add_vbf(const struct rte_member_setsum *setsum,
+ const void *key, member_set_t set_id);
+
+void
+rte_member_free_vbf(struct rte_member_setsum *ss);
+
+void
+rte_member_reset_vbf(const struct rte_member_setsum *setsum);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMBER_VBF_H_ */
diff --git a/lib/librte_member/rte_member_version.map b/lib/librte_member/rte_member_version.map
new file mode 100644
index 00000000..019e4cd9
--- /dev/null
+++ b/lib/librte_member/rte_member_version.map
@@ -0,0 +1,16 @@
+DPDK_17.11 {
+ global:
+
+ rte_member_add;
+ rte_member_create;
+ rte_member_delete;
+ rte_member_find_existing;
+ rte_member_free;
+ rte_member_lookup;
+ rte_member_lookup_bulk;
+ rte_member_lookup_multi;
+ rte_member_lookup_multi_bulk;
+ rte_member_reset;
+
+ local: *;
+};
diff --git a/lib/librte_member/rte_member_x86.h b/lib/librte_member/rte_member_x86.h
new file mode 100644
index 00000000..d29dd3fe
--- /dev/null
+++ b/lib/librte_member/rte_member_x86.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMBER_X86_H_
+#define _RTE_MEMBER_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <x86intrin.h>
+
+#if defined(RTE_MACHINE_CPUFLAG_AVX2)
+
+static inline int
+update_entry_search_avx(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ member_set_t set_id)
+{
+ uint32_t hitmask = _mm256_movemask_epi8((__m256i)_mm256_cmpeq_epi16(
+ _mm256_load_si256((__m256i const *)buckets[bucket_id].sigs),
+ _mm256_set1_epi16(tmp_sig)));
+ if (hitmask) {
+ uint32_t hit_idx = __builtin_ctzl(hitmask) >> 1;
+ buckets[bucket_id].sets[hit_idx] = set_id;
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+search_bucket_single_avx(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ member_set_t *set_id)
+{
+ uint32_t hitmask = _mm256_movemask_epi8((__m256i)_mm256_cmpeq_epi16(
+ _mm256_load_si256((__m256i const *)buckets[bucket_id].sigs),
+ _mm256_set1_epi16(tmp_sig)));
+ while (hitmask) {
+ uint32_t hit_idx = __builtin_ctzl(hitmask) >> 1;
+ if (buckets[bucket_id].sets[hit_idx] != RTE_MEMBER_NO_MATCH) {
+ *set_id = buckets[bucket_id].sets[hit_idx];
+ return 1;
+ }
+ hitmask &= ~(3U << ((hit_idx) << 1));
+ }
+ return 0;
+}
+
+static inline void
+search_bucket_multi_avx(uint32_t bucket_id, member_sig_t tmp_sig,
+ struct member_ht_bucket *buckets,
+ uint32_t *counter,
+ uint32_t match_per_key,
+ member_set_t *set_id)
+{
+ uint32_t hitmask = _mm256_movemask_epi8((__m256i)_mm256_cmpeq_epi16(
+ _mm256_load_si256((__m256i const *)buckets[bucket_id].sigs),
+ _mm256_set1_epi16(tmp_sig)));
+ while (hitmask) {
+ uint32_t hit_idx = __builtin_ctzl(hitmask) >> 1;
+ if (buckets[bucket_id].sets[hit_idx] != RTE_MEMBER_NO_MATCH) {
+ set_id[*counter] = buckets[bucket_id].sets[hit_idx];
+ (*counter)++;
+ if (*counter >= match_per_key)
+ return;
+ }
+ hitmask &= ~(3U << ((hit_idx) << 1));
+ }
+}
+#endif
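A note on the hitmask decoding in these helpers: _mm256_cmpeq_epi16() yields all-ones 16-bit lanes for matching signatures, and _mm256_movemask_epi8() packs one bit per byte, so every matching lane contributes two adjacent mask bits. __builtin_ctzl(hitmask) >> 1 therefore recovers the lane index, and hitmask &= ~(3U << (hit_idx << 1)) clears both bits of a consumed match. A scalar sketch that produces the same mask layout (the 16-signature bucket width follows from the 256-bit register divided by 16-bit signatures):

    #include <stdint.h>

    #define SIGS_PER_BUCKET 16 /* 256 bits / 16-bit signatures */

    static inline uint32_t
    sig_hitmask_scalar(const uint16_t *sigs, uint16_t tmp_sig)
    {
        uint32_t hitmask = 0;
        int i;

        for (i = 0; i < SIGS_PER_BUCKET; i++)
            if (sigs[i] == tmp_sig)
                hitmask |= 3U << (i << 1); /* two bits per lane */
        return hitmask;
    }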
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMBER_X86_H_ */
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 7b5bdfee..46654e32 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -35,10 +35,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal -lrte_ring
EXPORT_MAP := rte_mempool_version.map
-LIBABIVER := 2
+LIBABIVER := 3
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 6fc3c9c7..d50dba49 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -128,7 +128,7 @@ static unsigned optimize_object_size(unsigned obj_size)
}
static void
-mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
+mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
{
struct rte_mempool_objhdr *hdr;
struct rte_mempool_objtlr *tlr __rte_unused;
@@ -136,7 +136,7 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
/* set mempool ptr in header */
hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
hdr->mp = mp;
- hdr->physaddr = physaddr;
+ hdr->iova = iova;
STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
mp->populated_size++;
@@ -238,9 +238,16 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
* Calculate maximum amount of memory required to store given number of objects.
*/
size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
+ unsigned int flags)
{
size_t obj_per_page, pg_num, pg_sz;
+ unsigned int mask;
+
+ mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+ if ((flags & mask) == mask)
+ /* alignment needs one additional object */
+ elt_num += 1;
if (total_elt_sz == 0)
return 0;
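The extra element reserved above exists so that a driver requiring block-aligned objects can round the first object's address up to total_elt_sz without running out of room. A quick numeric sanity check under the simplest assumption (pg_shift == 0, hypothetical sizes):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical per-object footprint: header + element + trailer */
        size_t total_elt_sz = 2176;
        uint32_t elt_num = 512;

        /* both capability flags set: reserve one extra object's worth */
        printf("%zu\n", (size_t)(elt_num + 1) * total_elt_sz); /* 1116288 */
        return 0;
    }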
@@ -263,23 +270,29 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
*/
ssize_t
rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
- uint32_t pg_shift)
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
+ uint32_t pg_shift, unsigned int flags)
{
uint32_t elt_cnt = 0;
- phys_addr_t start, end;
- uint32_t paddr_idx;
+ rte_iova_t start, end;
+ uint32_t iova_idx;
size_t pg_sz = (size_t)1 << pg_shift;
+ unsigned int mask;
+
+ mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
+ if ((flags & mask) == mask)
+ /* alignment needs one additional object */
+ elt_num += 1;
- /* if paddr is NULL, assume contiguous memory */
- if (paddr == NULL) {
+ /* if iova is NULL, assume contiguous memory */
+ if (iova == NULL) {
start = 0;
end = pg_sz * pg_num;
- paddr_idx = pg_num;
+ iova_idx = pg_num;
} else {
- start = paddr[0];
- end = paddr[0] + pg_sz;
- paddr_idx = 1;
+ start = iova[0];
+ end = iova[0] + pg_sz;
+ iova_idx = 1;
}
while (elt_cnt < elt_num) {
@@ -287,15 +300,15 @@ rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
/* enough contiguous memory, add an object */
start += total_elt_sz;
elt_cnt++;
- } else if (paddr_idx < pg_num) {
+ } else if (iova_idx < pg_num) {
/* no room to store one obj, add a page */
- if (end == paddr[paddr_idx]) {
+ if (end == iova[iova_idx]) {
end += pg_sz;
} else {
- start = paddr[paddr_idx];
- end = paddr[paddr_idx] + pg_sz;
+ start = iova[iova_idx];
+ end = iova[iova_idx] + pg_sz;
}
- paddr_idx++;
+ iova_idx++;
} else {
/* no more page, return how many elements fit */
@@ -303,7 +316,7 @@ rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
}
}
- return (size_t)paddr_idx << pg_shift;
+ return (size_t)iova_idx << pg_shift;
}
/* free a memchunk allocated with rte_memzone_reserve() */
@@ -344,8 +357,8 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
* on error.
*/
int
-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
- phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
unsigned total_elt_sz;
@@ -354,6 +367,11 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
struct rte_mempool_memhdr *memhdr;
int ret;
+ /* Notify the mempool of the new memory area */
+ ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
+ if (ret != -ENOTSUP && ret < 0)
+ return ret;
+
/* create the internal ring if not already done */
if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
ret = rte_mempool_ops_alloc(mp);
@@ -368,29 +386,42 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ /* Detect whether the pool area has sufficient space for the elements */
+ if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
+ if (len < total_elt_sz * mp->size) {
+ RTE_LOG(ERR, MEMPOOL,
+ "pool area %" PRIx64 " not enough\n",
+ (uint64_t)len);
+ return -ENOSPC;
+ }
+ }
+
memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
if (memhdr == NULL)
return -ENOMEM;
memhdr->mp = mp;
memhdr->addr = vaddr;
- memhdr->phys_addr = paddr;
+ memhdr->iova = iova;
memhdr->len = len;
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+ else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
off += mp->header_size;
- if (paddr == RTE_BAD_PHYS_ADDR)
+ if (iova == RTE_BAD_IOVA)
mempool_add_elem(mp, (char *)vaddr + off,
- RTE_BAD_PHYS_ADDR);
+ RTE_BAD_IOVA);
else
- mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
+ mempool_add_elem(mp, (char *)vaddr + off, iova + off);
off += mp->elt_size + mp->trailer_size;
i++;
}
@@ -404,12 +435,20 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
return i;
}
+int
+rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+ phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ return rte_mempool_populate_iova(mp, vaddr, paddr, len, free_cb, opaque);
+}
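A usage sketch of the new entry point: populating an empty mempool from one caller-reserved memzone. The helper and memzone names are hypothetical; the NULL free_cb leaves ownership of the area with the caller.

    #include <errno.h>
    #include <rte_mempool.h>
    #include <rte_memzone.h>

    static int
    populate_from_memzone(struct rte_mempool *mp, size_t len)
    {
        const struct rte_memzone *mz;

        mz = rte_memzone_reserve("ext_area", len, SOCKET_ID_ANY, 0);
        if (mz == NULL)
            return -ENOMEM;
        /* the union added in this patch exposes mz->iova directly */
        return rte_mempool_populate_iova(mp, mz->addr, mz->iova,
                                         mz->len, NULL, NULL);
    }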
+
/* Add objects in the pool, using a table of physical pages. Return the
* number of objects added, or a negative value on error.
*/
int
-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
{
uint32_t i, n;
@@ -421,18 +460,18 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
return -EEXIST;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_IOVA,
pg_num * pg_sz, free_cb, opaque);
for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
/* populate with the largest group of contiguous pages */
for (n = 1; (i + n) < pg_num &&
- paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
+ iova[i + n - 1] + pg_sz == iova[i + n]; n++)
;
- ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
- paddr[i], n * pg_sz, free_cb, opaque);
+ ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
+ iova[i], n * pg_sz, free_cb, opaque);
if (ret < 0) {
rte_mempool_free_memchunks(mp);
return ret;
@@ -444,6 +483,15 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
return cnt;
}
+int
+rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ return rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ free_cb, opaque);
+}
+
/* Populate the mempool with a virtual area. Return the number of
* objects added, or a negative value on error.
*/
@@ -452,7 +500,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
- phys_addr_t paddr;
+ rte_iova_t iova;
size_t off, phys_len;
int ret, cnt = 0;
@@ -466,33 +514,30 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
return -EINVAL;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
len, free_cb, opaque);
for (off = 0; off + pg_sz <= len &&
mp->populated_size < mp->size; off += phys_len) {
- paddr = rte_mem_virt2phy(addr + off);
- /* required for xen_dom0 to get the machine address */
- paddr = rte_mem_phy2mch(-1, paddr);
+ iova = rte_mem_virt2iova(addr + off);
- if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
+ if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) {
ret = -EINVAL;
goto fail;
}
/* populate with the largest group of contiguous pages */
for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
- phys_addr_t paddr_tmp;
+ rte_iova_t iova_tmp;
- paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
- paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
+ iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
- if (paddr_tmp != paddr + phys_len)
+ if (iova_tmp != iova + phys_len)
break;
}
- ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+ ret = rte_mempool_populate_iova(mp, addr + off, iova,
phys_len, free_cb, opaque);
if (ret < 0)
goto fail;
@@ -515,23 +560,29 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
int
rte_mempool_populate_default(struct rte_mempool *mp)
{
- int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
size_t size, total_elt_sz, align, pg_sz, pg_shift;
- phys_addr_t paddr;
+ rte_iova_t iova;
unsigned mz_id, n;
+ unsigned int mp_flags;
int ret;
/* mempool must not be populated */
if (mp->nb_mem_chunks != 0)
return -EEXIST;
- if (rte_xen_dom0_supported()) {
- pg_sz = RTE_PGSIZE_2M;
- pg_shift = rte_bsf32(pg_sz);
- align = pg_sz;
- } else if (rte_eal_has_hugepages()) {
+ /* Get mempool capabilities */
+ mp_flags = 0;
+ ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
+ if ((ret < 0) && (ret != -ENOTSUP))
+ return ret;
+
+ /* update mempool capabilities */
+ mp->flags |= mp_flags;
+
+ if (rte_eal_has_hugepages()) {
pg_shift = 0; /* not needed, zone is physically contiguous */
pg_sz = 0;
align = RTE_CACHE_LINE_SIZE;
@@ -543,7 +594,8 @@ rte_mempool_populate_default(struct rte_mempool *mp)
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
- size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
+ size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
+ mp->flags);
ret = snprintf(mz_name, sizeof(mz_name),
RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -564,13 +616,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
}
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- paddr = RTE_BAD_PHYS_ADDR;
+ iova = RTE_BAD_IOVA;
else
- paddr = mz->phys_addr;
+ iova = mz->iova;
- if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
- ret = rte_mempool_populate_phys(mp, mz->addr,
- paddr, mz->len,
+ if (rte_eal_has_hugepages())
+ ret = rte_mempool_populate_iova(mp, mz->addr,
+ iova, mz->len,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
else
@@ -600,7 +652,8 @@ get_anon_size(const struct rte_mempool *mp)
pg_sz = getpagesize();
pg_shift = rte_bsf32(pg_sz);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+ size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
+ mp->flags);
return size;
}
@@ -742,7 +795,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
struct rte_tailq_entry *te = NULL;
const struct rte_memzone *mz = NULL;
size_t mempool_size;
- int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
struct rte_mempool_objsz objsz;
unsigned lcore_id;
int ret;
@@ -922,7 +975,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift)
{
struct rte_mempool *mp = NULL;
int ret;
@@ -934,7 +987,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
obj_init, obj_init_arg, socket_id, flags);
/* check that we have both VA and PA */
- if (paddr == NULL) {
+ if (iova == NULL) {
rte_errno = EINVAL;
return NULL;
}
@@ -954,7 +1007,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
if (mp_init)
mp_init(mp, mp_init_arg);
- ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ ret = rte_mempool_populate_iova_tab(mp, vaddr, iova, pg_num, pg_shift,
NULL, NULL);
if (ret < 0 || ret != (int)mp->size)
goto fail;
@@ -1177,7 +1230,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
fprintf(f, " pool=%p\n", mp->pool_data);
- fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->mz->phys_addr);
+ fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova);
fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks);
fprintf(f, " size=%"PRIu32"\n", mp->size);
fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 76b5b3b1..721227f6 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -157,7 +157,11 @@ struct rte_mempool_objsz {
struct rte_mempool_objhdr {
STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the object. */
- phys_addr_t physaddr; /**< Physical address of the object. */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the object. */
+ phys_addr_t physaddr; /**< deprecated - Physical address of the object. */
+ };
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
#endif
@@ -203,7 +207,11 @@ struct rte_mempool_memhdr {
STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the chunk */
void *addr; /**< Virtual address of the chunk */
- phys_addr_t phys_addr; /**< Physical address of the chunk */
+ RTE_STD_C11
+ union {
+ rte_iova_t iova; /**< IO address of the chunk */
+ phys_addr_t phys_addr; /**< Physical address of the chunk */
+ };
size_t len; /**< length of the chunk */
rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
void *opaque; /**< Argument passed to the free callback */
@@ -226,7 +234,7 @@ struct rte_mempool {
};
void *pool_config; /**< optional args for ops alloc. */
const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
- int flags; /**< Flags of the mempool. */
+ unsigned int flags; /**< Flags of the mempool. */
int socket_id; /**< Socket id passed at create. */
uint32_t size; /**< Max size of the mempool. */
uint32_t cache_size;
@@ -265,6 +273,24 @@ struct rte_mempool {
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
+/**
+ * This capability flag is advertised by a mempool handler if the whole
+ * memory area containing the objects must be physically contiguous.
+ * Note: this flag should not be passed by the application.
+ */
+#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
+/**
+ * This capability flag is advertised by a mempool handler when the driver
+ * wants the object start address (vaddr) aligned to the block size, i.e.
+ * the total element size.
+ *
+ * Note:
+ * - This flag should not be passed by the application; it is meant for
+ * mempool drivers only.
+ * - A mempool driver must also set the MEMPOOL_F_CAPA_PHYS_CONTIG flag
+ * along with MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
+ */
+#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
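For context, these capability flags are surfaced through the new get_capabilities op introduced later in this patch; a minimal driver-side sketch (hypothetical handler) that advertises both:

    static int
    my_pool_get_capabilities(const struct rte_mempool *mp __rte_unused,
                             unsigned int *flags)
    {
        *flags |= MEMPOOL_F_CAPA_PHYS_CONTIG |
                  MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS;
        return 0;
    }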
/**
* @internal When debug is enabled, store some statistics.
@@ -389,6 +415,18 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+/**
+ * Get the mempool capabilities.
+ */
+typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
+ unsigned int *flags);
+
+/**
+ * Notify the mempool of a new memory area.
+ */
+typedef int (*rte_mempool_ops_register_memory_area_t)
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -397,6 +435,14 @@ struct rte_mempool_ops {
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ /**
+ * Get the mempool capabilities
+ */
+ rte_mempool_get_capabilities_t get_capabilities;
+ /**
+ * Notify the mempool of a new memory area
+ */
+ rte_mempool_ops_register_memory_area_t register_memory_area;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -509,6 +555,43 @@ unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
+ * @internal wrapper for mempool_ops get_capabilities callback.
+ *
+ * @param mp [in]
+ * Pointer to the memory pool.
+ * @param flags [out]
+ * Pointer to the mempool flags.
+ * @return
+ * - 0: Success; the mempool driver has advertised its pool capabilities in
+ * the flags parameter.
+ * - -ENOTSUP: the handler does not implement the get_capabilities op (a
+ * valid case).
+ * - Otherwise: pool creation fails.
+ */
+int
+rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
+ unsigned int *flags);
+/**
+ * @internal wrapper for mempool_ops register_memory_area callback.
+ * API to notify the mempool handler when a new memory area is added to the pool.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param vaddr
+ * Pointer to the buffer virtual address.
+ * @param iova
+ * The IO address of the buffer.
+ * @param len
+ * The length of the memory area.
+ * @return
+ * - 0: Success.
+ * - -ENOTSUP: the handler does not implement the register_memory_area op
+ * (a valid case).
+ * - Otherwise: rte_mempool_populate_iova() fails, and thus pool creation
+ * fails.
+ */
+int
+rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
+ char *vaddr, rte_iova_t iova, size_t len);
+
+/**
* @internal wrapper for mempool_ops free callback.
*
* @param mp
@@ -722,11 +805,10 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* @param vaddr
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ * Array of IO addresses of the pages that comprise the given memory buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @return
@@ -739,7 +821,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
/**
* Create an empty mempool
@@ -798,7 +880,7 @@ rte_mempool_free(struct rte_mempool *mp);
* Add a virtually and physically contiguous memory chunk in the pool
* where objects can be instantiated.
*
- * If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
* the chunk doesn't need to be physically contiguous (only virtually),
* and allocated objects may span two pages.
*
@@ -806,8 +888,8 @@ rte_mempool_free(struct rte_mempool *mp);
* A pointer to the mempool structure.
* @param vaddr
* The virtual address of memory that should be used to store objects.
- * @param paddr
- * The physical address
+ * @param iova
+ * The IO address
* @param len
* The length of memory in bytes.
* @param free_cb
@@ -819,6 +901,11 @@ rte_mempool_free(struct rte_mempool *mp);
* On error, the chunk is not added in the memory list of the
* mempool and a negative errno is returned.
*/
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+__rte_deprecated
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque);
@@ -827,18 +914,17 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* Add physical memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
- * be instantiated. The physical addresses corresponding to the virtual
- * area are described in paddr[], pg_num, pg_shift.
+ * be instantiated. The IO addresses corresponding to the virtual
+ * area are described in iova[], pg_num, pg_shift.
*
* @param mp
* A pointer to the mempool structure.
* @param vaddr
* The virtual address of memory that should be used to store objects.
- * @param paddr
- * An array of physical addresses of each page composing the virtual
- * area.
+ * @param iova
+ * An array of IO addresses of each page composing the virtual area.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
* @param free_cb
@@ -850,6 +936,11 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* On error, the chunks are not added in the memory list of the
* mempool and a negative errno is returned.
*/
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
+ const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
+
+__rte_deprecated
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
@@ -1034,13 +1125,10 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
* positive.
* @param cache
* A pointer to a mempool cache structure. May be NULL if not needed.
- * @param flags
- * The flags used for the mempool creation.
- * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, struct rte_mempool_cache *cache)
+ unsigned int n, struct rte_mempool_cache *cache)
{
void **cache_objs;
@@ -1096,14 +1184,10 @@ ring_enqueue:
* The number of objects to add in the mempool from the obj_table.
* @param cache
* A pointer to a mempool cache structure. May be NULL if not needed.
- * @param flags
- * The flags used for the mempool creation.
- * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, struct rte_mempool_cache *cache,
- __rte_unused int flags)
+ unsigned int n, struct rte_mempool_cache *cache)
{
__mempool_check_cookies(mp, obj_table, n, 0);
__mempool_generic_put(mp, obj_table, n, cache);
@@ -1125,11 +1209,11 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
*/
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
+ unsigned int n)
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
- rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags);
+ rte_mempool_generic_put(mp, obj_table, n, cache);
}
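The dropped argument was already ignored by the implementation (it was marked __rte_unused), so callers of the generic put/get API simply omit it from this patch on. In caller code:

    /* before this patch */
    rte_mempool_generic_put(mp, objs, n, cache, mp->flags);

    /* after this patch */
    rte_mempool_generic_put(mp, objs, n, cache);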
/**
@@ -1160,16 +1244,13 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* The number of objects to get, must be strictly positive.
* @param cache
* A pointer to a mempool cache structure. May be NULL if not needed.
- * @param flags
- * The flags used for the mempool creation.
- * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
* @return
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned n, struct rte_mempool_cache *cache)
+ unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
uint32_t index, len;
@@ -1241,16 +1322,13 @@ ring_dequeue:
* The number of objects to get from mempool to obj_table.
* @param cache
* A pointer to a mempool cache structure. May be NULL if not needed.
- * @param flags
- * The flags used for the mempool creation.
- * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
* @return
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
static __rte_always_inline int
-rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
- struct rte_mempool_cache *cache, __rte_unused int flags)
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned int n, struct rte_mempool_cache *cache)
{
int ret;
ret = __mempool_generic_get(mp, obj_table, n, cache);
@@ -1282,11 +1360,11 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
static __rte_always_inline int
-rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
struct rte_mempool_cache *cache;
cache = rte_mempool_default_cache(mp, rte_lcore_id());
- return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
+ return rte_mempool_generic_get(mp, obj_table, n, cache);
}
/**
@@ -1383,24 +1461,29 @@ rte_mempool_empty(const struct rte_mempool *mp)
}
/**
- * Return the physical address of elt, which is an element of the pool mp.
+ * Return the IO address of elt, which is an element of the pool mp.
*
- * @param mp
- * A pointer to the mempool structure.
* @param elt
* A pointer (virtual address) to the element of the pool.
* @return
- * The physical address of the elt element.
+ * The IO address of the elt element.
* If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
- * returned value is RTE_BAD_PHYS_ADDR.
+ * returned value is RTE_BAD_IOVA.
*/
-static inline phys_addr_t
-rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+static inline rte_iova_t
+rte_mempool_virt2iova(const void *elt)
{
const struct rte_mempool_objhdr *hdr;
hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
sizeof(*hdr));
- return hdr->physaddr;
+ return hdr->iova;
+}
+
+__rte_deprecated
+static inline phys_addr_t
+rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
+{
+ return rte_mempool_virt2iova(elt);
}
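A usage sketch for the renamed accessor, e.g. when programming a DMA descriptor with an object's bus address (pool setup and error handling trimmed):

    void *obj;

    if (rte_mempool_get(mp, &obj) == 0) {
        rte_iova_t io = rte_mempool_virt2iova(obj);
        /* ... hand 'io' to the device ... */
        rte_mempool_put(mp, obj);
    }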
/**
@@ -1489,11 +1572,13 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
* by rte_mempool_calc_obj_size().
* @param pg_shift
* LOG2 of the physical pages size. If set to 0, ignore page boundaries.
+ * @param flags
+ * The mempool flags.
* @return
* Required memory size aligned at page boundary.
*/
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
- uint32_t pg_shift);
+ uint32_t pg_shift, unsigned int flags);
/**
* Get the size of memory required to store mempool elements.
@@ -1509,13 +1594,14 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
* @param total_elt_sz
* The size of each element, including header and trailer, as returned
* by rte_mempool_calc_obj_size().
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
+ * @param iova
+ * Array of IO addresses of the pages that comprise the given memory buffer.
* @param pg_num
- * Number of elements in the paddr array.
+ * Number of elements in the iova array.
* @param pg_shift
* LOG2 of the physical pages size.
+ * @param flags
+ * The mempool flags.
* @return
* On success, the number of bytes needed to store given number of
* objects, aligned to the given page size. If the provided memory
@@ -1523,8 +1609,8 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
* is the actual number of elements that can be stored in that buffer.
*/
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
- uint32_t pg_shift);
+ size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
+ uint32_t pg_shift, unsigned int flags);
/**
* Walk list of all memory pools
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 5f24de25..92b9f90c 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -37,6 +37,7 @@
#include <rte_mempool.h>
#include <rte_errno.h>
+#include <rte_dev.h>
/* indirect jump table to support external memory pools. */
struct rte_mempool_ops_table rte_mempool_ops_table = {
@@ -85,6 +86,8 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
+ ops->get_capabilities = h->get_capabilities;
+ ops->register_memory_area = h->register_memory_area;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
@@ -123,6 +126,32 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)
return ops->get_count(mp);
}
+/* wrapper to get external mempool capabilities. */
+int
+rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
+ unsigned int *flags)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
+ return ops->get_capabilities(mp, flags);
+}
+
+/* wrapper to notify the external mempool of a new memory area */
+int
+rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
+ rte_iova_t iova, size_t len)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
+ return ops->register_memory_area(mp, vaddr, iova, len);
+}
+
/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index f9c07944..62b76f91 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -41,3 +41,13 @@ DPDK_16.07 {
rte_mempool_set_ops_byname;
} DPDK_2.0;
+
+DPDK_17.11 {
+ global:
+
+ rte_mempool_ops_get_capabilities;
+ rte_mempool_ops_register_memory_area;
+ rte_mempool_populate_iova;
+ rte_mempool_populate_iova_tab;
+
+} DPDK_16.07;
diff --git a/lib/librte_meter/Makefile b/lib/librte_meter/Makefile
index 539bfddd..bfeb5d60 100644
--- a/lib/librte_meter/Makefile
+++ b/lib/librte_meter/Makefile
@@ -40,6 +40,7 @@ CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lm
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_meter_version.map
diff --git a/lib/librte_metrics/Makefile b/lib/librte_metrics/Makefile
index d4990e83..a6efba4a 100644
--- a/lib/librte_metrics/Makefile
+++ b/lib/librte_metrics/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_metrics.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_metrics_version.map
diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c
index b66a72bb..d9404001 100644
--- a/lib/librte_metrics/rte_metrics.c
+++ b/lib/librte_metrics/rte_metrics.c
@@ -115,7 +115,7 @@ rte_metrics_reg_name(const char *name)
int
rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
{
- struct rte_metrics_meta_s *entry;
+ struct rte_metrics_meta_s *entry = NULL;
struct rte_metrics_data_s *stats;
const struct rte_memzone *memzone;
uint16_t idx_name;
diff --git a/lib/librte_net/Makefile b/lib/librte_net/Makefile
index 56727c4d..50c358e5 100644
--- a/lib/librte_net/Makefile
+++ b/lib/librte_net/Makefile
@@ -34,6 +34,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_net.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_mbuf -lrte_eal
EXPORT_MAP := rte_net_version.map
LIBABIVER := 1
@@ -42,7 +43,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_NET) := rte_net.c
SRCS-$(CONFIG_RTE_LIBRTE_NET) += rte_net_crc.c
# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h rte_esp.h
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_sctp.h rte_icmp.h rte_arp.h
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_ether.h rte_gre.h rte_net.h
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_net_crc.h
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.h b/lib/librte_net/rte_esp.h
index 598adc6f..e228af09 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.h
+++ b/lib/librte_net/rte_esp.h
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,31 +30,31 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_ETH_XENVIRT_H_
-#define _RTE_ETH_XENVIRT_H_
+#ifndef _RTE_ESP_H_
+#define _RTE_ESP_H_
+
+/**
+ * @file
+ *
+ * ESP-related defines
+ */
+
+#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
-#include <rte_mempool.h>
-
/**
- * Creates mempool for xen virtio PMD.
- * This function uses memzone_reserve to allocate memory for meta data,
- * and uses grant alloc driver to allocate memory for data area.
- * The input parameters are exactly the same as rte_mempool_create.
+ * ESP Header
*/
-struct rte_mempool *
-rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags);
-
+struct esp_hdr {
+ uint32_t spi; /**< Security Parameters Index */
+ uint32_t seq; /**< packet sequence number */
+} __attribute__((__packed__));
#ifdef __cplusplus
}
#endif
-#endif
+#endif /* RTE_ESP_H_ */
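Both ESP fields are carried big-endian on the wire, so consumers convert before use; a small sketch (the header pointer would come from packet parsing, which is out of scope here):

    #include <stdio.h>
    #include <rte_byteorder.h>
    #include <rte_esp.h>

    static void
    dump_esp(const struct esp_hdr *hdr)
    {
        printf("SPI %u, seq %u\n",
               rte_be_to_cpu_32(hdr->spi),
               rte_be_to_cpu_32(hdr->seq));
    }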
diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h
index 917d42a1..06d7b486 100644
--- a/lib/librte_net/rte_ether.h
+++ b/lib/librte_net/rte_ether.h
@@ -358,7 +358,7 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)
return -1;
struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
- m->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ m->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
/* Copy ether header over rather than moving whole packet */
diff --git a/lib/librte_net/rte_net.c b/lib/librte_net/rte_net.c
index a8c7aff9..a3ca0403 100644
--- a/lib/librte_net/rte_net.c
+++ b/lib/librte_net/rte_net.c
@@ -396,6 +396,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
return pkt_type;
+ hdr_lens->inner_l2_len = 0;
if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
if (unlikely(eh == NULL))
diff --git a/lib/librte_net/rte_net_crc.c b/lib/librte_net/rte_net_crc.c
index 661fe322..0c1bf51a 100644
--- a/lib/librte_net/rte_net_crc.c
+++ b/lib/librte_net/rte_net_crc.c
@@ -205,8 +205,7 @@ rte_net_crc_calc(const void *data,
}
/* Select highest available crc algorithm as default one */
-static inline void __attribute__((constructor))
-rte_net_crc_init(void)
+RTE_INIT(rte_net_crc_init)
{
enum rte_net_crc_alg alg = RTE_NET_CRC_SCALAR;
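RTE_INIT is just a tidier spelling of the constructor attribute being removed here; its expansion in rte_eal.h is roughly:

    #define RTE_INIT(func) \
    static void __attribute__((constructor, used)) func(void)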
diff --git a/lib/librte_pci/Makefile b/lib/librte_pci/Makefile
new file mode 100644
index 00000000..fe213ea6
--- /dev/null
+++ b/lib/librte_pci/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 6WIND S.A.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of 6WIND nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pci.a
+
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += $(WERROR_FLAGS) -O3
+LDLIBS += -lrte_eal
+
+EXPORT_MAP := rte_pci_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_PCI) += rte_pci.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PCI)-include += rte_pci.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pci/rte_pci.c b/lib/librte_pci/rte_pci.c
new file mode 100644
index 00000000..0160fc1e
--- /dev/null
+++ b/lib/librte_pci/rte_pci.c
@@ -0,0 +1,212 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+
+#include "rte_pci.h"
+
+static inline const char *
+get_u8_pciaddr_field(const char *in, void *_u8, char dlm)
+{
+ unsigned long val;
+ uint8_t *u8 = _u8;
+ char *end;
+
+ errno = 0;
+ val = strtoul(in, &end, 16);
+ if (errno != 0 || end[0] != dlm || val > UINT8_MAX) {
+ errno = errno ? errno : EINVAL;
+ return NULL;
+ }
+ *u8 = (uint8_t)val;
+ return end + 1;
+}
+
+static int
+pci_bdf_parse(const char *input, struct rte_pci_addr *dev_addr)
+{
+ const char *in = input;
+
+ dev_addr->domain = 0;
+ in = get_u8_pciaddr_field(in, &dev_addr->bus, ':');
+ if (in == NULL)
+ return -EINVAL;
+ in = get_u8_pciaddr_field(in, &dev_addr->devid, '.');
+ if (in == NULL)
+ return -EINVAL;
+ in = get_u8_pciaddr_field(in, &dev_addr->function, '\0');
+ if (in == NULL)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+pci_dbdf_parse(const char *input, struct rte_pci_addr *dev_addr)
+{
+ const char *in = input;
+ unsigned long val;
+ char *end;
+
+ errno = 0;
+ val = strtoul(in, &end, 16);
+ if (errno != 0 || end[0] != ':' || val > UINT16_MAX)
+ return -EINVAL;
+ dev_addr->domain = (uint16_t)val;
+ in = end + 1;
+ in = get_u8_pciaddr_field(in, &dev_addr->bus, ':');
+ if (in == NULL)
+ return -EINVAL;
+ in = get_u8_pciaddr_field(in, &dev_addr->devid, '.');
+ if (in == NULL)
+ return -EINVAL;
+ in = get_u8_pciaddr_field(in, &dev_addr->function, '\0');
+ if (in == NULL)
+ return -EINVAL;
+ return 0;
+}
+
+int
+eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr)
+{
+ return pci_bdf_parse(input, dev_addr);
+}
+
+int
+eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
+{
+ return pci_dbdf_parse(input, dev_addr);
+}
+
+void
+rte_pci_device_name(const struct rte_pci_addr *addr,
+ char *output, size_t size)
+{
+ RTE_VERIFY(size >= PCI_PRI_STR_SIZE);
+ RTE_VERIFY(snprintf(output, size, PCI_PRI_FMT,
+ addr->domain, addr->bus,
+ addr->devid, addr->function) >= 0);
+}
+
+int
+rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
+ const struct rte_pci_addr *addr2)
+{
+ return rte_pci_addr_cmp(addr, addr2);
+}
+
+int
+rte_pci_addr_cmp(const struct rte_pci_addr *addr,
+ const struct rte_pci_addr *addr2)
+{
+ uint64_t dev_addr, dev_addr2;
+
+ if ((addr == NULL) || (addr2 == NULL))
+ return -1;
+
+ dev_addr = ((uint64_t)addr->domain << 24) |
+ (addr->bus << 16) | (addr->devid << 8) | addr->function;
+ dev_addr2 = ((uint64_t)addr2->domain << 24) |
+ (addr2->bus << 16) | (addr2->devid << 8) | addr2->function;
+
+ if (dev_addr > dev_addr2)
+ return 1;
+ else if (dev_addr < dev_addr2)
+ return -1;
+ else
+ return 0;
+}
+
+int
+rte_pci_addr_parse(const char *str, struct rte_pci_addr *addr)
+{
+ if (pci_bdf_parse(str, addr) == 0 ||
+ pci_dbdf_parse(str, addr) == 0)
+ return 0;
+ return -1;
+}
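A usage sketch of the parse/print round trip: rte_pci_addr_parse() accepts either spelling because it tries the BDF form first and falls back to the domain-qualified form.

    struct rte_pci_addr a;
    char name[PCI_PRI_STR_SIZE];

    if (rte_pci_addr_parse("83:00.1", &a) == 0) {
        rte_pci_device_name(&a, name, sizeof(name));
        /* name is now "0000:83:00.1" (domain defaults to 0) */
    }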
+
+
+/* map a particular resource from a file */
+void *
+pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int additional_flags)
+{
+ void *mapaddr;
+
+ /* Map the PCI memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | additional_flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
+ __func__, fd, requested_addr,
+ (unsigned long)size, (unsigned long)offset,
+ strerror(errno), mapaddr);
+ } else
+ RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
+
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+pci_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the PCI memory resource of device */
+ if (munmap(requested_addr, size)) {
+ RTE_LOG(ERR, EAL, "%s(): cannot munmap(%p, 0x%lx): %s\n",
+ __func__, requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n",
+ requested_addr);
+}
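A sketch of the map/unmap pair on a caller-opened file descriptor (a hypothetical 4 KiB BAR resource file; note the failure sentinel is MAP_FAILED, not NULL):

    void *bar = pci_map_resource(NULL, fd, 0, 4096, 0);

    if (bar != MAP_FAILED) {
        /* ... access device registers through 'bar' ... */
        pci_unmap_resource(bar, 4096);
    }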
diff --git a/lib/librte_pci/rte_pci.h b/lib/librte_pci/rte_pci.h
new file mode 100644
index 00000000..4f2cd187
--- /dev/null
+++ b/lib/librte_pci/rte_pci.h
@@ -0,0 +1,263 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright 2013-2014 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PCI_H_
+#define _RTE_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Library
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+
+/** Formatting string for PCI device identifier, e.g. 0000:00:01.0 */
+#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
+#define PCI_PRI_STR_SIZE sizeof("XXXXXXXX:XX:XX.X")
+
+/** Short formatting string, without domain, for PCI device, e.g. 00:01.0 */
+#define PCI_SHORT_PRI_FMT "%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
+
+/** Nb. of values in PCI device identifier format string. */
+#define PCI_FMT_NVAL 4
+
+/** Nb. of values in PCI resource format. */
+#define PCI_RESOURCE_FMT_NVAL 3
+
+/** Maximum number of PCI resources. */
+#define PCI_MAX_RESOURCE 6
+
+/**
+ * A structure describing an ID for a PCI driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_pci_id {
+ uint32_t class_id; /**< Class ID or RTE_CLASS_ANY_ID. */
+ uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */
+ uint16_t device_id; /**< Device ID or PCI_ANY_ID. */
+ uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
+ uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */
+};
+
+/**
+ * A structure describing the location of a PCI device.
+ */
+struct rte_pci_addr {
+ uint32_t domain; /**< Device domain */
+ uint8_t bus; /**< Device bus */
+ uint8_t devid; /**< Device ID */
+ uint8_t function; /**< Device function. */
+};
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+#define RTE_CLASS_ANY_ID (0xffffff)
+
+/**
+ * A structure describing a PCI mapping.
+ */
+struct pci_map {
+ void *addr;
+ char *path;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t phaddr;
+};
+
+struct pci_msix_table {
+ int bar_index;
+ uint32_t offset;
+ uint32_t size;
+};
+
+/**
+ * A structure describing a mapped PCI resource.
+ * For multi-process we need to reproduce all PCI mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_pci_resource {
+ TAILQ_ENTRY(mapped_pci_resource) next;
+
+ struct rte_pci_addr pci_addr;
+ char path[PATH_MAX];
+ int nb_maps;
+ struct pci_map maps[PCI_MAX_RESOURCE];
+ struct pci_msix_table msix_table;
+};
+
+
+/** mapped pci device list */
+TAILQ_HEAD(mapped_pci_res_list, mapped_pci_resource);
+
+/**
+ * @deprecated
+ * Utility function to produce a PCI Bus-Device-Function value
+ * given a string representation. Assumes that the BDF is provided without
+ * a domain prefix (i.e. the returned domain is always 0).
+ *
+ * @param input
+ * The input string to be parsed. Should have the format XX:XX.X
+ * @param dev_addr
+ * The PCI Bus-Device-Function address to be returned.
+ * Domain will always be returned as 0
+ * @return
+ * 0 on success, negative on error.
+ */
+int eal_parse_pci_BDF(const char *input, struct rte_pci_addr *dev_addr);
+
+/**
+ * @deprecated
+ * Utility function to produce a PCI Bus-Device-Function value
+ * given a string representation. Assumes that the BDF is provided including
+ * a domain prefix.
+ *
+ * @param input
+ * The input string to be parsed. Should have the format XXXX:XX:XX.X
+ * @param dev_addr
+ * The PCI Bus-Device-Function address to be returned
+ * @return
+ * 0 on success, negative on error.
+ */
+int eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr);
+
+/**
+ * Utility function to write a PCI device name; the name can later be used
+ * to retrieve the corresponding rte_pci_addr using the eal_parse_pci_*
+ * BDF helpers.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address
+ * @param output
+ * The output buffer string
+ * @param size
+ * The output buffer size
+ */
+void rte_pci_device_name(const struct rte_pci_addr *addr,
+ char *output, size_t size);
+
+/**
+ * @deprecated
+ * Utility function to compare two PCI device addresses.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to compare
+ * @param addr2
+ * The PCI Bus-Device-Function address to compare
+ * @return
+ * 0 on equal PCI address.
+ * Positive on addr is greater than addr2.
+ * Negative on addr is less than addr2, or error.
+ */
+int rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
+ const struct rte_pci_addr *addr2);
+
+/**
+ * Utility function to compare two PCI device addresses.
+ *
+ * @param addr
+ * The PCI Bus-Device-Function address to compare
+ * @param addr2
+ * The PCI Bus-Device-Function address to compare
+ * @return
+ * 0 on equal PCI address.
+ * Positive on addr is greater than addr2.
+ * Negative on addr is less than addr2, or error.
+ */
+int rte_pci_addr_cmp(const struct rte_pci_addr *addr,
+ const struct rte_pci_addr *addr2);
+
+
+/**
+ * Utility function to parse a string into a PCI location.
+ *
+ * @param str
+ * The string to parse
+ * @param addr
+ * The reference to the structure where the location
+ * is stored.
+ * @return
+ * 0 on success
+ * <0 otherwise
+ */
+int rte_pci_addr_parse(const char *str, struct rte_pci_addr *addr);
+
+/**
+ * Map a particular resource from a file.
+ *
+ * @param requested_addr
+ * The starting address for the new mapping range.
+ * @param fd
+ * The file descriptor.
+ * @param offset
+ * The offset for the mapping range.
+ * @param size
+ * The size for the mapping range.
+ * @param additional_flags
+ * The additional flags for the mapping range.
+ * @return
+ * - On success, the function returns a pointer to the mapped area.
+ * - On error, the value MAP_FAILED is returned.
+ */
+void *pci_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+
+/**
+ * Unmap a particular resource.
+ *
+ * @param requested_addr
+ * The address for the unmapping range.
+ * @param size
+ * The size for the unmapping range.
+ */
+void pci_unmap_resource(void *requested_addr, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PCI_H_ */
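
The helpers above combine naturally: rte_pci_addr_parse() accepts both the domain-qualified and the domainless formats, rte_pci_device_name() writes an address back out as a canonical string, and rte_pci_addr_cmp() orders two addresses like strcmp(). A minimal sketch of the round trip (the address strings are arbitrary examples, not taken from this patch):

#include <stdio.h>
#include <rte_pci.h>

static int
pci_addr_demo(void)
{
	struct rte_pci_addr a, b;
	char name[32]; /* large enough for "XXXX:XX:XX.X" */

	if (rte_pci_addr_parse("0000:01:00.0", &a) < 0)
		return -1;
	if (rte_pci_addr_parse("01:00.1", &b) < 0) /* domain defaults to 0 */
		return -1;

	/* write 'a' back out in canonical domain-qualified form */
	rte_pci_device_name(&a, name, sizeof(name));
	printf("parsed back: %s\n", name);

	/* <0, 0 or >0, analogous to strcmp() */
	return rte_pci_addr_cmp(&a, &b);
}
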
diff --git a/lib/librte_pci/rte_pci_version.map b/lib/librte_pci/rte_pci_version.map
new file mode 100644
index 00000000..15d93d95
--- /dev/null
+++ b/lib/librte_pci/rte_pci_version.map
@@ -0,0 +1,14 @@
+DPDK_17.11 {
+ global:
+
+ eal_parse_pci_BDF;
+ eal_parse_pci_DomBDF;
+ rte_pci_addr_cmp;
+ rte_pci_addr_parse;
+ rte_pci_device_name;
+ pci_map_resource;
+ pci_unmap_resource;
+ rte_eal_compare_pci_addr;
+
+ local: *;
+};
diff --git a/lib/librte_pdump/Makefile b/lib/librte_pdump/Makefile
index 1c03bcbb..11c3e4e9 100644
--- a/lib/librte_pdump/Makefile
+++ b/lib/librte_pdump/Makefile
@@ -37,10 +37,11 @@ LIB = librte_pdump.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
CFLAGS += -D_GNU_SOURCE
LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev
EXPORT_MAP := rte_pdump_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PDUMP) := rte_pdump.c
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index 729e79a3..e6182d35 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -207,7 +207,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
}
static uint16_t
-pdump_rx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_params)
@@ -217,7 +217,7 @@ pdump_rx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
}
static uint16_t
-pdump_tx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
pdump_copy(pkts, nb_pkts, user_params);
@@ -225,7 +225,7 @@ pdump_tx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
}
static int
-pdump_regitser_rx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -279,7 +279,7 @@ pdump_regitser_rx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
}
static int
-pdump_regitser_tx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -337,7 +337,7 @@ static int
set_pdump_rxtx_cbs(struct pdump_request *p)
{
uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
- uint8_t port;
+ uint16_t port;
int ret = 0;
uint32_t flags;
uint16_t operation;
@@ -764,7 +764,7 @@ pdump_validate_flags(uint32_t flags)
}
static int
-pdump_validate_port(uint8_t port, char *name)
+pdump_validate_port(uint16_t port, char *name)
{
int ret = 0;
@@ -828,7 +828,7 @@ pdump_prepare_client_request(char *device, uint16_t queue,
}
int
-rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
struct rte_ring *ring,
struct rte_mempool *mp,
void *filter)
@@ -876,7 +876,7 @@ rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
}
int
-rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags)
+rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
int ret = 0;
char name[DEVICE_ID_SIZE];
diff --git a/lib/librte_pdump/rte_pdump.h b/lib/librte_pdump/rte_pdump.h
index ba6e39b0..4ec0a106 100644
--- a/lib/librte_pdump/rte_pdump.h
+++ b/lib/librte_pdump/rte_pdump.h
@@ -113,7 +113,7 @@ rte_pdump_uninit(void);
*/
int
-rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
struct rte_ring *ring,
struct rte_mempool *mp,
void *filter);
@@ -136,7 +136,7 @@ rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
*/
int
-rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags);
+rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags);
/**
* Enables packet capturing on given device id and queue.
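
With the port argument widened to 16 bits, the capture API declared above is otherwise unchanged. A minimal sketch of turning RX capture on and off for one port/queue pair, assuming an existing pktmbuf pool and that rte_pdump_init() has already run (the ring name and size are illustrative):

#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_pdump.h>

static int
capture_rx(uint16_t port, uint16_t queue, struct rte_mempool *pool)
{
	struct rte_ring *ring;
	int ret;

	ring = rte_ring_create("pdump_ring", 1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (ring == NULL)
		return -1;

	ret = rte_pdump_enable(port, queue, RTE_PDUMP_FLAG_RX,
			ring, pool, NULL);
	if (ret < 0)
		return ret;

	/* ... dequeue and process captured mbufs from 'ring' ... */

	return rte_pdump_disable(port, queue, RTE_PDUMP_FLAG_RX);
}
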
diff --git a/lib/librte_pipeline/Makefile b/lib/librte_pipeline/Makefile
index 7a835fd5..a8285738 100644
--- a/lib/librte_pipeline/Makefile
+++ b/lib/librte_pipeline/Makefile
@@ -38,6 +38,8 @@ LIB = librte_pipeline.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_table
+LDLIBS += -lrte_port
EXPORT_MAP := rte_pipeline_version.map
diff --git a/lib/librte_pipeline/rte_pipeline.c b/lib/librte_pipeline/rte_pipeline.c
index 7f8fbac5..8611a88b 100644
--- a/lib/librte_pipeline/rte_pipeline.c
+++ b/lib/librte_pipeline/rte_pipeline.c
@@ -36,7 +36,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
diff --git a/lib/librte_port/Makefile b/lib/librte_port/Makefile
index 76629a13..139dc59a 100644
--- a/lib/librte_port/Makefile
+++ b/lib/librte_port/Makefile
@@ -38,6 +38,11 @@ LIB = librte_port.a
ifeq ($(CONFIG_RTE_PORT_PCAP),y)
LDLIBS += -lpcap
endif
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev
+LDLIBS += -lrte_ip_frag -lrte_sched
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+LDLIBS += -lrte_kni
+endif
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/lib/librte_port/rte_port_ethdev.c b/lib/librte_port/rte_port_ethdev.c
index d5c5fba5..4ed10f27 100644
--- a/lib/librte_port/rte_port_ethdev.c
+++ b/lib/librte_port/rte_port_ethdev.c
@@ -60,7 +60,7 @@ struct rte_port_ethdev_reader {
struct rte_port_in_stats stats;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
@@ -156,7 +156,7 @@ struct rte_port_ethdev_writer {
uint16_t tx_buf_count;
uint64_t bsz_mask;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
@@ -337,7 +337,7 @@ struct rte_port_ethdev_writer_nodrop {
uint64_t bsz_mask;
uint64_t n_retries;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
diff --git a/lib/librte_port/rte_port_ethdev.h b/lib/librte_port/rte_port_ethdev.h
index 201a79e4..f5ed9ab2 100644
--- a/lib/librte_port/rte_port_ethdev.h
+++ b/lib/librte_port/rte_port_ethdev.h
@@ -54,7 +54,7 @@ extern "C" {
/** ethdev_reader port parameters */
struct rte_port_ethdev_reader_params {
/** NIC RX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC RX queue ID */
uint16_t queue_id;
@@ -66,7 +66,7 @@ extern struct rte_port_in_ops rte_port_ethdev_reader_ops;
/** ethdev_writer port parameters */
struct rte_port_ethdev_writer_params {
/** NIC RX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC RX queue ID */
uint16_t queue_id;
@@ -82,7 +82,7 @@ extern struct rte_port_out_ops rte_port_ethdev_writer_ops;
/** ethdev_writer_nodrop port parameters */
struct rte_port_ethdev_writer_nodrop_params {
/** NIC RX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC RX queue ID */
uint16_t queue_id;
diff --git a/lib/librte_power/Makefile b/lib/librte_power/Makefile
index 06cd10e8..1b1491d7 100644
--- a/lib/librte_power/Makefile
+++ b/lib/librte_power/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_power.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -fno-strict-aliasing
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_power_version.map
diff --git a/lib/librte_power/channel_commands.h b/lib/librte_power/channel_commands.h
index 383897bf..f0f5f0a2 100644
--- a/lib/librte_power/channel_commands.h
+++ b/lib/librte_power/channel_commands.h
@@ -39,6 +39,7 @@ extern "C" {
#endif
#include <stdint.h>
+#include <stdbool.h>
/* Maximum number of channels per VM */
#define CHANNEL_CMDS_MAX_VM_CHANNELS 64
@@ -46,17 +47,60 @@ extern "C" {
/* Valid Commands */
#define CPU_POWER 1
#define CPU_POWER_CONNECT 2
+#define PKT_POLICY 3
/* CPU Power Command Scaling */
#define CPU_POWER_SCALE_UP 1
#define CPU_POWER_SCALE_DOWN 2
#define CPU_POWER_SCALE_MAX 3
#define CPU_POWER_SCALE_MIN 4
+#define CPU_POWER_ENABLE_TURBO 5
+#define CPU_POWER_DISABLE_TURBO 6
+#define HOURS 24
+
+#define MAX_VFS 10
+#define VM_MAX_NAME_SZ 32
+
+#define MAX_VCPU_PER_VM 8
+
+struct t_boost_status {
+ bool tbEnabled;
+};
+
+struct timer_profile {
+ int busy_hours[HOURS];
+ int quiet_hours[HOURS];
+ int hours_to_use_traffic_profile[HOURS];
+};
+
+enum workload {HIGH, MEDIUM, LOW};
+enum policy_to_use {
+ TRAFFIC,
+ TIME,
+ WORKLOAD
+};
+
+struct traffic {
+ uint32_t min_packet_thresh;
+ uint32_t avg_max_packet_thresh;
+ uint32_t max_max_packet_thresh;
+};
struct channel_packet {
uint64_t resource_id; /**< core_num, device */
uint32_t unit; /**< scale down/up/min/max */
uint32_t command; /**< Power, IO, etc */
+ char vm_name[VM_MAX_NAME_SZ];
+
+ uint64_t vfid[MAX_VFS];
+ int nb_mac_to_monitor;
+ struct traffic traffic_policy;
+ uint8_t vcpu_to_control[MAX_VCPU_PER_VM];
+ uint8_t num_vcpu;
+ struct timer_profile timer_policy;
+ enum workload workload;
+ enum policy_to_use policy_to_use;
+ struct t_boost_status t_boost_status;
};
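
The extended channel_packet lets a guest push an entire power policy to the host in one PKT_POLICY message, sent through the new public wrapper added below in guest_channel.c/h. A sketch with purely illustrative values (the VM name, hour indices and counts are invented for the example):

#include <string.h>
#include "channel_commands.h"
#include "guest_channel.h"

static int
send_time_policy(unsigned int lcore_id)
{
	struct channel_packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.command = PKT_POLICY;
	pkt.policy_to_use = TIME;
	strcpy(pkt.vm_name, "vm0");          /* example name, fits VM_MAX_NAME_SZ */
	pkt.nb_mac_to_monitor = 1;
	pkt.vfid[0] = 0;
	pkt.timer_policy.busy_hours[9] = 1;  /* expect load at 09:00 */
	pkt.timer_policy.quiet_hours[2] = 1; /* expect idle at 02:00 */

	return rte_power_guest_channel_send_msg(&pkt, lcore_id);
}
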
diff --git a/lib/librte_power/guest_channel.c b/lib/librte_power/guest_channel.c
index 85c92fab..fa5de0f5 100644
--- a/lib/librte_power/guest_channel.c
+++ b/lib/librte_power/guest_channel.c
@@ -148,6 +148,13 @@ guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id)
return 0;
}
+int rte_power_guest_channel_send_msg(struct channel_packet *pkt,
+ unsigned int lcore_id)
+{
+ return guest_channel_send_msg(pkt, lcore_id);
+}
+
+
void
guest_channel_host_disconnect(unsigned lcore_id)
{
diff --git a/lib/librte_power/guest_channel.h b/lib/librte_power/guest_channel.h
index 9e18af52..741339ca 100644
--- a/lib/librte_power/guest_channel.h
+++ b/lib/librte_power/guest_channel.h
@@ -81,6 +81,21 @@ void guest_channel_host_disconnect(unsigned lcore_id);
*/
int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
+/**
+ * Send a message contained in pkt over the Virtio-Serial to the host endpoint.
+ *
+ * @param pkt
+ * Pointer to a populated struct channel_packet
+ *
+ * @param lcore_id
+ *  lcore id of the channel used to send the message.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int rte_power_guest_channel_send_msg(struct channel_packet *pkt,
+ unsigned int lcore_id);
#ifdef __cplusplus
}
diff --git a/lib/librte_power/rte_power.c b/lib/librte_power/rte_power.c
index 998ed1c9..b327a865 100644
--- a/lib/librte_power/rte_power.c
+++ b/lib/librte_power/rte_power.c
@@ -50,6 +50,9 @@ rte_power_freq_change_t rte_power_freq_up = NULL;
rte_power_freq_change_t rte_power_freq_down = NULL;
rte_power_freq_change_t rte_power_freq_max = NULL;
rte_power_freq_change_t rte_power_freq_min = NULL;
+rte_power_freq_change_t rte_power_turbo_status;
+rte_power_freq_change_t rte_power_freq_enable_turbo;
+rte_power_freq_change_t rte_power_freq_disable_turbo;
int
rte_power_set_env(enum power_management_env env)
@@ -65,6 +68,9 @@ rte_power_set_env(enum power_management_env env)
rte_power_freq_down = rte_power_acpi_cpufreq_freq_down;
rte_power_freq_min = rte_power_acpi_cpufreq_freq_min;
rte_power_freq_max = rte_power_acpi_cpufreq_freq_max;
+ rte_power_turbo_status = rte_power_acpi_turbo_status;
+ rte_power_freq_enable_turbo = rte_power_acpi_enable_turbo;
+ rte_power_freq_disable_turbo = rte_power_acpi_disable_turbo;
} else if (env == PM_ENV_KVM_VM) {
rte_power_freqs = rte_power_kvm_vm_freqs;
rte_power_get_freq = rte_power_kvm_vm_get_freq;
@@ -73,6 +79,9 @@ rte_power_set_env(enum power_management_env env)
rte_power_freq_down = rte_power_kvm_vm_freq_down;
rte_power_freq_min = rte_power_kvm_vm_freq_min;
rte_power_freq_max = rte_power_kvm_vm_freq_max;
+ rte_power_turbo_status = rte_power_kvm_vm_turbo_status;
+ rte_power_freq_enable_turbo = rte_power_kvm_vm_enable_turbo;
+ rte_power_freq_disable_turbo = rte_power_kvm_vm_disable_turbo;
} else {
RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n",
env);
diff --git a/lib/librte_power/rte_power.h b/lib/librte_power/rte_power.h
index 67e0ec02..b17b7a53 100644
--- a/lib/librte_power/rte_power.h
+++ b/lib/librte_power/rte_power.h
@@ -236,6 +236,47 @@ extern rte_power_freq_change_t rte_power_freq_max;
*/
extern rte_power_freq_change_t rte_power_freq_min;
+/**
+ * Query the Turbo Boost status of a specific lcore.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 Turbo Boost is enabled for this lcore.
+ * - 0 Turbo Boost is disabled for this lcore.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_turbo_status;
+
+/**
+ * Enable Turbo Boost for this lcore.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_freq_enable_turbo;
+
+/**
+ * Disable Turbo Boost for this lcore.
+ * Review each environment's specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+extern rte_power_freq_change_t rte_power_freq_disable_turbo;
+
+
#ifdef __cplusplus
}
#endif
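
Like the existing rte_power_freq_* hooks, the three new pointers are bound to either the ACPI or the KVM implementation when the environment is set. A sketch of a caller toggling turbo after rte_power_init() has succeeded on the lcore:

#include <rte_power.h>

static void
toggle_turbo(unsigned int lcore_id)
{
	int status = rte_power_turbo_status(lcore_id);

	if (status < 0)
		return; /* error, or not supported (KVM returns -ENOTSUP) */

	if (status == 0)
		/* on the ACPI backend this also re-applies the max frequency */
		rte_power_freq_enable_turbo(lcore_id);
	else
		rte_power_freq_disable_turbo(lcore_id);
}
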
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.c b/lib/librte_power/rte_power_acpi_cpufreq.c
index a56c9b59..01ac5acb 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.c
+++ b/lib/librte_power/rte_power_acpi_cpufreq.c
@@ -87,6 +87,14 @@
#define POWER_SYSFILE_SETSPEED \
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_setspeed"
+/*
+ * MSR related
+ */
+#define PLATFORM_INFO 0x0CE
+#define TURBO_RATIO_LIMIT 0x1AD
+#define IA32_PERF_CTL 0x199
+#define CORE_TURBO_DISABLE_BIT ((uint64_t)1<<32)
+
enum power_state {
POWER_IDLE = 0,
POWER_ONGOING,
@@ -105,6 +113,8 @@ struct rte_power_info {
char governor_ori[32]; /**< Original governor name */
uint32_t curr_idx; /**< Freq index in freqs array */
volatile uint32_t state; /**< Power in use state */
+ uint16_t turbo_available; /**< Turbo Boost available */
+ uint16_t turbo_enable; /**< Turbo Boost enable/disable */
} __rte_cache_aligned;
static struct rte_power_info lcore_power_info[RTE_MAX_LCORE];
@@ -244,6 +254,18 @@ power_get_available_freqs(struct rte_power_info *pi)
POWER_CONVERT_TO_DECIMAL);
}
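+ /*
+  * acpi-cpufreq advertises Turbo Boost as an extra pseudo-frequency
+  * exactly 1000 KHz above the nominal maximum, so a 1000 KHz gap
+  * between the two highest entries marks turbo as available.
+  */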
+ if ((pi->freqs[0]-1000) == pi->freqs[1]) {
+ pi->turbo_available = 1;
+ pi->turbo_enable = 1;
+ POWER_DEBUG_TRACE("Lcore %u Can do Turbo Boost\n",
+ pi->lcore_id);
+ } else {
+ pi->turbo_available = 0;
+ pi->turbo_enable = 0;
+ POWER_DEBUG_TRACE("Turbo Boost not available on Lcore %u\n",
+ pi->lcore_id);
+ }
+
ret = 0;
POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
count, pi->lcore_id);
@@ -525,7 +547,17 @@ rte_power_acpi_cpufreq_freq_max(unsigned lcore_id)
}
/* Frequencies in the array are from high to low. */
- return set_freq_internal(&lcore_power_info[lcore_id], 0);
+ if (lcore_power_info[lcore_id].turbo_available) {
+ if (lcore_power_info[lcore_id].turbo_enable)
+ /* Set to Turbo */
+ return set_freq_internal(
+ &lcore_power_info[lcore_id], 0);
+ else
+ /* Set to max non-turbo */
+ return set_freq_internal(
+ &lcore_power_info[lcore_id], 1);
+ } else
+ return set_freq_internal(&lcore_power_info[lcore_id], 0);
}
int
@@ -543,3 +575,80 @@ rte_power_acpi_cpufreq_freq_min(unsigned lcore_id)
/* Frequencies in the array are from high to low. */
return set_freq_internal(pi, pi->nb_freqs - 1);
}
+
+
+int
+rte_power_acpi_turbo_status(unsigned int lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+
+ return pi->turbo_enable;
+}
+
+
+int
+rte_power_acpi_enable_turbo(unsigned int lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+
+ if (pi->turbo_available)
+ pi->turbo_enable = 1;
+ else {
+ pi->turbo_enable = 0;
+ RTE_LOG(ERR, POWER,
+ "Failed to enable turbo on lcore %u\n",
+ lcore_id);
+ return -1;
+ }
+
+ /* The effective max may have changed, so re-apply the max frequency */
+ if (rte_power_acpi_cpufreq_freq_max(lcore_id) < 0) {
+ RTE_LOG(ERR, POWER,
+ "Failed to set frequency of lcore %u to max\n",
+ lcore_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_power_acpi_disable_turbo(unsigned int lcore_id)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+
+ pi->turbo_enable = 0;
+
+ if ((pi->turbo_available) && (pi->curr_idx <= 1)) {
+ /* Try to set freq to max by default coming out of turbo */
+ if (rte_power_acpi_cpufreq_freq_max(lcore_id) < 0) {
+ RTE_LOG(ERR, POWER,
+ "Failed to set frequency of lcore %u to max\n",
+ lcore_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.h b/lib/librte_power/rte_power_acpi_cpufreq.h
index 68578e9b..eee0ca0a 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.h
+++ b/lib/librte_power/rte_power_acpi_cpufreq.h
@@ -185,6 +185,46 @@ int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
*/
int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
+/**
+ * Get the turbo status of a specific lcore.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 Turbo Boost is enabled on this lcore.
+ * - 0 Turbo Boost is disabled on this lcore.
+ * - Negative on error.
+ */
+int rte_power_acpi_turbo_status(unsigned int lcore_id);
+
+/**
+ * Enable Turbo Boost on a specific lcore.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 Turbo Boost is enabled successfully on this lcore.
+ * - Negative on error.
+ */
+int rte_power_acpi_enable_turbo(unsigned int lcore_id);
+
+/**
+ * Disable Turbo Boost on a specific lcore.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 0 Turbo Boost disabled successfully on this lcore.
+ * - Negative on error.
+ */
+int rte_power_acpi_disable_turbo(unsigned int lcore_id);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_power/rte_power_kvm_vm.c b/lib/librte_power/rte_power_kvm_vm.c
index a1badf34..99060625 100644
--- a/lib/librte_power/rte_power_kvm_vm.c
+++ b/lib/librte_power/rte_power_kvm_vm.c
@@ -134,3 +134,22 @@ rte_power_kvm_vm_freq_min(unsigned lcore_id)
{
return send_msg(lcore_id, CPU_POWER_SCALE_MIN);
}
+
+int
+rte_power_kvm_vm_turbo_status(__attribute__((unused)) unsigned int lcore_id)
+{
+ RTE_LOG(ERR, POWER, "rte_power_turbo_status is not implemented for Virtual Machine Power Management\n");
+ return -ENOTSUP;
+}
+
+int
+rte_power_kvm_vm_enable_turbo(unsigned int lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_ENABLE_TURBO);
+}
+
+int
+rte_power_kvm_vm_disable_turbo(unsigned int lcore_id)
+{
+ return send_msg(lcore_id, CPU_POWER_DISABLE_TURBO);
+}
diff --git a/lib/librte_power/rte_power_kvm_vm.h b/lib/librte_power/rte_power_kvm_vm.h
index dcbc878a..9af41d64 100644
--- a/lib/librte_power/rte_power_kvm_vm.h
+++ b/lib/librte_power/rte_power_kvm_vm.h
@@ -172,8 +172,41 @@ int rte_power_kvm_vm_freq_max(unsigned lcore_id);
*/
int rte_power_kvm_vm_freq_min(unsigned lcore_id);
+/**
+ * Get the Turbo Boost status of a specific lcore. Not supported in the
+ * VM Power Management environment.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * -ENOTSUP
+ */
+int rte_power_kvm_vm_turbo_status(unsigned int lcore_id);
+
+/**
+ * Request the host to enable Turbo Boost for this lcore.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_enable_turbo(unsigned int lcore_id);
+
+/**
+ * Request the host to disable Turbo Boost for this lcore.
+ * This function is not thread-safe; callers must serialize access externally.
+ *
+ * @param lcore_id
+ * lcore id.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int rte_power_kvm_vm_disable_turbo(unsigned int lcore_id);
#ifdef __cplusplus
}
#endif
-
#endif
diff --git a/lib/librte_power/rte_power_version.map b/lib/librte_power/rte_power_version.map
index db75ff3e..96dc42ec 100644
--- a/lib/librte_power/rte_power_version.map
+++ b/lib/librte_power/rte_power_version.map
@@ -16,3 +16,13 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.11 {
+ global:
+
+ rte_power_guest_channel_send_msg;
+ rte_power_freq_disable_turbo;
+ rte_power_freq_enable_turbo;
+ rte_power_turbo_status;
+
+} DPDK_2.0;
\ No newline at end of file
diff --git a/lib/librte_reorder/Makefile b/lib/librte_reorder/Makefile
index 4e44e72f..5d38d712 100644
--- a/lib/librte_reorder/Makefile
+++ b/lib/librte_reorder/Makefile
@@ -36,6 +36,7 @@ LIB = librte_reorder.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf
EXPORT_MAP := rte_reorder_version.map
diff --git a/lib/librte_reorder/rte_reorder.c b/lib/librte_reorder/rte_reorder.c
index 010dff68..867775da 100644
--- a/lib/librte_reorder/rte_reorder.c
+++ b/lib/librte_reorder/rte_reorder.c
@@ -36,7 +36,6 @@
#include <rte_log.h>
#include <rte_mbuf.h>
-#include <rte_memzone.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile
index 3e2f4b87..e34d9d95 100644
--- a/lib/librte_ring/Makefile
+++ b/lib/librte_ring/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_ring.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_ring_version.map
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 8f5a4937..5e9b3b7b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -174,7 +174,7 @@ struct rte_ring {
* ring space will be wasted.
*/
#define RING_F_EXACT_SZ 0x0004
-#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
+#define RTE_RING_SZ_MASK (0x7fffffffU) /**< Ring size mask */
/* @internal defines for passing to the enqueue dequeue worker functions */
#define __IS_SP 1
diff --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile
index 18274e73..04785f72 100644
--- a/lib/librte_sched/Makefile
+++ b/lib/librte_sched/Makefile
@@ -43,6 +43,8 @@ CFLAGS_rte_red.o := -D_GNU_SOURCE
LDLIBS += -lm
LDLIBS += -lrt
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_net
+LDLIBS += -lrte_timer
EXPORT_MAP := rte_sched_version.map
@@ -55,7 +57,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SCHED) += rte_sched.c rte_red.c rte_approx.c
SRCS-$(CONFIG_RTE_LIBRTE_SCHED) += rte_reciprocal.c
# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_bitmap.h rte_sched_common.h rte_red.h rte_approx.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_sched_common.h rte_red.h rte_approx.h
SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include += rte_reciprocal.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index b7cba110..a2d0d685 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -42,9 +42,9 @@
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
+#include <rte_bitmap.h>
#include "rte_sched.h"
-#include "rte_bitmap.h"
#include "rte_sched_common.h"
#include "rte_approx.h"
#include "rte_reciprocal.h"
diff --git a/lib/librte_security/Makefile b/lib/librte_security/Makefile
new file mode 100644
index 00000000..bb93ec33
--- /dev/null
+++ b/lib/librte_security/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_security.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool
+
+# library source files
+SRCS-y += rte_security.c
+
+# export include files
+SYMLINK-y-include += rte_security.h
+SYMLINK-y-include += rte_security_driver.h
+
+# versioning export map
+EXPORT_MAP := rte_security_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_security/rte_security.c b/lib/librte_security/rte_security.c
new file mode 100644
index 00000000..1227fca8
--- /dev/null
+++ b/lib/librte_security/rte_security.c
@@ -0,0 +1,149 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "rte_security.h"
+#include "rte_security_driver.h"
+
+struct rte_security_session *
+rte_security_session_create(struct rte_security_ctx *instance,
+ struct rte_security_session_conf *conf,
+ struct rte_mempool *mp)
+{
+ struct rte_security_session *sess = NULL;
+
+ if (conf == NULL)
+ return NULL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_create, NULL);
+
+ if (rte_mempool_get(mp, (void *)&sess))
+ return NULL;
+
+ if (instance->ops->session_create(instance->device, conf, sess, mp)) {
+ rte_mempool_put(mp, (void *)sess);
+ return NULL;
+ }
+ instance->sess_cnt++;
+
+ return sess;
+}
+
+int
+rte_security_session_update(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_security_session_conf *conf)
+{
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_update, -ENOTSUP);
+ return instance->ops->session_update(instance->device, sess, conf);
+}
+
+int
+rte_security_session_stats_get(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_security_stats *stats)
+{
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_stats_get, -ENOTSUP);
+ return instance->ops->session_stats_get(instance->device, sess, stats);
+}
+
+int
+rte_security_session_destroy(struct rte_security_ctx *instance,
+ struct rte_security_session *sess)
+{
+ int ret;
+ struct rte_mempool *mp = rte_mempool_from_obj(sess);
+
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_destroy, -ENOTSUP);
+
+ if (instance->sess_cnt)
+ instance->sess_cnt--;
+
+ ret = instance->ops->session_destroy(instance->device, sess);
+ if (!ret)
+ rte_mempool_put(mp, (void *)sess);
+
+ return ret;
+}
+
+int
+rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_mbuf *m, void *params)
+{
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->set_pkt_metadata, -ENOTSUP);
+ return instance->ops->set_pkt_metadata(instance->device,
+ sess, m, params);
+}
+
+const struct rte_security_capability *
+rte_security_capabilities_get(struct rte_security_ctx *instance)
+{
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
+ return instance->ops->capabilities_get(instance->device);
+}
+
+const struct rte_security_capability *
+rte_security_capability_get(struct rte_security_ctx *instance,
+ struct rte_security_capability_idx *idx)
+{
+ const struct rte_security_capability *capabilities;
+ const struct rte_security_capability *capability;
+ uint16_t i = 0;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
+ capabilities = instance->ops->capabilities_get(instance->device);
+
+ if (capabilities == NULL)
+ return NULL;
+
+ while ((capability = &capabilities[i++])->action
+ != RTE_SECURITY_ACTION_TYPE_NONE) {
+ if (capability->action == idx->action &&
+ capability->protocol == idx->protocol) {
+ if (idx->protocol == RTE_SECURITY_PROTOCOL_IPSEC) {
+ if (capability->ipsec.proto ==
+ idx->ipsec.proto &&
+ capability->ipsec.mode ==
+ idx->ipsec.mode &&
+ capability->ipsec.direction ==
+ idx->ipsec.direction)
+ return capability;
+ }
+ }
+ }
+
+ return NULL;
+}
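
rte_security_capability_get() above walks the driver's capability array up to the RTE_SECURITY_ACTION_TYPE_NONE terminator, matching action, protocol and, for IPsec, proto/mode/direction. A sketch of a caller probing for inline-crypto ESP tunnel egress support (the ctx is assumed to come from the device, e.g. via rte_eth_dev_get_sec_ctx()):

#include <rte_security.h>

static int
supports_esp_tunnel_egress(struct rte_security_ctx *ctx)
{
	struct rte_security_capability_idx idx = {
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
	};

	return rte_security_capability_get(ctx, &idx) != NULL;
}
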
diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
new file mode 100644
index 00000000..7e687d29
--- /dev/null
+++ b/lib/librte_security/rte_security.h
@@ -0,0 +1,529 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 NXP.
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SECURITY_H_
+#define _RTE_SECURITY_H_
+
+/**
+ * @file rte_security.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE Security Common Definitions
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+/** IPSec protocol mode */
+enum rte_security_ipsec_sa_mode {
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ /**< IPSec Transport mode */
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ /**< IPSec Tunnel mode */
+};
+
+/** IPSec Protocol */
+enum rte_security_ipsec_sa_protocol {
+ RTE_SECURITY_IPSEC_SA_PROTO_AH,
+ /**< AH protocol */
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ /**< ESP protocol */
+};
+
+/** IPSEC tunnel type */
+enum rte_security_ipsec_tunnel_type {
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4,
+ /**< Outer header is IPv4 */
+ RTE_SECURITY_IPSEC_TUNNEL_IPV6,
+ /**< Outer header is IPv6 */
+};
+
+/**
+ * Security context for crypto/eth devices
+ *
+ * Security instance for each driver to register security operations.
+ * The application can get the security context from the crypto/eth device id
+ * using the rte_cryptodev_get_sec_ctx()/rte_eth_dev_get_sec_ctx() APIs.
+ * This structure is used to identify the device (crypto/eth) for which the
+ * security operations need to be performed.
+ */
+struct rte_security_ctx {
+ void *device;
+ /**< Crypto/ethernet device attached */
+ struct rte_security_ops *ops;
+ /**< Pointer to security ops for the device */
+ uint16_t sess_cnt;
+ /**< Number of sessions attached to this context */
+};
+
+/**
+ * IPSEC tunnel parameters
+ *
+ * These parameters are used to build outbound tunnel headers.
+ */
+struct rte_security_ipsec_tunnel_param {
+ enum rte_security_ipsec_tunnel_type type;
+ /**< Tunnel type: IPv4 or IPv6 */
+ RTE_STD_C11
+ union {
+ struct {
+ struct in_addr src_ip;
+ /**< IPv4 source address */
+ struct in_addr dst_ip;
+ /**< IPv4 destination address */
+ uint8_t dscp;
+ /**< IPv4 Differentiated Services Code Point */
+ uint8_t df;
+ /**< IPv4 Don't Fragment bit */
+ uint8_t ttl;
+ /**< IPv4 Time To Live */
+ } ipv4;
+ /**< IPv4 header parameters */
+ struct {
+ struct in6_addr src_addr;
+ /**< IPv6 source address */
+ struct in6_addr dst_addr;
+ /**< IPv6 destination address */
+ uint8_t dscp;
+ /**< IPv6 Differentiated Services Code Point */
+ uint32_t flabel;
+ /**< IPv6 flow label */
+ uint8_t hlimit;
+ /**< IPv6 hop limit */
+ } ipv6;
+ /**< IPv6 header parameters */
+ };
+};
+
+/**
+ * IPsec Security Association option flags
+ */
+struct rte_security_ipsec_sa_options {
+ /** Extended Sequence Numbers (ESN)
+ *
+ * * 1: Use extended (64 bit) sequence numbers
+ * * 0: Use normal sequence numbers
+ */
+ uint32_t esn : 1;
+
+ /** UDP encapsulation
+ *
+ * * 1: Do UDP encapsulation/decapsulation so that IPSEC packets can
+ * traverse through NAT boxes.
+ * * 0: No UDP encapsulation
+ */
+ uint32_t udp_encap : 1;
+
+ /** Copy DSCP bits
+ *
+ * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to
+ * the outer IP header in encapsulation, and vice versa in
+ * decapsulation.
+ * * 0: Do not change DSCP field.
+ */
+ uint32_t copy_dscp : 1;
+
+ /** Copy IPv6 Flow Label
+ *
+ * * 1: Copy IPv6 flow label from inner IPv6 header to the
+ * outer IPv6 header.
+ * * 0: Outer header is not modified.
+ */
+ uint32_t copy_flabel : 1;
+
+ /** Copy IPv4 Don't Fragment bit
+ *
+ * * 1: Copy the DF bit from the inner IPv4 header to the outer
+ * IPv4 header.
+ * * 0: Outer header is not modified.
+ */
+ uint32_t copy_df : 1;
+
+ /** Decrement inner packet Time To Live (TTL) field
+ *
+ * * 1: In tunnel mode, decrement inner packet IPv4 TTL or
+ * IPv6 Hop Limit after tunnel decapsulation, or before tunnel
+ * encapsulation.
+ * * 0: Inner packet is not modified.
+ */
+ uint32_t dec_ttl : 1;
+};
+
+/** IPSec security association direction */
+enum rte_security_ipsec_sa_direction {
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ /**< Encrypt and generate digest */
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ /**< Verify digest and decrypt */
+};
+
+/**
+ * IPsec security association configuration data.
+ *
+ * This structure contains data required to create an IPsec SA security session.
+ */
+struct rte_security_ipsec_xform {
+ uint32_t spi;
+ /**< SA security parameter index */
+ uint32_t salt;
+ /**< SA salt */
+ struct rte_security_ipsec_sa_options options;
+ /**< various SA options */
+ enum rte_security_ipsec_sa_direction direction;
+ /**< IPSec SA Direction - Egress/Ingress */
+ enum rte_security_ipsec_sa_protocol proto;
+ /**< IPsec SA Protocol - AH/ESP */
+ enum rte_security_ipsec_sa_mode mode;
+ /**< IPsec SA Mode - transport/tunnel */
+ struct rte_security_ipsec_tunnel_param tunnel;
+ /**< Tunnel parameters, NULL for transport mode */
+};
+
+/**
+ * MACsec security session configuration
+ */
+struct rte_security_macsec_xform {
+ /** To be Filled */
+};
+
+/**
+ * Security session action type.
+ */
+enum rte_security_session_action_type {
+ RTE_SECURITY_ACTION_TYPE_NONE,
+ /**< No security actions */
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ /**< Crypto processing for security protocol is processed inline
+ * during transmission
+ */
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ /**< All security protocol processing is performed inline during
+ * transmission
+ */
+ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
+ /**< All security protocol processing including crypto is performed
+ * on a lookaside accelerator
+ */
+};
+
+/** Security session protocol definition */
+enum rte_security_session_protocol {
+ RTE_SECURITY_PROTOCOL_IPSEC,
+ /**< IPsec Protocol */
+ RTE_SECURITY_PROTOCOL_MACSEC,
+ /**< MACSec Protocol */
+};
+
+/**
+ * Security session configuration
+ */
+struct rte_security_session_conf {
+ enum rte_security_session_action_type action_type;
+ /**< Type of action to be performed on the session */
+ enum rte_security_session_protocol protocol;
+ /**< Security protocol to be configured */
+ union {
+ struct rte_security_ipsec_xform ipsec;
+ struct rte_security_macsec_xform macsec;
+ };
+ /**< Configuration parameters for security session */
+ struct rte_crypto_sym_xform *crypto_xform;
+ /**< Security Session Crypto Transformations */
+};
+
+struct rte_security_session {
+ void *sess_private_data;
+ /**< Private session material */
+};
+
+/**
+ * Create security session as specified by the session configuration
+ *
+ * @param instance security instance
+ * @param conf session configuration parameters
+ * @param mp mempool to allocate session objects from
+ * @return
+ * - On success, pointer to session
+ * - On failure, NULL
+ */
+struct rte_security_session *
+rte_security_session_create(struct rte_security_ctx *instance,
+ struct rte_security_session_conf *conf,
+ struct rte_mempool *mp);
+
+/**
+ * Update security session as specified by the session configuration
+ *
+ * @param instance security instance
+ * @param sess session to update parameters
+ * @param conf update configuration parameters
+ * @return
+ * - On success returns 0
+ * - On failure return errno
+ */
+int
+rte_security_session_update(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_security_session_conf *conf);
+
+/**
+ * Free security session header and the session private data and
+ * return it to its original mempool.
+ *
+ * @param instance security instance
+ * @param sess security session to be freed
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+int
+rte_security_session_destroy(struct rte_security_ctx *instance,
+ struct rte_security_session *sess);
+
+/**
+ * Updates the buffer with device-specific defined metadata
+ *
+ * @param instance security instance
+ * @param sess security session
+ * @param mb packet mbuf to set metadata on.
+ * @param params device-specific defined parameters
+ * required for metadata
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_mbuf *mb, void *params);
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param sym_op crypto operation
+ * @param sess security session
+ */
+static inline int
+__rte_security_attach_session(struct rte_crypto_sym_op *sym_op,
+ struct rte_security_session *sess)
+{
+ sym_op->sec_session = sess;
+
+ return 0;
+}
+
+static inline void *
+get_sec_session_private_data(const struct rte_security_session *sess)
+{
+ return sess->sess_private_data;
+}
+
+static inline void
+set_sec_session_private_data(struct rte_security_session *sess,
+ void *private_data)
+{
+ sess->sess_private_data = private_data;
+}
+
+/**
+ * Attach a session to a crypto operation.
+ * This API is needed only in case of RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL.
+ * For other rte_security_session_action_type values, ol_flags in rte_mbuf may be
+ * defined to perform security operations.
+ *
+ * @param op crypto operation
+ * @param sess security session
+ */
+static inline int
+rte_security_attach_session(struct rte_crypto_op *op,
+ struct rte_security_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+ return -EINVAL;
+
+ op->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
+
+ return __rte_security_attach_session(op->sym, sess);
+}
+
+struct rte_security_macsec_stats {
+ uint64_t reserved;
+};
+
+struct rte_security_ipsec_stats {
+ uint64_t reserved;
+
+};
+
+struct rte_security_stats {
+ enum rte_security_session_protocol protocol;
+ /**< Security protocol to be configured */
+
+ union {
+ struct rte_security_macsec_stats macsec;
+ struct rte_security_ipsec_stats ipsec;
+ };
+};
+
+/**
+ * Get security session statistics
+ *
+ * @param instance security instance
+ * @param sess security session
+ * @param stats statistics
+ * @return
+ * - On success return 0
+ * - On failure errno
+ */
+int
+rte_security_session_stats_get(struct rte_security_ctx *instance,
+ struct rte_security_session *sess,
+ struct rte_security_stats *stats);
+
+/**
+ * Security capability definition
+ */
+struct rte_security_capability {
+ enum rte_security_session_action_type action;
+ /**< Security action type*/
+ enum rte_security_session_protocol protocol;
+ /**< Security protocol */
+ RTE_STD_C11
+ union {
+ struct {
+ enum rte_security_ipsec_sa_protocol proto;
+ /**< IPsec SA protocol */
+ enum rte_security_ipsec_sa_mode mode;
+ /**< IPsec SA mode */
+ enum rte_security_ipsec_sa_direction direction;
+ /**< IPsec SA direction */
+ struct rte_security_ipsec_sa_options options;
+ /**< IPsec SA supported options */
+ } ipsec;
+ /**< IPsec capability */
+ struct {
+ /* To be Filled */
+ } macsec;
+ /**< MACsec capability */
+ };
+
+ const struct rte_cryptodev_capabilities *crypto_capabilities;
+ /**< Corresponding crypto capabilities for security capability */
+
+ uint32_t ol_flags;
+ /**< Device offload flags */
+};
+
+#define RTE_SECURITY_TX_OLOAD_NEED_MDATA 0x00000001
+/**< HW needs metadata update, see rte_security_set_pkt_metadata().
+ */
+
+#define RTE_SECURITY_TX_HW_TRAILER_OFFLOAD 0x00000002
+/**< HW constructs trailer of packets
+ * Transmitted packets will have the trailer added to them
+ * by hardware. The next protocol field will be based on
+ * the mbuf->inner_esp_next_proto field.
+ */
+#define RTE_SECURITY_RX_HW_TRAILER_OFFLOAD 0x00010000
+/**< HW removes trailer of packets
+ * Received packets have no trailer, the next protocol field
+ * is supplied in the mbuf->inner_esp_next_proto field.
+ * Inner packet is not modified.
+ */
+
+/**
+ * Security capability index used to query a security instance for a specific
+ * security capability
+ */
+struct rte_security_capability_idx {
+ enum rte_security_session_action_type action;
+ enum rte_security_session_protocol protocol;
+
+ union {
+ struct {
+ enum rte_security_ipsec_sa_protocol proto;
+ enum rte_security_ipsec_sa_mode mode;
+ enum rte_security_ipsec_sa_direction direction;
+ } ipsec;
+ };
+};
+
+/**
+ * Returns array of security instance capabilities
+ *
+ * @param instance Security instance.
+ *
+ * @return
+ * - Returns array of security capabilities.
+ * - Return NULL if no capabilities available.
+ */
+const struct rte_security_capability *
+rte_security_capabilities_get(struct rte_security_ctx *instance);
+
+/**
+ * Query if a specific capability is available on security instance
+ *
+ * @param instance security instance.
+ * @param idx security capability index to match against
+ *
+ * @return
+ * - Returns pointer to security capability on match of capability
+ * index criteria.
+ * - Return NULL if the capability not matched on security instance.
+ */
+const struct rte_security_capability *
+rte_security_capability_get(struct rte_security_ctx *instance,
+ struct rte_security_capability_idx *idx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SECURITY_H_ */
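
Taken together, the header defines the usual create/use/destroy lifecycle: fill a session conf, allocate the session from a mempool, and for lookaside processing attach it to each crypto op. A sketch assuming the crypto transform chain and session mempool already exist (the SPI value is a placeholder):

#include <rte_security.h>

static struct rte_security_session *
esp_egress_session(struct rte_security_ctx *ctx,
		struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp)
{
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x100, /* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = xform,
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}

Each outgoing operation is then bound with rte_security_attach_session(op, sess), and rte_security_session_destroy(ctx, sess) returns the session to its mempool once the driver has released its private data.
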
diff --git a/lib/librte_security/rte_security_driver.h b/lib/librte_security/rte_security_driver.h
new file mode 100644
index 00000000..997fbe79
--- /dev/null
+++ b/lib/librte_security/rte_security_driver.h
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SECURITY_DRIVER_H_
+#define _RTE_SECURITY_DRIVER_H_
+
+/**
+ * @file rte_security_driver.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE Security Common Definitions
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_security.h"
+
+/**
+ * Configure a security session on a device.
+ *
+ * @param device Crypto/eth device pointer
+ * @param conf Security session configuration
+ * @param sess Pointer to Security private session structure
+ * @param mp Mempool where the private session is allocated
+ *
+ * @return
+ * - Returns 0 if the private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*security_session_create_t)(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mp);
+
+/**
+ * Free driver private session data.
+ *
+ * @param device Crypto/eth device pointer
+ * @param sess Security session structure
+ */
+typedef int (*security_session_destroy_t)(void *device,
+ struct rte_security_session *sess);
+
+/**
+ * Update driver private session data.
+ *
+ * @param device Crypto/eth device pointer
+ * @param sess Pointer to Security private session structure
+ * @param conf Security session configuration
+ *
+ * @return
+ * - Returns 0 if the private session structure has been updated successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ */
+typedef int (*security_session_update_t)(void *device,
+ struct rte_security_session *sess,
+ struct rte_security_session_conf *conf);
+/**
+ * Get stats from the PMD.
+ *
+ * @param device Crypto/eth device pointer
+ * @param sess Pointer to Security private session structure
+ * @param stats Security stats of the driver
+ *
+ * @return
+ * - Returns 0 if the private session structure has been updated successfully.
+ * - Returns -EINVAL if session parameters are invalid.
+ */
+typedef int (*security_session_stats_get_t)(void *device,
+ struct rte_security_session *sess,
+ struct rte_security_stats *stats);
+
+/**
+ * Update the mbuf with provided metadata.
+ *
+ * @param device Crypto/eth device pointer
+ * @param sess Security session structure
+ * @param m Packet mbuf to update
+ * @param params Device-specific defined parameters
+ *
+ * @return
+ * - Returns 0 if metadata updated successfully.
+ * - Returns a negative value on error.
+ */
+typedef int (*security_set_pkt_metadata_t)(void *device,
+ struct rte_security_session *sess, struct rte_mbuf *m,
+ void *params);
+
+/**
+ * Get security capabilities of the device.
+ *
+ * @param device crypto/eth device pointer
+ *
+ * @return
+ * - Returns rte_security_capability pointer on success.
+ * - Returns NULL on error.
+ */
+typedef const struct rte_security_capability *(*security_capabilities_get_t)(
+ void *device);
+
+/** Security operations function pointer table */
+struct rte_security_ops {
+ security_session_create_t session_create;
+ /**< Configure a security session. */
+ security_session_update_t session_update;
+ /**< Update a security session. */
+ security_session_stats_get_t session_stats_get;
+ /**< Get security session statistics. */
+ security_session_destroy_t session_destroy;
+ /**< Clear a security sessions private data. */
+ security_set_pkt_metadata_t set_pkt_metadata;
+ /**< Update mbuf metadata. */
+ security_capabilities_get_t capabilities_get;
+ /**< Get security capabilities. */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SECURITY_DRIVER_H_ */
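
A PMD backs these typedefs with static callbacks and exports a filled-in ops table through its rte_security_ctx. A skeletal sketch with only create and destroy wired up (all names are hypothetical; unimplemented slots stay NULL, so the library wrappers return -ENOTSUP):

#include <errno.h>
#include "rte_security_driver.h"

static int
dummy_session_create(void *device, struct rte_security_session_conf *conf,
		struct rte_security_session *sess, struct rte_mempool *mp)
{
	void *priv;

	(void)device;
	(void)conf;
	/* carve the private session data out of the same mempool */
	if (rte_mempool_get(mp, &priv))
		return -ENOMEM;
	set_sec_session_private_data(sess, priv);
	return 0;
}

static int
dummy_session_destroy(void *device, struct rte_security_session *sess)
{
	void *priv = get_sec_session_private_data(sess);

	(void)device;
	rte_mempool_put(rte_mempool_from_obj(priv), priv);
	return 0;
}

static struct rte_security_ops dummy_ops = {
	.session_create = dummy_session_create,
	.session_destroy = dummy_session_destroy,
};
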
diff --git a/lib/librte_security/rte_security_version.map b/lib/librte_security/rte_security_version.map
new file mode 100644
index 00000000..e12c04b2
--- /dev/null
+++ b/lib/librte_security/rte_security_version.map
@@ -0,0 +1,14 @@
+EXPERIMENTAL {
+ global:
+
+ rte_security_attach_session;
+ rte_security_capabilities_get;
+ rte_security_capability_get;
+ rte_security_session_create;
+ rte_security_session_destroy;
+ rte_security_session_stats_get;
+ rte_security_session_update;
+ rte_security_set_pkt_metadata;
+
+ local: *;
+};
diff --git a/lib/librte_table/Makefile b/lib/librte_table/Makefile
index 8ddc8804..2e32fbf1 100644
--- a/lib/librte_table/Makefile
+++ b/lib/librte_table/Makefile
@@ -38,10 +38,15 @@ LIB = librte_table.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_port
+LDLIBS += -lrte_lpm -lrte_hash
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+LDLIBS += -lrte_acl
+endif
EXPORT_MAP := rte_table_version.map
-LIBABIVER := 2
+LIBABIVER := 3
#
# all source are stored in SRCS-y
diff --git a/lib/librte_table/rte_table_hash.h b/lib/librte_table/rte_table_hash.h
index 57505a6f..15f1902b 100644
--- a/lib/librte_table/rte_table_hash.h
+++ b/lib/librte_table/rte_table_hash.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,8 +45,6 @@ extern "C" {
* These tables use the exact match criterion to uniquely associate data to
* lookup keys.
*
- * Use-cases: Flow classification table, Address Resolution Protocol (ARP) table
- *
* Hash table types:
* 1. Entry add strategy on bucket full:
* a. Least Recently Used (LRU): One of the existing keys in the bucket is
@@ -59,7 +57,7 @@ extern "C" {
* to the bucket, it also becomes the new MRU key. When a key needs to
* be picked and dropped, the most likely candidate for drop, i.e. the
* current LRU key, is always picked. The LRU logic requires maintaining
- * specific data structures per each bucket.
+ * specific data structures per bucket. Use-cases: flow cache, etc.
* b. Extendible bucket (ext): The bucket is extended with space for 4 more
* keys. This is done by allocating additional memory at table init time,
* which is used to create a pool of free keys (the size of this pool is
@@ -73,20 +71,8 @@ extern "C" {
* first group of 4 keys, the search continues beyond the first group of
* 4 keys, potentially until all keys in this bucket are examined. The
* extendible bucket logic requires maintaining specific data structures
- * per table and per each bucket.
- * 2. Key signature computation:
- * a. Pre-computed key signature: The key lookup operation is split between
- * two CPU cores. The first CPU core (typically the CPU core performing
- * packet RX) extracts the key from the input packet, computes the key
- * signature and saves both the key and the key signature in the packet
- * buffer as packet meta-data. The second CPU core reads both the key and
- * the key signature from the packet meta-data and performs the bucket
- * search step of the key lookup operation.
- * b. Key signature computed on lookup (do-sig): The same CPU core reads
- * the key from the packet meta-data, uses it to compute the key
- * signature and also performs the bucket search step of the key lookup
- * operation.
- * 3. Key size:
+ * per table and per bucket. Use-cases: flow table, etc.
+ * 2. Key size:
* a. Configurable key size
* b. Single key size (8-byte, 16-byte or 32-byte key size)
*
@@ -98,59 +84,28 @@ extern "C" {
/** Hash function */
typedef uint64_t (*rte_table_hash_op_hash)(
void *key,
+ void *key_mask,
uint32_t key_size,
uint64_t seed);
-/**
- * Hash tables with configurable key size
- *
- */
-/** Extendible bucket hash table parameters */
-struct rte_table_hash_ext_params {
+/** Hash table parameters */
+struct rte_table_hash_params {
+ /** Name */
+ const char *name;
+
/** Key size (number of bytes) */
uint32_t key_size;
- /** Maximum number of keys */
- uint32_t n_keys;
-
- /** Number of hash table buckets. Each bucket stores up to 4 keys. */
- uint32_t n_buckets;
-
- /** Number of hash table bucket extensions. Each bucket extension has
- space for 4 keys and each bucket can have 0, 1 or more extensions. */
- uint32_t n_buckets_ext;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed value for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
/** Byte offset within packet meta-data where the key is located */
uint32_t key_offset;
-};
-/** Extendible bucket hash table operations for pre-computed key signature */
-extern struct rte_table_ops rte_table_hash_ext_ops;
-
-/** Extendible bucket hash table operations for key signature computed on
- lookup ("do-sig") */
-extern struct rte_table_ops rte_table_hash_ext_dosig_ops;
-
-/** LRU hash table parameters */
-struct rte_table_hash_lru_params {
- /** Key size (number of bytes) */
- uint32_t key_size;
+ /** Key mask */
+ uint8_t *key_mask;
- /** Maximum number of keys */
+ /** Number of keys */
uint32_t n_keys;
- /** Number of hash table buckets. Each bucket stores up to 4 keys. */
+ /** Number of buckets */
uint32_t n_buckets;
/** Hash function */
@@ -158,239 +113,23 @@ struct rte_table_hash_lru_params {
/** Seed value for the hash function */
uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-};
-
-/** LRU hash table operations for pre-computed key signature */
-extern struct rte_table_ops rte_table_hash_lru_ops;
-
-/** LRU hash table operations for key signature computed on lookup ("do-sig") */
-extern struct rte_table_ops rte_table_hash_lru_dosig_ops;
-
-/**
- * 8-byte key hash tables
- *
- */
-/** LRU hash table parameters */
-struct rte_table_hash_key8_lru_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-
- /** Bit-mask to be AND-ed to the key on lookup */
- uint8_t *key_mask;
};
-/** LRU hash table operations for pre-computed key signature */
-extern struct rte_table_ops rte_table_hash_key8_lru_ops;
-
-/** LRU hash table operations for key signature computed on lookup ("do-sig") */
-extern struct rte_table_ops rte_table_hash_key8_lru_dosig_ops;
-
-/** Extendible bucket hash table parameters */
-struct rte_table_hash_key8_ext_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Number of entries (and keys) for hash table bucket extensions. Each
- bucket is extended in increments of 4 keys. */
- uint32_t n_entries_ext;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-
- /** Bit-mask to be AND-ed to the key on lookup */
- uint8_t *key_mask;
-};
-
-/** Extendible bucket hash table operations for pre-computed key signature */
+/** Extendible bucket hash table operations */
+extern struct rte_table_ops rte_table_hash_ext_ops;
extern struct rte_table_ops rte_table_hash_key8_ext_ops;
-
-/** Extendible bucket hash table operations for key signature computed on
- lookup ("do-sig") */
-extern struct rte_table_ops rte_table_hash_key8_ext_dosig_ops;
-
-/**
- * 16-byte key hash tables
- *
- */
-/** LRU hash table parameters */
-struct rte_table_hash_key16_lru_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-
- /** Bit-mask to be AND-ed to the key on lookup */
- uint8_t *key_mask;
-};
-
-/** LRU hash table operations for pre-computed key signature */
-extern struct rte_table_ops rte_table_hash_key16_lru_ops;
-
-/** LRU hash table operations for key signature computed on lookup
- ("do-sig") */
-extern struct rte_table_ops rte_table_hash_key16_lru_dosig_ops;
-
-/** Extendible bucket hash table parameters */
-struct rte_table_hash_key16_ext_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Number of entries (and keys) for hash table bucket extensions. Each
- bucket is extended in increments of 4 keys. */
- uint32_t n_entries_ext;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-
- /** Bit-mask to be AND-ed to the key on lookup */
- uint8_t *key_mask;
-};
-
-/** Extendible bucket operations for pre-computed key signature */
extern struct rte_table_ops rte_table_hash_key16_ext_ops;
-
-/** Extendible bucket hash table operations for key signature computed on
- lookup ("do-sig") */
-extern struct rte_table_ops rte_table_hash_key16_ext_dosig_ops;
-
-/**
- * 32-byte key hash tables
- *
- */
-/** LRU hash table parameters */
-struct rte_table_hash_key32_lru_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-};
-
-/** LRU hash table operations for pre-computed key signature */
-extern struct rte_table_ops rte_table_hash_key32_lru_ops;
-
-/** Extendible bucket hash table parameters */
-struct rte_table_hash_key32_ext_params {
- /** Maximum number of entries (and keys) in the table */
- uint32_t n_entries;
-
- /** Number of entries (and keys) for hash table bucket extensions. Each
- bucket is extended in increments of 4 keys. */
- uint32_t n_entries_ext;
-
- /** Hash function */
- rte_table_hash_op_hash f_hash;
-
- /** Seed for the hash function */
- uint64_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
-};
-
-/** Extendible bucket hash table operations */
extern struct rte_table_ops rte_table_hash_key32_ext_ops;
-/** Cuckoo hash table parameters */
-struct rte_table_hash_cuckoo_params {
- /** Key size (number of bytes */
- uint32_t key_size;
-
- /** Maximum number of hash table entries */
- uint32_t n_keys;
-
- /** Hash function used to calculate hash */
- rte_table_hash_op_hash f_hash;
-
- /** Seed value or Init value used by f_hash */
- uint32_t seed;
-
- /** Byte offset within packet meta-data where the 4-byte key signature
- is located. Valid for pre-computed key signature tables, ignored for
- do-sig tables. */
- uint32_t signature_offset;
-
- /** Byte offset within packet meta-data where the key is located */
- uint32_t key_offset;
+/** LRU hash table operations */
+extern struct rte_table_ops rte_table_hash_lru_ops;
- /** Hash table name */
- const char *name;
-};
+extern struct rte_table_ops rte_table_hash_key8_lru_ops;
+extern struct rte_table_ops rte_table_hash_key16_lru_ops;
+extern struct rte_table_ops rte_table_hash_key32_lru_ops;
/** Cuckoo hash table operations */
-extern struct rte_table_ops rte_table_hash_cuckoo_dosig_ops;
+extern struct rte_table_ops rte_table_hash_cuckoo_ops;
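
Taken together, the consolidated header reduces every variant to one parameter structure plus one ops table per flavor. A hedged usage sketch follows; hash_xor64 is the illustrative callback sketched earlier, and the table name, sizes, offsets and entry layout are assumptions rather than values mandated by the API:

struct rte_table_hash_params params = {
	.name = "flow_tbl",
	.key_size = 16,
	.key_offset = 0,    /* key at the start of packet meta-data */
	.key_mask = NULL,   /* NULL selects an all-ones mask (exact match) */
	.n_keys = 1 << 16,
	.n_buckets = 1 << 14,
	.f_hash = hash_xor64,
	.seed = 0,
};

uint64_t flow_entry = 0;    /* illustrative 8-byte entry */
uint8_t key[16] = { 0 };
int key_found;
void *entry_ptr, *tbl;

tbl = rte_table_hash_ext_ops.f_create(&params, SOCKET_ID_ANY,
	sizeof(flow_entry));
rte_table_hash_ext_ops.f_add(tbl, key, &flow_entry, &key_found, &entry_ptr);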
#ifdef __cplusplus
}
diff --git a/lib/librte_table/rte_table_hash_cuckoo.c b/lib/librte_table/rte_table_hash_cuckoo.c
index da1597fa..f3845c75 100644
--- a/lib/librte_table/rte_table_hash_cuckoo.c
+++ b/lib/librte_table/rte_table_hash_cuckoo.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
@@ -66,25 +66,28 @@ struct rte_table_hash {
uint32_t n_keys;
rte_table_hash_op_hash f_hash;
uint32_t seed;
- uint32_t signature_offset;
uint32_t key_offset;
- const char *name;
/* cuckoo hash table object */
struct rte_hash *h_table;
/* Lookup table */
- uint8_t memory[0] __rte_cache_aligned; };
+ uint8_t memory[0] __rte_cache_aligned;
+};
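
The zero-length memory[] member (a GCC extension used throughout these tables) makes the table header and its entry storage a single allocation. A minimal standalone sketch of the same pattern, using plain calloc for illustration:

#include <stdlib.h>

struct table {
	size_t entry_size;
	unsigned char memory[0]; /* entries start right after the header */
};

static struct table *
table_alloc(size_t n_entries, size_t entry_size)
{
	struct table *t = calloc(1, sizeof(*t) + n_entries * entry_size);

	if (t != NULL)
		t->entry_size = entry_size;
	return t;
}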
static int
-check_params_create_hash_cuckoo(const struct
-rte_table_hash_cuckoo_params *params) {
- /* Check for valid parameters */
+check_params_create_hash_cuckoo(struct rte_table_hash_params *params)
+{
if (params == NULL) {
RTE_LOG(ERR, TABLE, "NULL Input Parameters.\n");
return -EINVAL;
}
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "Table name is NULL.\n");
+ return -EINVAL;
+ }
+
if (params->key_size == 0) {
RTE_LOG(ERR, TABLE, "Invalid key_size.\n");
return -EINVAL;
@@ -100,11 +103,6 @@ rte_table_hash_cuckoo_params *params) {
return -EINVAL;
}
- if (params->name == NULL) {
- RTE_LOG(ERR, TABLE, "Table name is NULL.\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -113,34 +111,24 @@ rte_table_hash_cuckoo_create(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_hash *rte_hash_handle;
+ struct rte_table_hash_params *p = params;
+ struct rte_hash *h_table;
struct rte_table_hash *t;
- uint32_t total_size, total_cl_size;
+ uint32_t total_size;
/* Check input parameters */
- struct rte_table_hash_cuckoo_params *p =
- (struct rte_table_hash_cuckoo_params *) params;
-
if (check_params_create_hash_cuckoo(params))
return NULL;
/* Memory allocation */
- total_cl_size =
- (sizeof(struct rte_table_hash) +
- RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
- total_cl_size += (p->n_keys * entry_size +
- RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
- total_size = total_cl_size * RTE_CACHE_LINE_SIZE;
-
- t = rte_zmalloc_socket("TABLE",
- total_size,
- RTE_CACHE_LINE_SIZE,
- socket_id);
+ total_size = sizeof(struct rte_table_hash) +
+ RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
+
+ t = rte_zmalloc_socket(p->name, total_size, RTE_CACHE_LINE_SIZE, socket_id);
if (t == NULL) {
RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for Cuckoo hash table\n",
- __func__,
- (uint32_t)sizeof(struct rte_table_hash));
+ "%s: Cannot allocate %u bytes for cuckoo hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
@@ -154,13 +142,13 @@ rte_table_hash_cuckoo_create(void *params,
.name = p->name
};
- rte_hash_handle = rte_hash_find_existing(p->name);
- if (rte_hash_handle == NULL) {
- rte_hash_handle = rte_hash_create(&hash_cuckoo_params);
- if (NULL == rte_hash_handle) {
+ h_table = rte_hash_find_existing(p->name);
+ if (h_table == NULL) {
+ h_table = rte_hash_create(&hash_cuckoo_params);
+ if (h_table == NULL) {
RTE_LOG(ERR, TABLE,
- "%s: failed to create cuckoo hash table. keysize: %u",
- __func__, hash_cuckoo_params.key_len);
+ "%s: failed to create cuckoo hash table %s\n",
+ __func__, p->name);
rte_free(t);
return NULL;
}
@@ -172,26 +160,22 @@ rte_table_hash_cuckoo_create(void *params,
t->n_keys = p->n_keys;
t->f_hash = p->f_hash;
t->seed = p->seed;
- t->signature_offset = p->signature_offset;
t->key_offset = p->key_offset;
- t->name = p->name;
- t->h_table = rte_hash_handle;
+ t->h_table = h_table;
RTE_LOG(INFO, TABLE,
- "%s: Cuckoo Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+ "%s: Cuckoo hash table %s memory footprint is %u bytes\n",
+ __func__, p->name, total_size);
return t;
}
static int
rte_table_hash_cuckoo_free(void *table) {
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
struct rte_table_hash *t = table;
+ if (table == NULL)
+ return -EINVAL;
+
rte_hash_free(t->h_table);
rte_free(t);
@@ -200,25 +184,18 @@ rte_table_hash_cuckoo_free(void *table) {
static int
rte_table_hash_cuckoo_entry_add(void *table, void *key, void *entry,
- int *key_found, void **entry_ptr) {
+ int *key_found, void **entry_ptr)
+{
+ struct rte_table_hash *t = table;
int pos = 0;
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (key == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (entry == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entry parameter is NULL\n", __func__);
+ /* Check input parameters */
+ if ((table == NULL) ||
+ (key == NULL) ||
+ (entry == NULL) ||
+ (key_found == NULL) ||
+ (entry_ptr == NULL))
return -EINVAL;
- }
-
- struct rte_table_hash *t = table;
/* Find Existing entries */
pos = rte_hash_lookup(t->h_table, key);
@@ -231,17 +208,15 @@ rte_table_hash_cuckoo_entry_add(void *table, void *key, void *entry,
*entry_ptr = existing_entry;
return 0;
-} else if (pos == -ENOENT) {
- /* Entry not found. Adding new entry */
+ }
+
+ if (pos == -ENOENT) {
+ /* Entry not found. Adding new entry */
uint8_t *new_entry;
pos = rte_hash_add_key(t->h_table, key);
- if (pos < 0) {
- RTE_LOG(ERR, TABLE,
- "%s: Entry not added, status : %u\n",
- __func__, pos);
+ if (pos < 0)
return pos;
- }
new_entry = &t->memory[pos * t->entry_size];
memcpy(new_entry, entry, t->entry_size);
@@ -250,25 +225,22 @@ rte_table_hash_cuckoo_entry_add(void *table, void *key, void *entry,
*entry_ptr = new_entry;
return 0;
}
+
return pos;
}
static int
rte_table_hash_cuckoo_entry_delete(void *table, void *key,
- int *key_found, __rte_unused void *entry) {
+ int *key_found, void *entry)
+{
+ struct rte_table_hash *t = table;
int pos = 0;
- if (table == NULL) {
- RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (key == NULL) {
- RTE_LOG(ERR, TABLE, "%s: key parameter is NULL\n", __func__);
+ /* Check input parameters */
+ if ((table == NULL) ||
+ (key == NULL) ||
+ (key_found == NULL))
return -EINVAL;
- }
-
- struct rte_table_hash *t = table;
pos = rte_hash_del_key(t->h_table, key);
if (pos >= 0) {
@@ -279,20 +251,21 @@ rte_table_hash_cuckoo_entry_delete(void *table, void *key,
memcpy(entry, entry_ptr, t->entry_size);
memset(&t->memory[pos * t->entry_size], 0, t->entry_size);
+ return 0;
}
+ *key_found = 0;
return pos;
}
-
static int
-rte_table_hash_cuckoo_lookup_dosig(void *table,
+rte_table_hash_cuckoo_lookup(void *table,
struct rte_mbuf **pkts,
uint64_t pkts_mask,
uint64_t *lookup_hit_mask,
void **entries)
{
- struct rte_table_hash *t = (struct rte_table_hash *)table;
+ struct rte_table_hash *t = table;
uint64_t pkts_mask_out = 0;
uint32_t i;
@@ -301,20 +274,19 @@ rte_table_hash_cuckoo_lookup_dosig(void *table,
RTE_TABLE_HASH_CUCKOO_STATS_PKTS_IN_ADD(t, n_pkts_in);
if ((pkts_mask & (pkts_mask + 1)) == 0) {
- const uint8_t *keys[64];
- int32_t positions[64], status;
+ const uint8_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
+ int32_t positions[RTE_PORT_IN_BURST_SIZE_MAX], status;
/* Keys for bulk lookup */
for (i = 0; i < n_pkts_in; i++)
keys[i] = RTE_MBUF_METADATA_UINT8_PTR(pkts[i],
- t->key_offset);
+ t->key_offset);
/* Bulk Lookup */
status = rte_hash_lookup_bulk(t->h_table,
(const void **) keys,
n_pkts_in,
positions);
-
if (status == 0) {
for (i = 0; i < n_pkts_in; i++) {
if (likely(positions[i] >= 0)) {
@@ -326,7 +298,7 @@ rte_table_hash_cuckoo_lookup_dosig(void *table,
}
}
}
- } else {
+ } else
for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX
- __builtin_clzll(pkts_mask)); i++) {
uint64_t pkt_mask = 1LLU << i;
@@ -345,7 +317,6 @@ rte_table_hash_cuckoo_lookup_dosig(void *table,
}
}
}
- }
*lookup_hit_mask = pkts_mask_out;
RTE_TABLE_HASH_CUCKOO_STATS_PKTS_LOOKUP_MISS(t,
@@ -370,13 +341,13 @@ rte_table_hash_cuckoo_stats_read(void *table, struct rte_table_stats *stats,
return 0;
}
-struct rte_table_ops rte_table_hash_cuckoo_dosig_ops = {
+struct rte_table_ops rte_table_hash_cuckoo_ops = {
.f_create = rte_table_hash_cuckoo_create,
.f_free = rte_table_hash_cuckoo_free,
.f_add = rte_table_hash_cuckoo_entry_add,
.f_delete = rte_table_hash_cuckoo_entry_delete,
.f_add_bulk = NULL,
.f_delete_bulk = NULL,
- .f_lookup = rte_table_hash_cuckoo_lookup_dosig,
+ .f_lookup = rte_table_hash_cuckoo_lookup,
.f_stats = rte_table_hash_cuckoo_stats_read,
};
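
The bulk path in the lookup above is taken only when pkts_mask is dense, i.e. all set bits are contiguous from bit 0, which is exactly what the (pkts_mask & (pkts_mask + 1)) == 0 test checks. A standalone illustration with assumed mask values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dense = 0x7;  /* 0b0111: no holes, bulk lookup is used   */
	uint64_t sparse = 0x5; /* 0b0101: hole at bit 1, per-packet path  */

	assert((dense & (dense + 1)) == 0);
	assert((sparse & (sparse + 1)) != 0);
	return 0;
}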
diff --git a/lib/librte_table/rte_table_hash_ext.c b/lib/librte_table/rte_table_hash_ext.c
index e7181026..3af1bcab 100644
--- a/lib/librte_table/rte_table_hash_ext.c
+++ b/lib/librte_table/rte_table_hash_ext.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
@@ -106,7 +106,6 @@ struct rte_table_hash {
uint32_t n_buckets_ext;
rte_table_hash_op_hash f_hash;
uint64_t seed;
- uint32_t signature_offset;
uint32_t key_offset;
/* Internal */
@@ -120,6 +119,7 @@ struct rte_table_hash {
struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
/* Tables */
+ uint64_t *key_mask;
struct bucket *buckets;
struct bucket *buckets_ext;
uint8_t *key_mem;
@@ -132,29 +132,53 @@ struct rte_table_hash {
};
static int
-check_params_create(struct rte_table_hash_ext_params *params)
+keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
- uint32_t n_buckets_min;
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+ uint32_t i;
+
+ for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
+ if (a64[i] != (b64[i] & b_mask64[i]))
+ return 1;
+
+ return 0;
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
+{
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+ uint32_t i;
+
+ for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
+ dst64[i] = src64[i] & src_mask64[i];
+}
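
Note the asymmetry: keycpy() stores keys pre-masked, so keycmp() only has to mask the probe side. A worked illustration in terms of the two helpers just defined, assuming a 16-byte key whose first word wildcards its low three bytes:

uint64_t probe[2] = { 0x1234567890abcdefULL, 0x0123456789abcdefULL };
uint64_t mask[2] = { 0xffffffffff000000ULL, 0xffffffffffffffffULL };
uint64_t stored[2];

keycpy(stored, probe, mask, 16);              /* stored[] = probe[] & mask[] */
assert(keycmp(stored, probe, mask, 16) == 0); /* match */
probe[0] ^= 0xff;                             /* flip only wildcarded bits */
assert(keycmp(stored, probe, mask, 16) == 0); /* still a match */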
+
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
/* key_size */
- if ((params->key_size == 0) ||
+ if ((params->key_size < sizeof(uint64_t)) ||
(!rte_is_power_of_2(params->key_size))) {
RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
return -EINVAL;
}
/* n_keys */
- if ((params->n_keys == 0) ||
- (!rte_is_power_of_2(params->n_keys))) {
+ if (params->n_keys == 0) {
RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
return -EINVAL;
}
/* n_buckets */
- n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / params->n_keys;
if ((params->n_buckets == 0) ||
- (!rte_is_power_of_2(params->n_keys)) ||
- (params->n_buckets < n_buckets_min)) {
+ (!rte_is_power_of_2(params->n_buckets))) {
RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
@@ -171,15 +195,13 @@ check_params_create(struct rte_table_hash_ext_params *params)
static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_ext_params *p =
- params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *t;
- uint32_t total_size, table_meta_sz;
- uint32_t bucket_sz, bucket_ext_sz, key_sz;
- uint32_t key_stack_sz, bkt_ext_stack_sz, data_sz;
- uint32_t bucket_offset, bucket_ext_offset, key_offset;
- uint32_t key_stack_offset, bkt_ext_stack_offset, data_offset;
- uint32_t i;
+ uint64_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz;
+ uint64_t key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
+ uint64_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset;
+ uint64_t key_stack_offset, bkt_ext_stack_offset, data_offset;
+ uint32_t n_buckets_ext, i;
/* Check input parameters */
if ((check_params_create(p) != 0) ||
@@ -188,38 +210,66 @@ rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
return NULL;
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
+ * it is guaranteed that n_keys keys can be stored in the table at any time.
+ *
+ * The worst case scenario takes place when all the n_keys keys fall into
+ * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
+ * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
+ * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
+ * into a different bucket. This case defeats the purpose of the hash table.
+ * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
+ *
+ * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
+ */
+ n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
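
Worked example for the sizing formula above: with n_keys = 1000 and KEYS_PER_BUCKET = 4, this assigns n_buckets_ext = 250 + 3 = 253 extension buckets, enough to absorb even the pathological pile-up described in the comment.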
+
/* Memory allocation */
table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
+ key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
bucket_ext_sz =
- RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
+ RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(struct bucket));
key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
bkt_ext_stack_sz =
- RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
+ RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
- total_size = table_meta_sz + bucket_sz + bucket_ext_sz + key_sz +
- key_stack_sz + bkt_ext_stack_sz + data_sz;
+ total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
+ key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;
- t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ t = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (t == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
- "%u bytes\n", __func__, p->key_size, total_size);
+ RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory "
+ "footprint is %" PRIu64 " bytes\n",
+ __func__, p->key_size, p->name, total_size);
/* Memory initialization */
t->key_size = p->key_size;
t->entry_size = entry_size;
t->n_keys = p->n_keys;
t->n_buckets = p->n_buckets;
- t->n_buckets_ext = p->n_buckets_ext;
+ t->n_buckets_ext = n_buckets_ext;
t->f_hash = p->f_hash;
t->seed = p->seed;
- t->signature_offset = p->signature_offset;
t->key_offset = p->key_offset;
/* Internal */
@@ -228,13 +278,15 @@ rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
t->data_size_shl = __builtin_ctzl(entry_size);
/* Tables */
- bucket_offset = 0;
+ key_mask_offset = 0;
+ bucket_offset = key_mask_offset + key_mask_sz;
bucket_ext_offset = bucket_offset + bucket_sz;
key_offset = bucket_ext_offset + bucket_ext_sz;
key_stack_offset = key_offset + key_sz;
bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;
+ t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
t->buckets = (struct bucket *) &t->memory[bucket_offset];
t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
t->key_mem = &t->memory[key_offset];
@@ -242,6 +294,12 @@ rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
t->data_mem = &t->memory[data_offset];
+ /* Key mask */
+ if (p->key_mask == NULL)
+ memset(t->key_mask, 0xFF, p->key_size);
+ else
+ memcpy(t->key_mask, p->key_mask, p->key_size);
+
/* Key stack */
for (i = 0; i < t->n_keys; i++)
t->key_stack[i] = t->n_keys - 1 - i;
@@ -277,7 +335,7 @@ rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
uint64_t sig;
uint32_t bkt_index, i;
- sig = t->f_hash(key, t->key_size, t->seed);
+ sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt0 = &t->buckets[bkt_index];
sig = (sig >> 16) | 1LLU;
@@ -290,7 +348,7 @@ rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
uint8_t *bkt_key =
&t->key_mem[bkt_key_index << t->key_size_shl];
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
t->key_size) == 0)) {
uint8_t *data = &t->data_mem[bkt_key_index <<
t->data_size_shl];
@@ -327,7 +385,7 @@ rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
bkt->sig[i] = (uint16_t) sig;
bkt->key_pos[i] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
+ keycpy(bkt_key, key, t->key_mask, t->key_size);
memcpy(data, entry, t->entry_size);
*key_found = 0;
@@ -358,7 +416,7 @@ rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
/* Install new key into bucket */
bkt->sig[0] = (uint16_t) sig;
bkt->key_pos[0] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
+ keycpy(bkt_key, key, t->key_mask, t->key_size);
memcpy(data, entry, t->entry_size);
*key_found = 0;
@@ -378,7 +436,7 @@ void *entry)
uint64_t sig;
uint32_t bkt_index, i;
- sig = t->f_hash(key, t->key_size, t->seed);
+ sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt0 = &t->buckets[bkt_index];
sig = (sig >> 16) | 1LLU;
@@ -392,7 +450,7 @@ void *entry)
uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
t->key_size_shl];
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
t->key_size) == 0)) {
uint8_t *data = &t->data_mem[bkt_key_index <<
t->data_size_shl];
@@ -437,8 +495,7 @@ static int rte_table_hash_ext_lookup_unoptimized(
struct rte_mbuf **pkts,
uint64_t pkts_mask,
uint64_t *lookup_hit_mask,
- void **entries,
- int dosig)
+ void **entries)
{
struct rte_table_hash *t = (struct rte_table_hash *) table;
uint64_t pkts_mask_out = 0;
@@ -458,11 +515,7 @@ static int rte_table_hash_ext_lookup_unoptimized(
pkt = pkts[pkt_index];
key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
- if (dosig)
- sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
- else
- sig = RTE_MBUF_METADATA_UINT32(pkt,
- t->signature_offset);
+ sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt0 = &t->buckets[bkt_index];
@@ -476,8 +529,8 @@ static int rte_table_hash_ext_lookup_unoptimized(
uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
t->key_size_shl];
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
- t->key_size) == 0)) {
+ if ((sig == bkt_sig) && (keycmp(bkt_key, key,
+ t->key_mask, t->key_size) == 0)) {
uint8_t *data = &t->data_mem[
bkt_key_index << t->data_size_shl];
@@ -576,11 +629,12 @@ static int rte_table_hash_ext_lookup_unoptimized(
{ \
uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
uint64_t *bkt_key = (uint64_t *) key; \
+ uint64_t *key_mask = f->key_mask; \
\
switch (f->key_size) { \
case 8: \
{ \
- uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
+ uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
match_key = 0; \
if (xor == 0) \
match_key = 1; \
@@ -591,8 +645,8 @@ static int rte_table_hash_ext_lookup_unoptimized(
{ \
uint64_t xor[2], or; \
\
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
or = xor[0] | xor[1]; \
match_key = 0; \
if (or == 0) \
@@ -604,10 +658,10 @@ static int rte_table_hash_ext_lookup_unoptimized(
{ \
uint64_t xor[4], or; \
\
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
+ xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
+ xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
or = xor[0] | xor[1] | xor[2] | xor[3]; \
match_key = 0; \
if (or == 0) \
@@ -619,14 +673,14 @@ static int rte_table_hash_ext_lookup_unoptimized(
{ \
uint64_t xor[8], or; \
\
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- xor[4] = pkt_key[4] ^ bkt_key[4]; \
- xor[5] = pkt_key[5] ^ bkt_key[5]; \
- xor[6] = pkt_key[6] ^ bkt_key[6]; \
- xor[7] = pkt_key[7] ^ bkt_key[7]; \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
+ xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
+ xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
+ xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4]; \
+ xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5]; \
+ xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6]; \
+ xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7]; \
or = xor[0] | xor[1] | xor[2] | xor[3] | \
xor[4] | xor[5] | xor[6] | xor[7]; \
match_key = 0; \
@@ -637,7 +691,7 @@ static int rte_table_hash_ext_lookup_unoptimized(
\
default: \
match_key = 0; \
- if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
+ if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0) \
match_key = 1; \
} \
}
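
Each unrolled case above follows the same branch-free idiom: XOR the masked packet words against the stored (pre-masked) key words, OR the lanes together, and test for zero. A self-contained sketch of the 16-byte case:

static inline int
match16(const uint64_t *pkt_key, const uint64_t *bkt_key,
	const uint64_t *key_mask)
{
	uint64_t xor0 = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];
	uint64_t xor1 = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];

	return (xor0 | xor1) == 0; /* 1 on match, no data-dependent branch */
}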
@@ -685,38 +739,7 @@ static int rte_table_hash_ext_lookup_unoptimized(
rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}
-#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint64_t bucket_mask = t->bucket_mask; \
- uint32_t signature_offset = t->signature_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index) \
+#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
{ \
struct grinder *g10, *g11; \
uint64_t sig10, sig11, bkt10_index, bkt11_index; \
@@ -731,13 +754,13 @@ static int rte_table_hash_ext_lookup_unoptimized(
\
mbuf10 = pkts[pkt10_index]; \
key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset); \
- sig10 = (uint64_t) f_hash(key10, key_size, seed); \
+ sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed); \
bkt10_index = sig10 & bucket_mask; \
bkt10 = &buckets[bkt10_index]; \
\
mbuf11 = pkts[pkt11_index]; \
key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset); \
- sig11 = (uint64_t) f_hash(key11, key_size, seed); \
+ sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed); \
bkt11_index = sig11 & bucket_mask; \
bkt11 = &buckets[bkt11_index]; \
\
@@ -874,7 +897,7 @@ static int rte_table_hash_ext_lookup(
/* Cannot run the pipeline with less than 7 packets */
if (__builtin_popcountll(pkts_mask) < 7) {
status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 0);
+ pkts_mask, lookup_hit_mask, entries);
RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
__builtin_popcountll(*lookup_hit_mask));
return status;
@@ -982,144 +1005,7 @@ static int rte_table_hash_ext_lookup(
uint64_t pkts_mask_out_slow = 0;
status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
- return status;
-}
-
-static int rte_table_hash_ext_lookup_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
- RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
-
- /* Cannot run the pipeline with less than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7) {
- status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 1);
- RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
- __builtin_popcountll(*lookup_hit_mask));
- return status;
- }
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
+ pkts_mask_match_many, &pkts_mask_out_slow, entries);
pkts_mask_out |= pkts_mask_out_slow;
}
@@ -1152,14 +1038,3 @@ struct rte_table_ops rte_table_hash_ext_ops = {
.f_lookup = rte_table_hash_ext_lookup,
.f_stats = rte_table_hash_ext_stats_read,
};
-
-struct rte_table_ops rte_table_hash_ext_dosig_ops = {
- .f_create = rte_table_hash_ext_create,
- .f_free = rte_table_hash_ext_free,
- .f_add = rte_table_hash_ext_entry_add,
- .f_delete = rte_table_hash_ext_entry_delete,
- .f_add_bulk = NULL,
- .f_delete_bulk = NULL,
- .f_lookup = rte_table_hash_ext_lookup_dosig,
- .f_stats = rte_table_hash_ext_stats_read,
-};
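
The dosig lookup deleted above also documents the structure that survives in rte_table_hash_ext_lookup(): packets are processed two at a time through a four-stage software pipeline (stage 0 prefetches packet meta-data, stage 1 computes the signature and prefetches the bucket, stages 2 and 3 match and resolve entries), while bursts of fewer than 7 packets and packets whose bucket chains extend fall back to the unoptimized path.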
diff --git a/lib/librte_table/rte_table_hash_key16.c b/lib/librte_table/rte_table_hash_key16.c
index ce057b78..b541735c 100644
--- a/lib/librte_table/rte_table_hash_key16.c
+++ b/lib/librte_table/rte_table_hash_key16.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
@@ -42,7 +42,9 @@
#include "rte_table_hash.h"
#include "rte_lru.h"
-#define RTE_TABLE_HASH_KEY_SIZE 16
+#define KEY_SIZE 16
+
+#define KEYS_PER_BUCKET 4
#define RTE_BUCKET_ENTRY_VALID 0x1LLU
@@ -79,11 +81,9 @@ struct rte_table_hash {
/* Input parameters */
uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
uint32_t key_size;
uint32_t entry_size;
uint32_t bucket_size;
- uint32_t signature_offset;
uint32_t key_offset;
uint64_t key_mask[2];
rte_table_hash_op_hash f_hash;
@@ -99,17 +99,55 @@ struct rte_table_hash {
};
static int
-check_params_create_lru(struct rte_table_hash_key16_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+keycmp(void *a, void *b, void *b_mask)
+{
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+
+ return (a64[0] != (b64[0] & b_mask64[0])) ||
+ (a64[1] != (b64[1] & b_mask64[1]));
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask)
+{
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+
+ dst64[0] = src64[0] & src_mask64[0];
+ dst64[1] = src64[1] & src_mask64[1];
+}
+
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key_size */
+ if (params->key_size != KEY_SIZE) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if (params->n_keys == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+ if ((params->n_buckets == 0) ||
+ (!rte_is_power_of_2(params->n_buckets))) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
/* f_hash */
if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: f_hash function pointer is NULL\n", __func__);
+ RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
+ __func__);
return -EINVAL;
}
@@ -121,46 +159,67 @@ rte_table_hash_create_key16_lru(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_table_hash_key16_lru_params *p =
- (struct rte_table_hash_key16_lru_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket,
- key_size, bucket_size_cl, total_size, i;
+ uint64_t bucket_size, total_size;
+ uint32_t n_buckets, i;
/* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
((sizeof(struct rte_bucket_4_16) % 64) != 0))
return NULL;
- n_entries_per_bucket = 4;
- key_size = 16;
+
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of buckets (n_buckets) so that there is a
+ * chance to store n_keys keys in the table.
+ *
+ * Note: Since the buckets do not get extended, it is not possible to
+ * guarantee that n_keys keys can be stored in the table at any time. In the
+ * worst case scenario when all the n_keys fall into the same bucket, only
+ * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
+ * defeats the purpose of the hash table. It indicates an unsuitable f_hash
+ * or n_keys to n_buckets ratio.
+ *
+ * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
+ */
+ n_buckets = rte_align32pow2(
+ (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
+ n_buckets = RTE_MAX(n_buckets, p->n_buckets);
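
Worked example: for n_keys = 1000, MIN(n_buckets) = (1000 + 3) / 4 = 251, which rte_align32pow2() rounds up to 256; the caller's n_buckets is then honored only if it requests more.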
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
+ KEYS_PER_BUCKET * entry_size);
+ total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
+
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
@@ -212,19 +271,19 @@ rte_table_hash_entry_add_key16_lru(
uint64_t signature, pos;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
+ &f->memory[bucket_index * f->bucket_size];
signature |= RTE_BUCKET_ENTRY_VALID;
/* Key is present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
memcpy(bucket_data, entry, f->entry_size);
@@ -238,13 +297,13 @@ rte_table_hash_entry_add_key16_lru(
/* Key is not present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if (bucket_signature == 0) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
+ keycpy(bucket_key, key, f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
lru_update(bucket, i);
*key_found = 0;
@@ -257,7 +316,7 @@ rte_table_hash_entry_add_key16_lru(
/* Bucket full: replace LRU entry */
pos = lru_pos(bucket);
bucket->signature[pos] = signature;
- memcpy(bucket->key[pos], key, f->key_size);
+ keycpy(&bucket->key[pos], key, f->key_mask);
memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
lru_update(bucket, pos);
*key_found = 0;
@@ -278,19 +337,19 @@ rte_table_hash_entry_delete_key16_lru(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
+ &f->memory[bucket_index * f->bucket_size];
signature |= RTE_BUCKET_ENTRY_VALID;
/* Key is present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature[i] = 0;
@@ -306,81 +365,71 @@ rte_table_hash_entry_delete_key16_lru(
return 0;
}
-static int
-check_params_create_ext(struct rte_table_hash_key16_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: f_hash function pointer is NULL\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void *
rte_table_hash_create_key16_ext(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_table_hash_key16_ext_params *p =
- (struct rte_table_hash_key16_ext_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size,
- bucket_size_cl, stack_size_cl, total_size, i;
+ uint64_t bucket_size, stack_size, total_size;
+ uint32_t n_buckets_ext, i;
/* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
((sizeof(struct rte_bucket_4_16) % 64) != 0))
return NULL;
- n_entries_per_bucket = 4;
- key_size = 16;
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
+ * it is guaranteed that n_keys keys can be stored in the table at any time.
+ *
+ * The worst case scenario takes place when all the n_keys keys fall into
+ * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
+ * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
+ * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
+ * into a different bucket. This case defeats the purpose of the hash table.
+ * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
+ *
+ * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
+ */
+ n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
+ KEYS_PER_BUCKET * entry_size);
+ stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
total_size = sizeof(struct rte_table_hash) +
- ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
+ (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->n_buckets = p->n_buckets;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
@@ -388,10 +437,7 @@ rte_table_hash_create_key16_ext(void *params,
f->n_buckets_ext = n_buckets_ext;
f->stack_pos = n_buckets_ext;
f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
-
- for (i = 0; i < n_buckets_ext; i++)
- f->stack[i] = i;
+ &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
if (p->key_mask != NULL) {
f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
@@ -401,6 +447,9 @@ rte_table_hash_create_key16_ext(void *params,
f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
}
+ for (i = 0; i < n_buckets_ext; i++)
+ f->stack[i] = i;
+
return f;
}
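
Worked example of the EXT dimensioning above, using hypothetical sizes (KEYS_PER_BUCKET is 4 in this file):

	#include <stdint.h>
	#include <stdio.h>

	#define KEYS_PER_BUCKET 4

	int main(void)
	{
		uint32_t n_keys = 1000;		/* hypothetical capacity request */
		uint32_t n_buckets = 256;	/* caller-provided, power of 2 */

		/* Worst case per the comment above: (n_keys - 3) keys chain off
		 * one bucket; each extension bucket absorbs up to 4 of them, so
		 * this count always over-provisions the chain.
		 */
		uint32_t n_buckets_ext = n_keys / KEYS_PER_BUCKET +
			KEYS_PER_BUCKET - 1;	/* 250 + 3 = 253 */

		printf("slots = %u (>= n_keys = %u)\n",
			(n_buckets + n_buckets_ext) * KEYS_PER_BUCKET, n_keys);
		return 0;
	}
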
@@ -432,20 +481,20 @@ rte_table_hash_entry_add_key16_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_16 *)
- &f->memory[bucket_index * f->bucket_size];
+ &f->memory[bucket_index * f->bucket_size];
signature |= RTE_BUCKET_ENTRY_VALID;
/* Key is present in the bucket */
for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
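
Every f_hash call site in this patch gains a key_mask argument, so the raw key can be hashed straight out of mbuf metadata instead of being staged into a masked buffer first (the deleted hash_key_buffer copies below). A sketch of a callback with the matching shape; the folding scheme is illustrative only, and a real table should plug in a proper (e.g. CRC-based) hash:

	#include <stdint.h>

	/* Toy 16-byte key hash demonstrating the new calling convention:
	 * (key, key_mask, key_size, seed). Stored keys are pre-masked by
	 * keycpy(), so hashing the masked probe keeps both sides consistent.
	 */
	static uint64_t
	toy_hash_key16(void *key, void *key_mask, uint32_t key_size, uint64_t seed)
	{
		uint64_t *k = key, *m = key_mask;
		uint64_t h = seed ^ key_size;

		h ^= (k[0] & m[0]) * 0x9E3779B97F4A7C15ULL;
		h ^= (k[1] & m[1]) * 0xC2B2AE3D27D4EB4FULL;
		return h ^ (h >> 32);
	}
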
@@ -458,17 +507,17 @@ rte_table_hash_entry_add_key16_ext(
/* Key is not present in the bucket */
for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
- bucket_prev = bucket, bucket = bucket->next)
+ bucket_prev = bucket, bucket = bucket->next)
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if (bucket_signature == 0) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
+ keycpy(bucket_key, key, f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) bucket_data;
@@ -487,7 +536,7 @@ rte_table_hash_entry_add_key16_ext(
bucket_prev->next_valid = 1;
bucket->signature[0] = signature;
- memcpy(bucket->key[0], key, f->key_size);
+ keycpy(&bucket->key[0], key, f->key_mask);
memcpy(&bucket->data[0], entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) &bucket->data[0];
@@ -509,7 +558,7 @@ rte_table_hash_entry_delete_key16_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_16 *)
&f->memory[bucket_index * f->bucket_size];
@@ -520,18 +569,17 @@ rte_table_hash_entry_delete_key16_ext(
bucket_prev = bucket, bucket = bucket->next)
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
bucket->signature[i] = 0;
*key_found = 1;
if (entry)
- memcpy(entry, bucket_data,
- f->entry_size);
+ memcpy(entry, bucket_data, f->entry_size);
if ((bucket->signature[0] == 0) &&
(bucket->signature[1] == 0) &&
@@ -558,26 +606,28 @@ rte_table_hash_entry_delete_key16_ext(
return 0;
}
-#define lookup_key16_cmp(key_in, bucket, pos) \
+#define lookup_key16_cmp(key_in, bucket, pos, f) \
{ \
- uint64_t xor[4][2], or[4], signature[4]; \
+ uint64_t xor[4][2], or[4], signature[4], k[2]; \
\
+ k[0] = key_in[0] & f->key_mask[0]; \
+ k[1] = key_in[1] & f->key_mask[1]; \
signature[0] = (~bucket->signature[0]) & 1; \
signature[1] = (~bucket->signature[1]) & 1; \
signature[2] = (~bucket->signature[2]) & 1; \
signature[3] = (~bucket->signature[3]) & 1; \
\
- xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
- xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
+ xor[0][0] = k[0] ^ bucket->key[0][0]; \
+ xor[0][1] = k[1] ^ bucket->key[0][1]; \
\
- xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
- xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
+ xor[1][0] = k[0] ^ bucket->key[1][0]; \
+ xor[1][1] = k[1] ^ bucket->key[1][1]; \
\
- xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
- xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
+ xor[2][0] = k[0] ^ bucket->key[2][0]; \
+ xor[2][1] = k[1] ^ bucket->key[2][1]; \
\
- xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
- xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
+ xor[3][0] = k[0] ^ bucket->key[3][0]; \
+ xor[3][1] = k[1] ^ bucket->key[3][1]; \
\
or[0] = xor[0][0] | xor[0][1] | signature[0]; \
or[1] = xor[1][0] | xor[1][1] | signature[1]; \
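
The masked compare above is branchless: or[i] collapses to zero exactly when slot i holds a valid signature (bit 0 set) and the masked key matches both 64-bit lanes. A scalar sketch of the same predicate, over a hypothetical struct mirroring the rte_bucket_4_16 fields used here (the macro's tail, outside this hunk, resolves pos the same way without branches):

	#include <stdint.h>

	struct bucket16 {		/* field subset mirroring rte_bucket_4_16 */
		uint64_t signature[4];
		uint64_t key[4][2];
	};

	/* Returns the matching slot in [0..3], or 4 on miss, i.e. the same
	 * "pos" the macro computes, just with branches instead of OR-folding.
	 */
	static uint32_t
	match_slot(const uint64_t k[2], const struct bucket16 *b)
	{
		uint32_t i;

		for (i = 0; i < 4; i++) {
			uint64_t invalid = (~b->signature[i]) & 1;

			if ((k[0] ^ b->key[i][0]) == 0 &&
			    (k[1] ^ b->key[i][1]) == 0 &&
			    invalid == 0)
				return i;
		}
		return 4;
	}
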
@@ -610,30 +660,12 @@ rte_table_hash_entry_delete_key16_ext(
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
- rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup1_stage1_dosig(mbuf1, bucket1, f) \
-{ \
uint64_t *key; \
uint64_t signature = 0; \
uint32_t bucket_index; \
- uint64_t hash_key_buffer[2]; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
- \
- hash_key_buffer[0] = key[0] & f->key_mask[0]; \
- hash_key_buffer[1] = key[1] & f->key_mask[1]; \
- signature = f->f_hash(hash_key_buffer, \
- RTE_TABLE_HASH_KEY_SIZE, f->seed); \
+ signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
\
bucket_index = signature & (f->n_buckets - 1); \
bucket1 = (struct rte_bucket_4_16 *) \
@@ -648,14 +680,10 @@ rte_table_hash_entry_delete_key16_ext(
void *a; \
uint64_t pkt_mask; \
uint64_t *key; \
- uint64_t hash_key_buffer[2]; \
uint32_t pos; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer[0] = key[0] & f->key_mask[0]; \
- hash_key_buffer[1] = key[1] & f->key_mask[1]; \
- \
- lookup_key16_cmp(hash_key_buffer, bucket2, pos); \
+ lookup_key16_cmp(key, bucket2, pos, f); \
\
pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -673,14 +701,10 @@ rte_table_hash_entry_delete_key16_ext(
void *a; \
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
- uint64_t hash_key_buffer[2]; \
uint32_t pos; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer[0] = key[0] & f->key_mask[0]; \
- hash_key_buffer[1] = key[1] & f->key_mask[1]; \
- \
- lookup_key16_cmp(hash_key_buffer, bucket2, pos); \
+ lookup_key16_cmp(key, bucket2, pos, f); \
\
pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -703,15 +727,11 @@ rte_table_hash_entry_delete_key16_ext(
void *a; \
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
- uint64_t hash_key_buffer[2]; \
uint32_t pos; \
\
bucket = buckets[pkt_index]; \
key = keys[pkt_index]; \
- hash_key_buffer[0] = key[0] & f->key_mask[0]; \
- hash_key_buffer[1] = key[1] & f->key_mask[1]; \
- \
- lookup_key16_cmp(hash_key_buffer, bucket, pos); \
+ lookup_key16_cmp(key, bucket, pos, f); \
\
pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
pkts_mask_out |= pkt_mask; \
@@ -775,36 +795,12 @@ rte_table_hash_entry_delete_key16_ext(
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- \
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
- \
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_16 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
- rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
-}
-
-#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f) \
-{ \
uint64_t *key10, *key11; \
- uint64_t hash_offset_buffer[2]; \
uint64_t signature10, signature11; \
uint32_t bucket10_index, bucket11_index; \
\
key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset);\
- hash_offset_buffer[0] = key10[0] & f->key_mask[0]; \
- hash_offset_buffer[1] = key10[1] & f->key_mask[1]; \
- signature10 = f->f_hash(hash_offset_buffer, \
- RTE_TABLE_HASH_KEY_SIZE, f->seed);\
+ signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed);\
bucket10_index = signature10 & (f->n_buckets - 1); \
bucket10 = (struct rte_bucket_4_16 *) \
&f->memory[bucket10_index * f->bucket_size]; \
@@ -812,10 +808,7 @@ rte_table_hash_entry_delete_key16_ext(
rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
\
key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset);\
- hash_offset_buffer[0] = key11[0] & f->key_mask[0]; \
- hash_offset_buffer[1] = key11[1] & f->key_mask[1]; \
- signature11 = f->f_hash(hash_offset_buffer, \
- RTE_TABLE_HASH_KEY_SIZE, f->seed);\
+ signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
bucket11_index = signature11 & (f->n_buckets - 1); \
bucket11 = (struct rte_bucket_4_16 *) \
&f->memory[bucket11_index * f->bucket_size]; \
@@ -829,19 +822,13 @@ rte_table_hash_entry_delete_key16_ext(
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask; \
uint64_t *key20, *key21; \
- uint64_t hash_key_buffer20[2]; \
- uint64_t hash_key_buffer21[2]; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_key_buffer20[0] = key20[0] & f->key_mask[0]; \
- hash_key_buffer20[1] = key20[1] & f->key_mask[1]; \
- hash_key_buffer21[0] = key21[0] & f->key_mask[0]; \
- hash_key_buffer21[1] = key21[1] & f->key_mask[1]; \
\
- lookup_key16_cmp(hash_key_buffer20, bucket20, pos20); \
- lookup_key16_cmp(hash_key_buffer21, bucket21, pos21); \
+ lookup_key16_cmp(key20, bucket20, pos20, f); \
+ lookup_key16_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
@@ -864,19 +851,13 @@ rte_table_hash_entry_delete_key16_ext(
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
uint64_t *key20, *key21; \
- uint64_t hash_key_buffer20[2]; \
- uint64_t hash_key_buffer21[2]; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_key_buffer20[0] = key20[0] & f->key_mask[0]; \
- hash_key_buffer20[1] = key20[1] & f->key_mask[1]; \
- hash_key_buffer21[0] = key21[0] & f->key_mask[0]; \
- hash_key_buffer21[1] = key21[1] & f->key_mask[1]; \
\
- lookup_key16_cmp(hash_key_buffer20, bucket20, pos20); \
- lookup_key16_cmp(hash_key_buffer21, bucket21, pos21); \
+ lookup_key16_cmp(key20, bucket20, pos20, f); \
+ lookup_key16_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
@@ -916,6 +897,7 @@ rte_table_hash_lookup_key16_lru(
uint64_t pkts_mask_out = 0;
__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+
RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
/* Cannot run the pipeline with less than 5 packets */
@@ -932,8 +914,8 @@ rte_table_hash_lookup_key16_lru(
}
*lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f,
- n_pkts_in - __builtin_popcountll(pkts_mask_out));
+ RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
+ __builtin_popcountll(pkts_mask_out));
return 0;
}
@@ -1026,136 +1008,7 @@ rte_table_hash_lookup_key16_lru(
RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
__builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key16_lru() */
-
-static int
-rte_table_hash_lookup_key16_lru_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
-
- RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_16 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
- __builtin_popcountll(pkts_mask_out));
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
- __builtin_popcountll(pkts_mask_out));
- return 0;
-} /* rte_table_hash_lookup_key16_lru_dosig() */
+} /* lookup LRU */
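
With the dosig copy gone, the single LRU lookup above keeps the three-stage software pipeline: stage 0 prefetches packet metadata, stage 1 hashes the key and prefetches the bucket, stage 2 runs the compare. The fill/run/flush shape from the deleted body generalizes as below; stage0/stage1/stage2 and struct item are hypothetical stand-ins for the lookup2_stage* macros, which additionally process packets in pairs:

	struct item {			/* stand-in for per-packet (mbuf, bucket) state */
		void *mbuf;
		void *bucket;
	};

	/* Generic 3-stage pipeline, two iterations deep per packet, so the
	 * prefetches issued in stages 0/1 complete while stage 2 does useful
	 * work. Assumes n >= 2; the real code falls back to a plain loop for
	 * bursts of fewer than 5 packets.
	 */
	static void
	pipeline_run(struct item *items, int n,
		void (*stage0)(struct item *),	/* prefetch metadata */
		void (*stage1)(struct item *),	/* hash + prefetch bucket */
		void (*stage2)(struct item *))	/* compare + emit result */
	{
		int i;

		/* Fill */
		stage0(&items[0]);
		stage0(&items[1]);
		stage1(&items[0]);

		/* Run */
		for (i = 2; i < n; i++) {
			stage0(&items[i]);
			stage1(&items[i - 1]);
			stage2(&items[i - 2]);
		}

		/* Flush */
		stage1(&items[n - 1]);
		stage2(&items[n - 2]);
		stage2(&items[n - 1]);
	}
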
static int
rte_table_hash_lookup_key16_ext(
@@ -1175,6 +1028,7 @@ rte_table_hash_lookup_key16_ext(
uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
+
RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
/* Cannot run the pipeline with less than 5 packets */
@@ -1306,159 +1160,7 @@ grind_next_buckets:
RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
__builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key16_ext() */
-
-static int
-rte_table_hash_lookup_key16_ext_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
-
- RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_16 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask,
- buckets, keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
- __builtin_popcountll(pkts_mask_out));
- return 0;
-} /* rte_table_hash_lookup_key16_ext_dosig() */
+} /* lookup EXT */
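
The EXT lookup keeps the grinder from the deleted body: packets whose home bucket misses but chains onward are parked in buckets_mask, and each pass walks one link of every pending chain until the mask drains. As a standalone sketch, where grind_one() is a hypothetical stand-in for the lookup_grinder macro:

	#include <stdint.h>

	/* Each set bit in buckets_mask is a packet index still walking its
	 * bucket chain; grind_one() inspects the current bucket and re-arms
	 * the bit in *next_mask when the chain continues (next_valid set).
	 */
	static void
	grind_buckets(uint64_t buckets_mask,
		void (*grind_one)(uint32_t pkt_index, uint64_t *next_mask))
	{
		while (buckets_mask) {
			uint64_t next_mask = 0;

			while (buckets_mask) {
				uint32_t pkt_index = __builtin_ctzll(buckets_mask);

				buckets_mask &= ~(1LLU << pkt_index);
				grind_one(pkt_index, &next_mask);
			}
			buckets_mask = next_mask;
		}
	}
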
static int
rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats, int clear)
@@ -1485,15 +1187,6 @@ struct rte_table_ops rte_table_hash_key16_lru_ops = {
.f_stats = rte_table_hash_key16_stats_read,
};
-struct rte_table_ops rte_table_hash_key16_lru_dosig_ops = {
- .f_create = rte_table_hash_create_key16_lru,
- .f_free = rte_table_hash_free_key16_lru,
- .f_add = rte_table_hash_entry_add_key16_lru,
- .f_delete = rte_table_hash_entry_delete_key16_lru,
- .f_lookup = rte_table_hash_lookup_key16_lru_dosig,
- .f_stats = rte_table_hash_key16_stats_read,
-};
-
struct rte_table_ops rte_table_hash_key16_ext_ops = {
.f_create = rte_table_hash_create_key16_ext,
.f_free = rte_table_hash_free_key16_ext,
@@ -1504,12 +1197,3 @@ struct rte_table_ops rte_table_hash_key16_ext_ops = {
.f_lookup = rte_table_hash_lookup_key16_ext,
.f_stats = rte_table_hash_key16_stats_read,
};
-
-struct rte_table_ops rte_table_hash_key16_ext_dosig_ops = {
- .f_create = rte_table_hash_create_key16_ext,
- .f_free = rte_table_hash_free_key16_ext,
- .f_add = rte_table_hash_entry_add_key16_ext,
- .f_delete = rte_table_hash_entry_delete_key16_ext,
- .f_lookup = rte_table_hash_lookup_key16_ext_dosig,
- .f_stats = rte_table_hash_key16_stats_read,
-};
diff --git a/lib/librte_table/rte_table_hash_key32.c b/lib/librte_table/rte_table_hash_key32.c
index 31fe6fda..d4364d62 100644
--- a/lib/librte_table/rte_table_hash_key32.c
+++ b/lib/librte_table/rte_table_hash_key32.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
@@ -42,7 +42,9 @@
#include "rte_table_hash.h"
#include "rte_lru.h"
-#define RTE_TABLE_HASH_KEY_SIZE 32
+#define KEY_SIZE 32
+
+#define KEYS_PER_BUCKET 4
#define RTE_BUCKET_ENTRY_VALID 0x1LLU
@@ -79,12 +81,11 @@ struct rte_table_hash {
/* Input parameters */
uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
uint32_t key_size;
uint32_t entry_size;
uint32_t bucket_size;
- uint32_t signature_offset;
uint32_t key_offset;
+ uint64_t key_mask[4];
rte_table_hash_op_hash f_hash;
uint64_t seed;
@@ -98,10 +99,52 @@ struct rte_table_hash {
};
static int
-check_params_create_lru(struct rte_table_hash_key32_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+keycmp(void *a, void *b, void *b_mask)
+{
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+
+ return (a64[0] != (b64[0] & b_mask64[0])) ||
+ (a64[1] != (b64[1] & b_mask64[1])) ||
+ (a64[2] != (b64[2] & b_mask64[2])) ||
+ (a64[3] != (b64[3] & b_mask64[3]));
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask)
+{
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+
+ dst64[0] = src64[0] & src_mask64[0];
+ dst64[1] = src64[1] & src_mask64[1];
+ dst64[2] = src64[2] & src_mask64[2];
+ dst64[3] = src64[3] & src_mask64[3];
+}
+
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key_size */
+ if (params->key_size != KEY_SIZE) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if (params->n_keys == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+ if ((params->n_buckets == 0) ||
+ (!rte_is_power_of_2(params->n_buckets))) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
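
The per-variant lru/ext checkers are gone: key8, key16 and key32 tables all validate the same unified rte_table_hash_params. A hedged usage sketch, assuming only the field names visible at the call sites in this patch (name, key_size, key_offset, key_mask, n_keys, n_buckets, f_hash, seed); my_hash and the mask contents are illustrative:

	#include <stdint.h>
	#include <rte_table_hash.h>

	/* User-supplied hash callback, as sketched earlier. */
	extern uint64_t my_hash(void *key, void *key_mask,
		uint32_t key_size, uint64_t seed);

	/* Illustrative: set 0xFF for every key byte that should participate. */
	static uint8_t flow_key_mask[32];

	static struct rte_table_hash_params flow_params = {
		.name = "flow_table",		/* also names the allocation */
		.key_size = 32,			/* must equal this file's KEY_SIZE */
		.key_offset = 0,		/* key location within mbuf metadata */
		.key_mask = flow_key_mask,	/* NULL selects the all-ones mask */
		.n_keys = 1 << 16,
		.n_buckets = 1 << 14,		/* nonzero power of 2, per the check above */
		.f_hash = my_hash,
		.seed = 0,
	};
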
@@ -120,51 +163,83 @@ rte_table_hash_create_key32_lru(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_table_hash_key32_lru_params *p =
- (struct rte_table_hash_key32_lru_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
- uint32_t total_size, i;
+ uint64_t bucket_size, total_size;
+ uint32_t n_buckets, i;
/* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_32) % 64) != 0)) {
+ ((sizeof(struct rte_bucket_4_32) % 64) != 0))
return NULL;
- }
- n_entries_per_bucket = 4;
- key_size = 32;
+
+ /*
+ * Table dimensioning
+ *
+	 * Objective: Pick the number of buckets (n_buckets) so that there is a chance
+ * to store n_keys keys in the table.
+ *
+ * Note: Since the buckets do not get extended, it is not possible to
+ * guarantee that n_keys keys can be stored in the table at any time. In the
+	 * worst case scenario when all the n_keys keys fall into the same bucket, only
+	 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
+	 * defeats the purpose of the hash table. It indicates an unsuitable f_hash or
+ * n_keys to n_buckets ratio.
+ *
+ * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
+ */
+ n_buckets = rte_align32pow2(
+ (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
+ n_buckets = RTE_MAX(n_buckets, p->n_buckets);
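
Unlike the EXT variant, the LRU table never chains, so capacity is best-effort: the code only enforces a floor and rounds up to a power of 2 (the bucket index is computed as signature & (n_buckets - 1)). A worked example with hypothetical numbers:

	#include <stdint.h>
	#include <rte_common.h>	/* rte_align32pow2(), RTE_MAX() */

	#define KEYS_PER_BUCKET 4

	/* n_keys = 1000, caller suggests 64 buckets:
	 * ceil(1000 / 4) = 250 -> rte_align32pow2(250) = 256 -> RTE_MAX = 256.
	 * The caller's 64 loses: 64 buckets cap out at 256 keys, and LRU
	 * buckets evict rather than extend, so even 256 buckets merely give
	 * the 1000 keys a chance to fit, not a guarantee.
	 */
	static uint32_t
	lru_n_buckets(uint32_t n_keys, uint32_t requested)
	{
		uint32_t n_buckets = rte_align32pow2(
			(n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);

		return RTE_MAX(n_buckets, requested);
	}
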
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
+ KEYS_PER_BUCKET * entry_size);
+ total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n", __func__,
- total_size);
+ "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
+ if (p->key_mask != NULL) {
+ f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
+ f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
+ f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
+ f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
+ } else {
+ f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
+ }
+
for (i = 0; i < n_buckets; i++) {
struct rte_bucket_4_32 *bucket;
@@ -204,7 +279,7 @@ rte_table_hash_entry_add_key32_lru(
uint64_t signature, pos;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_32 *)
&f->memory[bucket_index * f->bucket_size];
@@ -213,10 +288,10 @@ rte_table_hash_entry_add_key32_lru(
/* Key is present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
memcpy(bucket_data, entry, f->entry_size);
@@ -230,13 +305,13 @@ rte_table_hash_entry_add_key32_lru(
/* Key is not present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if (bucket_signature == 0) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
+ keycpy(bucket_key, key, f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
lru_update(bucket, i);
*key_found = 0;
@@ -249,10 +324,10 @@ rte_table_hash_entry_add_key32_lru(
/* Bucket full: replace LRU entry */
pos = lru_pos(bucket);
bucket->signature[pos] = signature;
- memcpy(bucket->key[pos], key, f->key_size);
+ keycpy(&bucket->key[pos], key, f->key_mask);
memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
lru_update(bucket, pos);
- *key_found = 0;
+ *key_found = 0;
*entry_ptr = (void *) &bucket->data[pos * f->entry_size];
return 0;
@@ -270,7 +345,7 @@ rte_table_hash_entry_delete_key32_lru(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_32 *)
&f->memory[bucket_index * f->bucket_size];
@@ -279,10 +354,10 @@ rte_table_hash_entry_delete_key32_lru(
/* Key is present in the bucket */
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature[i] = 0;
@@ -299,81 +374,72 @@ rte_table_hash_entry_delete_key32_lru(
return 0;
}
-static int
-check_params_create_ext(struct rte_table_hash_key32_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void *
rte_table_hash_create_key32_ext(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_table_hash_key32_ext_params *p =
- params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
- uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;
+ uint64_t bucket_size, stack_size, total_size;
+ uint32_t n_buckets_ext, i;
/* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
((sizeof(struct rte_bucket_4_32) % 64) != 0))
return NULL;
- n_entries_per_bucket = 4;
- key_size = 32;
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
+ * it is guaranteed that n_keys keys can be stored in the table at any time.
+ *
+ * The worst case scenario takes place when all the n_keys keys fall into
+ * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
+ * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
+ * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
+ * into a different bucket. This case defeats the purpose of the hash table.
+	 * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
+ *
+ * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
+ */
+ n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
- * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
+ KEYS_PER_BUCKET * entry_size);
+ stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
total_size = sizeof(struct rte_table_hash) +
- ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
+ (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n", __func__,
- total_size);
+ "%s: Hash table %s memory footprint "
+		"is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->n_buckets = p->n_buckets;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
@@ -381,7 +447,19 @@ rte_table_hash_create_key32_ext(void *params,
f->n_buckets_ext = n_buckets_ext;
f->stack_pos = n_buckets_ext;
f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+ &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
+
+ if (p->key_mask != NULL) {
+ f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
+ f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
+ f->key_mask[2] = (((uint64_t *)p->key_mask)[2]);
+ f->key_mask[3] = (((uint64_t *)p->key_mask)[3]);
+ } else {
+ f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
+ f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
+ }
for (i = 0; i < n_buckets_ext; i++)
f->stack[i] = i;
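
As in the key16 case, a NULL key_mask installs an all-ones mask, so keycmp()/keycpy() degrade gracefully to the old exact memcmp()/memcpy() behavior with no extra branch in the fast path. A quick self-check of that equivalence:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t ones[4] = { ~0ULL, ~0ULL, ~0ULL, ~0ULL };
		uint64_t stored[4] = { 1, 2, 3, 4 };
		uint64_t probe[4] = { 1, 2, 3, 4 };
		int i, miss = 0;

		/* keycmp() with the default mask: (probe[i] & ~0) == probe[i],
		 * so the hit/miss outcome equals an exact 32-byte memcmp().
		 */
		for (i = 0; i < 4; i++)
			miss |= (stored[i] != (probe[i] & ones[i]));

		assert(miss == (memcmp(stored, probe, sizeof(stored)) != 0));
		return 0;
	}
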
@@ -417,7 +495,7 @@ rte_table_hash_entry_add_key32_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_32 *)
&f->memory[bucket_index * f->bucket_size];
@@ -427,10 +505,10 @@ rte_table_hash_entry_add_key32_ext(
for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
@@ -448,14 +526,14 @@ rte_table_hash_entry_add_key32_ext(
bucket_prev = bucket, bucket = bucket->next)
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if (bucket_signature == 0) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
bucket->signature[i] = signature;
- memcpy(bucket_key, key, f->key_size);
+ keycpy(bucket_key, key, f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) bucket_data;
@@ -475,7 +553,7 @@ rte_table_hash_entry_add_key32_ext(
bucket_prev->next_valid = 1;
bucket->signature[0] = signature;
- memcpy(bucket->key[0], key, f->key_size);
+ keycpy(&bucket->key[0], key, f->key_mask);
memcpy(&bucket->data[0], entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) &bucket->data[0];
@@ -497,7 +575,7 @@ rte_table_hash_entry_delete_key32_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_32 *)
&f->memory[bucket_index * f->bucket_size];
@@ -508,24 +586,23 @@ rte_table_hash_entry_delete_key32_ext(
bucket_prev = bucket, bucket = bucket->next)
for (i = 0; i < 4; i++) {
uint64_t bucket_signature = bucket->signature[i];
- uint8_t *bucket_key = (uint8_t *) bucket->key[i];
+ uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
if ((bucket_signature == signature) &&
- (memcmp(key, bucket_key, f->key_size) == 0)) {
+ (keycmp(bucket_key, key, f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
bucket->signature[i] = 0;
*key_found = 1;
if (entry)
- memcpy(entry, bucket_data,
- f->entry_size);
+ memcpy(entry, bucket_data, f->entry_size);
if ((bucket->signature[0] == 0) &&
- (bucket->signature[1] == 0) &&
- (bucket->signature[2] == 0) &&
- (bucket->signature[3] == 0) &&
- (bucket_prev != NULL)) {
+ (bucket->signature[1] == 0) &&
+ (bucket->signature[2] == 0) &&
+ (bucket->signature[3] == 0) &&
+ (bucket_prev != NULL)) {
bucket_prev->next = bucket->next;
bucket_prev->next_valid =
bucket->next_valid;
@@ -546,34 +623,39 @@ rte_table_hash_entry_delete_key32_ext(
return 0;
}
-#define lookup_key32_cmp(key_in, bucket, pos) \
+#define lookup_key32_cmp(key_in, bucket, pos, f) \
{ \
- uint64_t xor[4][4], or[4], signature[4]; \
+ uint64_t xor[4][4], or[4], signature[4], k[4]; \
+ \
+ k[0] = key_in[0] & f->key_mask[0]; \
+ k[1] = key_in[1] & f->key_mask[1]; \
+ k[2] = key_in[2] & f->key_mask[2]; \
+ k[3] = key_in[3] & f->key_mask[3]; \
\
signature[0] = ((~bucket->signature[0]) & 1); \
signature[1] = ((~bucket->signature[1]) & 1); \
signature[2] = ((~bucket->signature[2]) & 1); \
signature[3] = ((~bucket->signature[3]) & 1); \
\
- xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
- xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
- xor[0][2] = key_in[2] ^ bucket->key[0][2]; \
- xor[0][3] = key_in[3] ^ bucket->key[0][3]; \
+ xor[0][0] = k[0] ^ bucket->key[0][0]; \
+ xor[0][1] = k[1] ^ bucket->key[0][1]; \
+ xor[0][2] = k[2] ^ bucket->key[0][2]; \
+ xor[0][3] = k[3] ^ bucket->key[0][3]; \
\
- xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
- xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
- xor[1][2] = key_in[2] ^ bucket->key[1][2]; \
- xor[1][3] = key_in[3] ^ bucket->key[1][3]; \
+ xor[1][0] = k[0] ^ bucket->key[1][0]; \
+ xor[1][1] = k[1] ^ bucket->key[1][1]; \
+ xor[1][2] = k[2] ^ bucket->key[1][2]; \
+ xor[1][3] = k[3] ^ bucket->key[1][3]; \
\
- xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
- xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
- xor[2][2] = key_in[2] ^ bucket->key[2][2]; \
- xor[2][3] = key_in[3] ^ bucket->key[2][3]; \
+ xor[2][0] = k[0] ^ bucket->key[2][0]; \
+ xor[2][1] = k[1] ^ bucket->key[2][1]; \
+ xor[2][2] = k[2] ^ bucket->key[2][2]; \
+ xor[2][3] = k[3] ^ bucket->key[2][3]; \
\
- xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
- xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
- xor[3][2] = key_in[2] ^ bucket->key[3][2]; \
- xor[3][3] = key_in[3] ^ bucket->key[3][3]; \
+ xor[3][0] = k[0] ^ bucket->key[3][0]; \
+ xor[3][1] = k[1] ^ bucket->key[3][1]; \
+ xor[3][2] = k[2] ^ bucket->key[3][2]; \
+ xor[3][3] = k[3] ^ bucket->key[3][3]; \
\
or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
@@ -604,12 +686,15 @@ rte_table_hash_entry_delete_key32_ext(
rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset));\
}
-#define lookup1_stage1(mbuf1, bucket1, f) \
+#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
+ uint64_t *key; \
uint64_t signature; \
uint32_t bucket_index; \
\
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
+ key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
+ signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
+ \
bucket_index = signature & (f->n_buckets - 1); \
bucket1 = (struct rte_bucket_4_32 *) \
&f->memory[bucket_index * f->bucket_size]; \
@@ -627,8 +712,7 @@ rte_table_hash_entry_delete_key32_ext(
uint32_t pos; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key32_cmp(key, bucket2, pos); \
+ lookup_key32_cmp(key, bucket2, pos, f); \
\
pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -649,8 +733,7 @@ rte_table_hash_entry_delete_key32_ext(
uint32_t pos; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- \
- lookup_key32_cmp(key, bucket2, pos); \
+ lookup_key32_cmp(key, bucket2, pos, f); \
\
pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -678,7 +761,7 @@ rte_table_hash_entry_delete_key32_ext(
bucket = buckets[pkt_index]; \
key = keys[pkt_index]; \
\
- lookup_key32_cmp(key, bucket, pos); \
+ lookup_key32_cmp(key, bucket, pos, f); \
\
pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
pkts_mask_out |= pkt_mask; \
@@ -745,22 +828,27 @@ rte_table_hash_entry_delete_key32_ext(
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
+ uint64_t *key10, *key11; \
+ uint64_t signature10, signature11; \
+ uint32_t bucket10_index, bucket11_index; \
\
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
+ key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
+ signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
+ \
+ bucket10_index = signature10 & (f->n_buckets - 1); \
bucket10 = (struct rte_bucket_4_32 *) \
&f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
+ rte_prefetch0(bucket10); \
rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
\
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
+ key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
+ signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
+ \
+ bucket11_index = signature11 & (f->n_buckets - 1); \
bucket11 = (struct rte_bucket_4_32 *) \
&f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
+ rte_prefetch0(bucket11); \
rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
}
@@ -776,8 +864,8 @@ rte_table_hash_entry_delete_key32_ext(
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
\
- lookup_key32_cmp(key20, bucket20, pos20); \
- lookup_key32_cmp(key21, bucket21, pos21); \
+ lookup_key32_cmp(key20, bucket20, pos20, f); \
+ lookup_key32_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
@@ -805,8 +893,8 @@ rte_table_hash_entry_delete_key32_ext(
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
\
- lookup_key32_cmp(key20, bucket20, pos20); \
- lookup_key32_cmp(key21, bucket21, pos21); \
+ lookup_key32_cmp(key20, bucket20, pos20, f); \
+ lookup_key32_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
diff --git a/lib/librte_table/rte_table_hash_key8.c b/lib/librte_table/rte_table_hash_key8.c
index 5f0c6566..94373043 100644
--- a/lib/librte_table/rte_table_hash_key8.c
+++ b/lib/librte_table/rte_table_hash_key8.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
@@ -42,7 +42,9 @@
#include "rte_table_hash.h"
#include "rte_lru.h"
-#define RTE_TABLE_HASH_KEY_SIZE 8
+#define KEY_SIZE 8
+
+#define KEYS_PER_BUCKET 4
#ifdef RTE_TABLE_STATS_COLLECT
@@ -76,11 +78,9 @@ struct rte_table_hash {
/* Input parameters */
uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
uint32_t key_size;
uint32_t entry_size;
uint32_t bucket_size;
- uint32_t signature_offset;
uint32_t key_offset;
uint64_t key_mask;
rte_table_hash_op_hash f_hash;
@@ -96,10 +96,46 @@ struct rte_table_hash {
};
static int
-check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+keycmp(void *a, void *b, void *b_mask)
+{
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+
+ return a64[0] != (b64[0] & b_mask64[0]);
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask)
+{
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+
+ dst64[0] = src64[0] & src_mask64[0];
+}
+
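
For 8-byte keys the mask collapses to a single uint64_t (note keycmp() takes &f->key_mask at the call sites below), and only the probe side needs masking because keycpy() stores keys pre-masked. A small check with hypothetical values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical: match only the low 48 bits of the 8-byte key,
		 * e.g. a MAC address padded out to 8 bytes.
		 */
		uint64_t mask = 0x0000FFFFFFFFFFFFULL;
		uint64_t stored = 0x0000AABBCCDDEEFFULL;	/* keycpy() output */
		uint64_t probe = 0x1234AABBCCDDEEFFULL;		/* raw packet key */

		/* keycmp() equivalent: padding bits in the probe are ignored */
		assert(stored == (probe & mask));
		return 0;
	}
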
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key_size */
+ if (params->key_size != KEY_SIZE) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if (params->n_keys == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+ if ((params->n_buckets == 0) ||
+ (!rte_is_power_of_2(params->n_buckets))) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
@@ -116,47 +152,68 @@ check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
static void *
rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_key8_lru_params *p =
- (struct rte_table_hash_key8_lru_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
- uint32_t total_size, i;
+ uint64_t bucket_size, total_size;
+ uint32_t n_buckets, i;
/* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_8) % 64) != 0)) {
+ ((sizeof(struct rte_bucket_4_8) % 64) != 0))
return NULL;
- }
- n_entries_per_bucket = 4;
- key_size = 8;
+
+ /*
+ * Table dimensioning
+ *
+	 * Objective: Pick the number of buckets (n_buckets) so that there is a chance
+ * to store n_keys keys in the table.
+ *
+ * Note: Since the buckets do not get extended, it is not possible to
+ * guarantee that n_keys keys can be stored in the table at any time. In the
+	 * worst case scenario when all the n_keys keys fall into the same bucket, only
+	 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
+	 * defeats the purpose of the hash table. It indicates an unsuitable f_hash or
+ * n_keys to n_buckets ratio.
+ *
+ * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
+ */
+ n_buckets = rte_align32pow2(
+ (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
+ n_buckets = RTE_MAX(n_buckets, p->n_buckets);
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_8) +
+ KEYS_PER_BUCKET * entry_size);
+ total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
+
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
@@ -205,7 +262,7 @@ rte_table_hash_entry_add_key8_lru(
uint64_t signature, mask, pos;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
@@ -213,10 +270,10 @@ rte_table_hash_entry_add_key8_lru(
/* Key is present in the bucket */
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
memcpy(bucket_data, entry, f->entry_size);
@@ -235,7 +292,7 @@ rte_table_hash_entry_add_key8_lru(
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
+ keycpy(&bucket->key[i], key, &f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
lru_update(bucket, i);
*key_found = 0;
@@ -247,10 +304,10 @@ rte_table_hash_entry_add_key8_lru(
/* Bucket full: replace LRU entry */
pos = lru_pos(bucket);
- bucket->key[pos] = *((uint64_t *) key);
+ keycpy(&bucket->key[pos], key, &f->key_mask);
memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
lru_update(bucket, pos);
- *key_found = 0;
+ *key_found = 0;
*entry_ptr = (void *) &bucket->data[pos * f->entry_size];
return 0;
@@ -268,7 +325,7 @@ rte_table_hash_entry_delete_key8_lru(
uint64_t signature, mask;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
@@ -276,10 +333,10 @@ rte_table_hash_entry_delete_key8_lru(
/* Key is present in the bucket */
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature &= ~mask;
@@ -296,79 +353,71 @@ rte_table_hash_entry_delete_key8_lru(
return 0;
}
-static int
-check_params_create_ext(struct rte_table_hash_key8_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void *
rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_key8_ext_params *p =
- (struct rte_table_hash_key8_ext_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size;
- uint32_t bucket_size_cl, stack_size_cl, total_size, i;
+ uint64_t bucket_size, stack_size, total_size;
+ uint32_t n_buckets_ext, i;
/* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
((sizeof(struct rte_bucket_4_8) % 64) != 0))
return NULL;
- n_entries_per_bucket = 4;
- key_size = 8;
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
+ * it is guaranteed that n_keys keys can be stored in the table at any time.
+ *
+ * The worst-case scenario takes place when all the n_keys keys fall into
+ * the same bucket. More precisely, due to the KEYS_PER_BUCKET scheme, the
+ * worst case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into
+ * the same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
+ * into a different bucket. This case defeats the purpose of the hash table.
+ * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
+ *
+ * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
+ */
+ n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + ((n_buckets +
- n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_8) +
+ KEYS_PER_BUCKET * entry_size);
+ stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
+ total_size = sizeof(struct rte_table_hash) +
+ (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
+
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->n_buckets = p->n_buckets;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
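
The guarantee stated in the comment above is plain arithmetic: a chain holding k keys needs ceil(k / KEYS_PER_BUCKET) buckets, and the formula over-provisions enough extensions to cover the worst split. A small sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define KEYS_PER_BUCKET 4

    int main(void)
    {
        uint32_t n_keys = 1000;

        /*
         * Worst case: (n_keys - KEYS_PER_BUCKET + 1) keys collide in one
         * bucket chain and the remaining (KEYS_PER_BUCKET - 1) keys land
         * in distinct buckets, so this many extension buckets suffice:
         */
        uint32_t n_buckets_ext = n_keys / KEYS_PER_BUCKET +
            KEYS_PER_BUCKET - 1;

        /* The colliding chain needs ceil(997 / 4) - 1 = 249 <= 253 */
        printf("n_keys=%u -> n_buckets_ext=%u\n", n_keys, n_buckets_ext);
        return 0;
    }
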
@@ -376,7 +425,7 @@ rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
f->n_buckets_ext = n_buckets_ext;
f->stack_pos = n_buckets_ext;
f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+ &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
if (p->key_mask != NULL)
f->key_mask = ((uint64_t *)p->key_mask)[0];
@@ -417,7 +466,7 @@ rte_table_hash_entry_add_key8_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
@@ -428,10 +477,10 @@ rte_table_hash_entry_add_key8_ext(
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
@@ -456,7 +505,7 @@ rte_table_hash_entry_add_key8_ext(
f->entry_size];
bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
+ keycpy(&bucket->key[i], key, &f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) bucket_data;
@@ -476,7 +525,7 @@ rte_table_hash_entry_add_key8_ext(
bucket_prev->next_valid = 1;
bucket->signature = 1;
- bucket->key[0] = *((uint64_t *) key);
+ keycpy(&bucket->key[0], key, &f->key_mask);
memcpy(&bucket->data[0], entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) &bucket->data[0];
@@ -498,7 +547,7 @@ rte_table_hash_entry_delete_key8_ext(
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
@@ -510,10 +559,10 @@ rte_table_hash_entry_delete_key8_ext(
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
@@ -546,16 +595,17 @@ rte_table_hash_entry_delete_key8_ext(
return 0;
}
-#define lookup_key8_cmp(key_in, bucket, pos) \
+#define lookup_key8_cmp(key_in, bucket, pos, f) \
{ \
- uint64_t xor[4], signature; \
+ uint64_t xor[4], signature, k; \
\
signature = ~bucket->signature; \
\
- xor[0] = (key_in[0] ^ bucket->key[0]) | (signature & 1);\
- xor[1] = (key_in[0] ^ bucket->key[1]) | (signature & 2);\
- xor[2] = (key_in[0] ^ bucket->key[2]) | (signature & 4);\
- xor[3] = (key_in[0] ^ bucket->key[3]) | (signature & 8);\
+ k = key_in[0] & f->key_mask; \
+ xor[0] = (k ^ bucket->key[0]) | (signature & 1); \
+ xor[1] = (k ^ bucket->key[1]) | (signature & 2); \
+ xor[2] = (k ^ bucket->key[2]) | (signature & 4); \
+ xor[3] = (k ^ bucket->key[3]) | (signature & 8); \
\
pos = 4; \
if (xor[0] == 0) \
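
The lookup_key8_cmp macro above folds slot validity into the key compare: xor[i] can only be zero when slot i is valid and its masked key matches, so the match position falls out without a per-slot branch on the signature. A standalone sketch of the same idea; match_pos() and the sample values are illustrative, not library code:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * xor[i] is zero only when slot i is valid (sig bit set) and the
     * masked key matches; at most one slot can match, so the last
     * assignment wins harmlessly. pos == 4 means "no match".
     */
    static uint32_t match_pos(const uint64_t bkt_key[4], uint64_t sig,
        uint64_t key, uint64_t key_mask)
    {
        uint64_t nsig = ~sig;
        uint64_t k = key & key_mask;
        uint64_t xor0 = (k ^ bkt_key[0]) | (nsig & 1);
        uint64_t xor1 = (k ^ bkt_key[1]) | (nsig & 2);
        uint64_t xor2 = (k ^ bkt_key[2]) | (nsig & 4);
        uint64_t xor3 = (k ^ bkt_key[3]) | (nsig & 8);
        uint32_t pos = 4;

        if (xor0 == 0) pos = 0;
        if (xor1 == 0) pos = 1;
        if (xor2 == 0) pos = 2;
        if (xor3 == 0) pos = 3;
        return pos;
    }

    int main(void)
    {
        uint64_t keys[4] = { 10, 20, 30, 40 };

        printf("%u\n", match_pos(keys, 0xF, 30, UINT64_MAX)); /* 2 */
        printf("%u\n", match_pos(keys, 0xB, 30, UINT64_MAX)); /* 4 */
        return 0;
    }
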
@@ -583,27 +633,12 @@ rte_table_hash_entry_delete_key8_ext(
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
- uint64_t signature; \
- uint32_t bucket_index; \
- \
- signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
- bucket_index = signature & (f->n_buckets - 1); \
- bucket1 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket_index * f->bucket_size]; \
- rte_prefetch0(bucket1); \
-}
-
-#define lookup1_stage1_dosig(mbuf1, bucket1, f) \
-{ \
uint64_t *key; \
uint64_t signature; \
uint32_t bucket_index; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
- hash_key_buffer = *key & f->key_mask; \
- signature = f->f_hash(&hash_key_buffer, \
- RTE_TABLE_HASH_KEY_SIZE, f->seed); \
+ signature = f->f_hash(key, &f->key_mask, KEY_SIZE, f->seed); \
bucket_index = signature & (f->n_buckets - 1); \
bucket1 = (struct rte_bucket_4_8 *) \
&f->memory[bucket_index * f->bucket_size]; \
@@ -617,12 +652,9 @@ rte_table_hash_entry_delete_key8_ext(
uint64_t pkt_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer = key[0] & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket2, pos); \
+ lookup_key8_cmp(key, bucket2, pos, f); \
\
pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -641,12 +673,9 @@ rte_table_hash_entry_delete_key8_ext(
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer = *key & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket2, pos); \
+ lookup_key8_cmp(key, bucket2, pos, f); \
\
pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
@@ -670,13 +699,10 @@ rte_table_hash_entry_delete_key8_ext(
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
bucket = buckets[pkt_index]; \
key = keys[pkt_index]; \
- hash_key_buffer = (*key) & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket, pos); \
+ lookup_key8_cmp(key, bucket, pos, f); \
\
pkt_mask = ((bucket->signature >> pos) & 1LLU) << pkt_index;\
pkts_mask_out |= pkt_mask; \
@@ -738,29 +764,9 @@ rte_table_hash_entry_delete_key8_ext(
rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}
-#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
-{ \
- uint64_t signature10, signature11; \
- uint32_t bucket10_index, bucket11_index; \
- \
- signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
- bucket10_index = signature10 & (f->n_buckets - 1); \
- bucket10 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket10_index * f->bucket_size]; \
- rte_prefetch0(bucket10); \
- \
- signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
- bucket11_index = signature11 & (f->n_buckets - 1); \
- bucket11 = (struct rte_bucket_4_8 *) \
- &f->memory[bucket11_index * f->bucket_size]; \
- rte_prefetch0(bucket11); \
-}
-
-#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f)\
+#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f)\
{ \
uint64_t *key10, *key11; \
- uint64_t hash_offset_buffer10; \
- uint64_t hash_offset_buffer11; \
uint64_t signature10, signature11; \
uint32_t bucket10_index, bucket11_index; \
rte_table_hash_op_hash f_hash = f->f_hash; \
@@ -769,18 +775,14 @@ rte_table_hash_entry_delete_key8_ext(
\
key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, key_offset);\
key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, key_offset);\
- hash_offset_buffer10 = *key10 & f->key_mask; \
- hash_offset_buffer11 = *key11 & f->key_mask; \
\
- signature10 = f_hash(&hash_offset_buffer10, \
- RTE_TABLE_HASH_KEY_SIZE, seed); \
+ signature10 = f_hash(key10, &f->key_mask, KEY_SIZE, seed); \
bucket10_index = signature10 & (f->n_buckets - 1); \
bucket10 = (struct rte_bucket_4_8 *) \
&f->memory[bucket10_index * f->bucket_size]; \
rte_prefetch0(bucket10); \
\
- signature11 = f_hash(&hash_offset_buffer11, \
- RTE_TABLE_HASH_KEY_SIZE, seed); \
+ signature11 = f_hash(key11, &f->key_mask, KEY_SIZE, seed); \
bucket11_index = signature11 & (f->n_buckets - 1); \
bucket11 = (struct rte_bucket_4_8 *) \
&f->memory[bucket11_index * f->bucket_size]; \
@@ -793,17 +795,13 @@ rte_table_hash_entry_delete_key8_ext(
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask; \
uint64_t *key20, *key21; \
- uint64_t hash_offset_buffer20; \
- uint64_t hash_offset_buffer21; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_offset_buffer20 = *key20 & f->key_mask; \
- hash_offset_buffer21 = *key21 & f->key_mask; \
\
- lookup_key8_cmp((&hash_offset_buffer20), bucket20, pos20);\
- lookup_key8_cmp((&hash_offset_buffer21), bucket21, pos21);\
+ lookup_key8_cmp(key20, bucket20, pos20, f); \
+ lookup_key8_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
@@ -826,17 +824,13 @@ rte_table_hash_entry_delete_key8_ext(
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
uint64_t *key20, *key21; \
- uint64_t hash_offset_buffer20; \
- uint64_t hash_offset_buffer21; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_offset_buffer20 = *key20 & f->key_mask; \
- hash_offset_buffer21 = *key21 & f->key_mask; \
\
- lookup_key8_cmp((&hash_offset_buffer20), bucket20, pos20);\
- lookup_key8_cmp((&hash_offset_buffer21), bucket21, pos21);\
+ lookup_key8_cmp(key20, bucket20, pos20, f); \
+ lookup_key8_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
@@ -871,8 +865,8 @@ rte_table_hash_lookup_key8_lru(
struct rte_table_hash *f = (struct rte_table_hash *) table;
struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index,
- pkt11_index, pkt20_index, pkt21_index;
+ uint32_t pkt00_index, pkt01_index, pkt10_index;
+ uint32_t pkt11_index, pkt20_index, pkt21_index;
uint64_t pkts_mask_out = 0;
__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
@@ -888,7 +882,7 @@ rte_table_hash_lookup_key8_lru(
lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
lookup1_stage1(mbuf, bucket, f);
lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
+ pkts_mask_out, entries, f);
}
*lookup_hit_mask = pkts_mask_out;
@@ -984,133 +978,7 @@ rte_table_hash_lookup_key8_lru(
*lookup_hit_mask = pkts_mask_out;
RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key8_lru() */
-
-static int
-rte_table_hash_lookup_key8_lru_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0;
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
- RTE_TABLE_HASH_KEY8_STATS_PKTS_IN_ADD(f, n_pkts_in);
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_lru(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, f);
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
- return 0;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries, f);
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
- return 0;
-} /* rte_table_hash_lookup_key8_lru_dosig() */
+} /* lookup LRU */
static int
rte_table_hash_lookup_key8_ext(
@@ -1142,8 +1010,8 @@ rte_table_hash_lookup_key8_ext(
lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
lookup1_stage1(mbuf, bucket, f);
lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask, buckets,
- keys, f);
+ pkts_mask_out, entries, buckets_mask,
+ buckets, keys, f);
}
goto grind_next_buckets;
@@ -1260,157 +1128,7 @@ grind_next_buckets:
*lookup_hit_mask = pkts_mask_out;
RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key8_ext() */
-
-static int
-rte_table_hash_lookup_key8_ext_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
- struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
- struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
- uint32_t pkt00_index, pkt01_index, pkt10_index;
- uint32_t pkt11_index, pkt20_index, pkt21_index;
- uint64_t pkts_mask_out = 0, buckets_mask = 0;
- struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
- uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
- RTE_TABLE_HASH_KEY8_STATS_PKTS_IN_ADD(f, n_pkts_in);
-
- /* Cannot run the pipeline with less than 5 packets */
- if (__builtin_popcountll(pkts_mask) < 5) {
- for ( ; pkts_mask; ) {
- struct rte_bucket_4_8 *bucket;
- struct rte_mbuf *mbuf;
- uint32_t pkt_index;
-
- lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
- lookup1_stage1_dosig(mbuf, bucket, f);
- lookup1_stage2_ext(pkt_index, mbuf, bucket,
- pkts_mask_out, entries, buckets_mask,
- buckets, keys, f);
- }
-
- goto grind_next_buckets;
- }
-
- /*
- * Pipeline fill
- *
- */
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline feed */
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
- pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
- mbuf00, mbuf01, pkts, pkts_mask, f);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
- }
-
- /*
- * Pipeline flush
- *
- */
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- mbuf10 = mbuf00;
- mbuf11 = mbuf01;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
- /* Pipeline feed */
- bucket20 = bucket10;
- bucket21 = bucket11;
- mbuf20 = mbuf10;
- mbuf21 = mbuf11;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
- bucket20, bucket21, pkts_mask_out, entries,
- buckets_mask, buckets, keys, f);
-
-grind_next_buckets:
- /* Grind next buckets */
- for ( ; buckets_mask; ) {
- uint64_t buckets_mask_next = 0;
-
- for ( ; buckets_mask; ) {
- uint64_t pkt_mask;
- uint32_t pkt_index;
-
- pkt_index = __builtin_ctzll(buckets_mask);
- pkt_mask = 1LLU << pkt_index;
- buckets_mask &= ~pkt_mask;
-
- lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
- entries, buckets_mask_next, f);
- }
-
- buckets_mask = buckets_mask_next;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
- return 0;
-} /* rte_table_hash_lookup_key8_dosig_ext() */
+} /* lookup EXT */
static int
rte_table_hash_key8_stats_read(void *table, struct rte_table_stats *stats, int clear)
@@ -1437,17 +1155,6 @@ struct rte_table_ops rte_table_hash_key8_lru_ops = {
.f_stats = rte_table_hash_key8_stats_read,
};
-struct rte_table_ops rte_table_hash_key8_lru_dosig_ops = {
- .f_create = rte_table_hash_create_key8_lru,
- .f_free = rte_table_hash_free_key8_lru,
- .f_add = rte_table_hash_entry_add_key8_lru,
- .f_delete = rte_table_hash_entry_delete_key8_lru,
- .f_add_bulk = NULL,
- .f_delete_bulk = NULL,
- .f_lookup = rte_table_hash_lookup_key8_lru_dosig,
- .f_stats = rte_table_hash_key8_stats_read,
-};
-
struct rte_table_ops rte_table_hash_key8_ext_ops = {
.f_create = rte_table_hash_create_key8_ext,
.f_free = rte_table_hash_free_key8_ext,
@@ -1458,14 +1165,3 @@ struct rte_table_ops rte_table_hash_key8_ext_ops = {
.f_lookup = rte_table_hash_lookup_key8_ext,
.f_stats = rte_table_hash_key8_stats_read,
};
-
-struct rte_table_ops rte_table_hash_key8_ext_dosig_ops = {
- .f_create = rte_table_hash_create_key8_ext,
- .f_free = rte_table_hash_free_key8_ext,
- .f_add = rte_table_hash_entry_add_key8_ext,
- .f_delete = rte_table_hash_entry_delete_key8_ext,
- .f_add_bulk = NULL,
- .f_delete_bulk = NULL,
- .f_lookup = rte_table_hash_lookup_key8_ext_dosig,
- .f_stats = rte_table_hash_key8_stats_read,
-};
diff --git a/lib/librte_table/rte_table_hash_lru.c b/lib/librte_table/rte_table_hash_lru.c
index 5a4864e2..a07392fd 100644
--- a/lib/librte_table/rte_table_hash_lru.c
+++ b/lib/librte_table/rte_table_hash_lru.c
@@ -1,34 +1,34 @@
/*-
- * BSD LICENSE
+ * BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
@@ -86,7 +86,6 @@ struct rte_table_hash {
uint32_t n_buckets;
rte_table_hash_op_hash f_hash;
uint64_t seed;
- uint32_t signature_offset;
uint32_t key_offset;
/* Internal */
@@ -99,6 +98,7 @@ struct rte_table_hash {
struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
/* Tables */
+ uint64_t *key_mask;
struct bucket *buckets;
uint8_t *key_mem;
uint8_t *data_mem;
@@ -109,29 +109,53 @@ struct rte_table_hash {
};
static int
-check_params_create(struct rte_table_hash_lru_params *params)
+keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
- uint32_t n_buckets_min;
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+ uint32_t i;
+
+ for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
+ if (a64[i] != (b64[i] & b_mask64[i]))
+ return 1;
+
+ return 0;
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
+{
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+ uint32_t i;
+
+ for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
+ dst64[i] = src64[i] & src_mask64[i];
+}
+
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
/* key_size */
- if ((params->key_size == 0) ||
+ if ((params->key_size < sizeof(uint64_t)) ||
(!rte_is_power_of_2(params->key_size))) {
RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
return -EINVAL;
}
/* n_keys */
- if ((params->n_keys == 0) ||
- (!rte_is_power_of_2(params->n_keys))) {
+ if (params->n_keys == 0) {
RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
return -EINVAL;
}
/* n_buckets */
- n_buckets_min = (params->n_keys + KEYS_PER_BUCKET - 1) / params->n_keys;
if ((params->n_buckets == 0) ||
- (!rte_is_power_of_2(params->n_keys)) ||
- (params->n_buckets < n_buckets_min)) {
+ (!rte_is_power_of_2(params->n_buckets))) {
RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
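
The keycmp()/keycpy() pair above encodes a contract: keys are masked once, when stored, so every later compare only has to mask the probe key. A minimal standalone sketch of that contract; it mirrors the helpers above with hypothetical mask and key values:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the keycmp()/keycpy() helpers above: the stored key
     * is kept pre-masked, the probe key is masked on the fly. */
    static int keycmp(const void *a, const void *b, const void *b_mask,
        uint32_t n_bytes)
    {
        const uint64_t *a64 = a, *b64 = b, *m64 = b_mask;

        for (uint32_t i = 0; i < n_bytes / sizeof(uint64_t); i++)
            if (a64[i] != (b64[i] & m64[i]))
                return 1;
        return 0;
    }

    static void keycpy(void *dst, const void *src, const void *mask,
        uint32_t n_bytes)
    {
        uint64_t *d = dst;
        const uint64_t *s = src, *m = mask;

        for (uint32_t i = 0; i < n_bytes / sizeof(uint64_t); i++)
            d[i] = s[i] & m[i];
    }

    int main(void)
    {
        uint64_t mask[2] = { 0xFFFFFFFF00000000ULL, UINT64_MAX };
        uint64_t key[2]  = { 0x1122334455667788ULL, 42 };
        uint64_t stored[2];

        keycpy(stored, key, mask, sizeof(stored));

        /* A key differing only in masked-out bits still matches. */
        uint64_t probe[2] = { 0x11223344DEADBEEFULL, 42 };
        printf("match=%d\n", keycmp(stored, probe, mask, sizeof(probe)) == 0);
        return 0;
    }
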
@@ -148,13 +172,13 @@ check_params_create(struct rte_table_hash_lru_params *params)
static void *
rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_lru_params *p =
- params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *t;
- uint32_t total_size, table_meta_sz;
- uint32_t bucket_sz, key_sz, key_stack_sz, data_sz;
- uint32_t bucket_offset, key_offset, key_stack_offset, data_offset;
- uint32_t i;
+ uint64_t table_meta_sz, key_mask_sz, bucket_sz, key_sz, key_stack_sz;
+ uint64_t data_sz, total_size;
+ uint64_t key_mask_offset, bucket_offset, key_offset, key_stack_offset;
+ uint64_t data_offset;
+ uint32_t n_buckets, i;
/* Check input parameters */
if ((check_params_create(p) != 0) ||
@@ -164,33 +188,65 @@ rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
return NULL;
}
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of buckets (n_buckets) so that there is a
+ * chance to store n_keys keys in the table.
+ *
+ * Note: Since the buckets do not get extended, it is not possible to
+ * guarantee that n_keys keys can be stored in the table at any time. In the
+ * worst-case scenario, when all n_keys keys fall into the same bucket, only
+ * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
+ * defeats the purpose of the hash table. It indicates an unsuitable f_hash
+ * or n_keys to n_buckets ratio.
+ *
+ * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
+ */
+ n_buckets = rte_align32pow2(
+ (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
+ n_buckets = RTE_MAX(n_buckets, p->n_buckets);
+
/* Memory allocation */
table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
- bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
+ key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
+ bucket_sz = RTE_CACHE_LINE_ROUNDUP(n_buckets * sizeof(struct bucket));
key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
- total_size = table_meta_sz + bucket_sz + key_sz + key_stack_sz +
- data_sz;
+ total_size = table_meta_sz + key_mask_sz + bucket_sz + key_sz +
+ key_stack_sz + data_sz;
+
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE,
+ "%s: Cannot allocate %" PRIu64 " bytes for hash "
+ "table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
- t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ t = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (t == NULL) {
RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ "%s: Cannot allocate %" PRIu64 " bytes for hash "
+ "table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table memory footprint is "
- "%u bytes\n", __func__, p->key_size, total_size);
+ RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory footprint"
+ " is %" PRIu64 " bytes\n",
+ __func__, p->key_size, p->name, total_size);
/* Memory initialization */
t->key_size = p->key_size;
t->entry_size = entry_size;
t->n_keys = p->n_keys;
- t->n_buckets = p->n_buckets;
+ t->n_buckets = n_buckets;
t->f_hash = p->f_hash;
t->seed = p->seed;
- t->signature_offset = p->signature_offset;
t->key_offset = p->key_offset;
/* Internal */
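
The SIZE_MAX check in the hunk above matters on 32-bit targets, where the sizing arithmetic is done in uint64_t but the allocator takes a size_t. A sketch of the pattern; checked_alloc() is a made-up helper, and a complete version would also guard the multiplication itself:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Size in 64 bits, allocate in size_t: reject anything that would
     * truncate on a 32-bit size_t. (On 64-bit targets the test is
     * trivially false, which is fine.) */
    static void *checked_alloc(uint64_t n_elems, uint64_t elem_size)
    {
        uint64_t total = n_elems * elem_size;

        if (total > SIZE_MAX) {
            fprintf(stderr, "cannot allocate %llu bytes\n",
                (unsigned long long)total);
            return NULL;
        }
        return malloc((size_t)total);
    }

    int main(void)
    {
        void *p = checked_alloc(1024, 16);

        free(p);
        return 0;
    }
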
@@ -199,16 +255,24 @@ rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
t->data_size_shl = __builtin_ctzl(entry_size);
/* Tables */
- bucket_offset = 0;
+ key_mask_offset = 0;
+ bucket_offset = key_mask_offset + key_mask_sz;
key_offset = bucket_offset + bucket_sz;
key_stack_offset = key_offset + key_sz;
data_offset = key_stack_offset + key_stack_sz;
+ t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
t->buckets = (struct bucket *) &t->memory[bucket_offset];
t->key_mem = &t->memory[key_offset];
t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
t->data_mem = &t->memory[data_offset];
+ /* Key mask */
+ if (p->key_mask == NULL)
+ memset(t->key_mask, 0xFF, p->key_size);
+ else
+ memcpy(t->key_mask, p->key_mask, p->key_size);
+
/* Key stack */
for (i = 0; i < t->n_keys; i++)
t->key_stack[i] = t->n_keys - 1 - i;
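
The offsets computed above carve a single allocation into cache-line-aligned regions: key mask, buckets, keys, key stack, data. A sketch of the same layout arithmetic; the 64-byte bucket size and the sample dimensions are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_LINE 64
    #define ROUNDUP(x) (((x) + CACHE_LINE - 1) & ~(uint64_t)(CACHE_LINE - 1))

    int main(void)
    {
        uint64_t key_size = 16, n_keys = 1024, n_buckets = 256;
        uint64_t entry_size = 8, bucket_struct_size = 64;

        uint64_t key_mask_sz  = ROUNDUP(key_size);
        uint64_t bucket_sz    = ROUNDUP(n_buckets * bucket_struct_size);
        uint64_t key_sz       = ROUNDUP(n_keys * key_size);
        uint64_t key_stack_sz = ROUNDUP(n_keys * sizeof(uint32_t));

        /* Regions are laid out back to back in one zmalloc'd block. */
        uint64_t key_mask_off  = 0;
        uint64_t bucket_off    = key_mask_off + key_mask_sz;
        uint64_t key_off       = bucket_off + bucket_sz;
        uint64_t key_stack_off = key_off + key_sz;
        uint64_t data_off      = key_stack_off + key_stack_sz;

        printf("key_mask@%llu buckets@%llu keys@%llu stack@%llu data@%llu\n",
            (unsigned long long)key_mask_off,
            (unsigned long long)bucket_off,
            (unsigned long long)key_off,
            (unsigned long long)key_stack_off,
            (unsigned long long)data_off);
        (void)entry_size; /* data_sz would be ROUNDUP(n_keys * entry_size) */
        return 0;
    }
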
@@ -246,7 +310,7 @@ rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
uint64_t sig;
uint32_t bkt_index, i;
- sig = t->f_hash(key, t->key_size, t->seed);
+ sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt = &t->buckets[bkt_index];
sig = (sig >> 16) | 1LLU;
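
Note the signature derivation just above: the low hash bits pick the bucket, a different slice of the hash becomes the 16-bit per-slot signature, and OR-ing in 1 guarantees a stored signature is never zero, so sig[i] == 0 can mean "slot free". A toy illustration with a made-up hash value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t hash = 0xABCD1234ULL;   /* pretend f_hash() output */
        uint64_t bucket_mask = 0xFF;     /* n_buckets = 256 */

        uint32_t bkt_index = hash & bucket_mask;

        /* OR-ing 1 keeps a stored signature from ever being zero. */
        uint16_t sig = (uint16_t)((hash >> 16) | 1LLU);

        printf("bucket=%u sig=0x%04x\n", bkt_index, sig); /* 52, 0xabcd */
        return 0;
    }
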
@@ -258,8 +322,8 @@ rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
t->key_size_shl];
- if ((sig == bkt_sig) && (memcmp(key, bkt_key, t->key_size)
- == 0)) {
+ if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
+ t->key_size) == 0)) {
uint8_t *data = &t->data_mem[bkt_key_index <<
t->data_size_shl];
@@ -292,7 +356,7 @@ rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
bkt->sig[i] = (uint16_t) sig;
bkt->key_pos[i] = bkt_key_index;
- memcpy(bkt_key, key, t->key_size);
+ keycpy(bkt_key, key, t->key_mask, t->key_size);
memcpy(data, entry, t->entry_size);
lru_update(bkt, i);
@@ -311,7 +375,7 @@ rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
uint8_t *data = &t->data_mem[bkt_key_index << t->data_size_shl];
bkt->sig[pos] = (uint16_t) sig;
- memcpy(bkt_key, key, t->key_size);
+ keycpy(bkt_key, key, t->key_mask, t->key_size);
memcpy(data, entry, t->entry_size);
lru_update(bkt, pos);
@@ -330,7 +394,7 @@ rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
uint64_t sig;
uint32_t bkt_index, i;
- sig = t->f_hash(key, t->key_size, t->seed);
+ sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt = &t->buckets[bkt_index];
sig = (sig >> 16) | 1LLU;
@@ -343,14 +407,15 @@ rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
t->key_size_shl];
if ((sig == bkt_sig) &&
- (memcmp(key, bkt_key, t->key_size) == 0)) {
+ (keycmp(bkt_key, key, t->key_mask, t->key_size) == 0)) {
uint8_t *data = &t->data_mem[bkt_key_index <<
t->data_size_shl];
bkt->sig[i] = 0;
t->key_stack[t->key_stack_tos++] = bkt_key_index;
*key_found = 1;
- memcpy(entry, data, t->entry_size);
+ if (entry)
+ memcpy(entry, data, t->entry_size);
return 0;
}
}
@@ -365,8 +430,7 @@ static int rte_table_hash_lru_lookup_unoptimized(
struct rte_mbuf **pkts,
uint64_t pkts_mask,
uint64_t *lookup_hit_mask,
- void **entries,
- int dosig)
+ void **entries)
{
struct rte_table_hash *t = (struct rte_table_hash *) table;
uint64_t pkts_mask_out = 0;
@@ -387,11 +451,7 @@ static int rte_table_hash_lru_lookup_unoptimized(
pkt = pkts[pkt_index];
key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
- if (dosig)
- sig = (uint64_t) t->f_hash(key, t->key_size, t->seed);
- else
- sig = RTE_MBUF_METADATA_UINT32(pkt,
- t->signature_offset);
+ sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);
bkt_index = sig & t->bucket_mask;
bkt = &t->buckets[bkt_index];
@@ -404,7 +464,7 @@ static int rte_table_hash_lru_lookup_unoptimized(
uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
t->key_size_shl];
- if ((sig == bkt_sig) && (memcmp(key, bkt_key,
+ if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
t->key_size) == 0)) {
uint8_t *data = &t->data_mem[bkt_key_index <<
t->data_size_shl];
@@ -502,74 +562,75 @@ static int rte_table_hash_lru_lookup_unoptimized(
match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3; \
}
-#define lookup_cmp_key(mbuf, key, match_key, f) \
-{ \
+#define lookup_cmp_key(mbuf, key, match_key, f) \
+{ \
uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
- uint64_t *bkt_key = (uint64_t *) key; \
- \
- switch (f->key_size) { \
- case 8: \
- { \
- uint64_t xor = pkt_key[0] ^ bkt_key[0]; \
- match_key = 0; \
- if (xor == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 16: \
- { \
- uint64_t xor[2], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- or = xor[0] | xor[1]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 32: \
- { \
- uint64_t xor[4], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- or = xor[0] | xor[1] | xor[2] | xor[3]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- case 64: \
- { \
- uint64_t xor[8], or; \
- \
- xor[0] = pkt_key[0] ^ bkt_key[0]; \
- xor[1] = pkt_key[1] ^ bkt_key[1]; \
- xor[2] = pkt_key[2] ^ bkt_key[2]; \
- xor[3] = pkt_key[3] ^ bkt_key[3]; \
- xor[4] = pkt_key[4] ^ bkt_key[4]; \
- xor[5] = pkt_key[5] ^ bkt_key[5]; \
- xor[6] = pkt_key[6] ^ bkt_key[6]; \
- xor[7] = pkt_key[7] ^ bkt_key[7]; \
- or = xor[0] | xor[1] | xor[2] | xor[3] | \
- xor[4] | xor[5] | xor[6] | xor[7]; \
- match_key = 0; \
- if (or == 0) \
- match_key = 1; \
- } \
- break; \
- \
- default: \
- match_key = 0; \
- if (memcmp(pkt_key, bkt_key, f->key_size) == 0) \
- match_key = 1; \
- } \
+ uint64_t *bkt_key = (uint64_t *) key; \
+ uint64_t *key_mask = f->key_mask; \
+ \
+ switch (f->key_size) { \
+ case 8: \
+ { \
+ uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ match_key = 0; \
+ if (xor == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 16: \
+ { \
+ uint64_t xor[2], or; \
+ \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
+ or = xor[0] | xor[1]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 32: \
+ { \
+ uint64_t xor[4], or; \
+ \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
+ xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
+ xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ case 64: \
+ { \
+ uint64_t xor[8], or; \
+ \
+ xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
+ xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1]; \
+ xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2]; \
+ xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3]; \
+ xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4]; \
+ xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5]; \
+ xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6]; \
+ xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7]; \
+ or = xor[0] | xor[1] | xor[2] | xor[3] | \
+ xor[4] | xor[5] | xor[6] | xor[7]; \
+ match_key = 0; \
+ if (or == 0) \
+ match_key = 1; \
+ } \
+ break; \
+ \
+ default: \
+ match_key = 0; \
+ if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0) \
+ match_key = 1; \
+ } \
}
#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)\
@@ -616,38 +677,7 @@ static int rte_table_hash_lru_lookup_unoptimized(
rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}
-#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index) \
-{ \
- struct grinder *g10, *g11; \
- uint64_t sig10, sig11, bkt10_index, bkt11_index; \
- struct rte_mbuf *mbuf10, *mbuf11; \
- struct bucket *bkt10, *bkt11, *buckets = t->buckets; \
- uint64_t bucket_mask = t->bucket_mask; \
- uint32_t signature_offset = t->signature_offset; \
- \
- mbuf10 = pkts[pkt10_index]; \
- sig10 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf10, signature_offset);\
- bkt10_index = sig10 & bucket_mask; \
- bkt10 = &buckets[bkt10_index]; \
- \
- mbuf11 = pkts[pkt11_index]; \
- sig11 = (uint64_t) RTE_MBUF_METADATA_UINT32(mbuf11, signature_offset);\
- bkt11_index = sig11 & bucket_mask; \
- bkt11 = &buckets[bkt11_index]; \
- \
- rte_prefetch0(bkt10); \
- rte_prefetch0(bkt11); \
- \
- g10 = &g[pkt10_index]; \
- g10->sig = sig10; \
- g10->bkt = bkt10; \
- \
- g11 = &g[pkt11_index]; \
- g11->sig = sig11; \
- g11->bkt = bkt11; \
-}
-
-#define lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index)\
+#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)\
{ \
struct grinder *g10, *g11; \
uint64_t sig10, sig11, bkt10_index, bkt11_index; \
@@ -662,13 +692,13 @@ static int rte_table_hash_lru_lookup_unoptimized(
\
mbuf10 = pkts[pkt10_index]; \
key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);\
- sig10 = (uint64_t) f_hash(key10, key_size, seed); \
+ sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed);\
bkt10_index = sig10 & bucket_mask; \
bkt10 = &buckets[bkt10_index]; \
\
mbuf11 = pkts[pkt11_index]; \
key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);\
- sig11 = (uint64_t) f_hash(key11, key_size, seed); \
+ sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed);\
bkt11_index = sig11 & bucket_mask; \
bkt11 = &buckets[bkt11_index]; \
\
@@ -819,7 +849,7 @@ static int rte_table_hash_lru_lookup(
/* Cannot run the pipeline with less than 7 packets */
if (__builtin_popcountll(pkts_mask) < 7)
return rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 0);
+ pkts_mask, lookup_hit_mask, entries);
/* Pipeline stage 0 */
lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
@@ -923,140 +953,7 @@ static int rte_table_hash_lru_lookup(
uint64_t pkts_mask_out_slow = 0;
status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 0);
- pkts_mask_out |= pkts_mask_out_slow;
- }
-
- *lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
- return status;
-}
-
-static int rte_table_hash_lru_lookup_dosig(
- void *table,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask,
- uint64_t *lookup_hit_mask,
- void **entries)
-{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
- struct grinder *g = t->grinders;
- uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
- uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
- uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
- int status = 0;
-
- __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
- RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(t, n_pkts_in);
-
- /* Cannot run the pipeline with less than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask, lookup_hit_mask, entries, 1);
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline feed */
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline feed */
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /*
- * Pipeline run
- *
- */
- for ( ; pkts_mask; ) {
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 0 */
- lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
- pkt00_index, pkt01_index);
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index,
- pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
- pkts_mask_out, entries);
- }
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
- pkt10_index = pkt00_index;
- pkt11_index = pkt01_index;
-
- /* Pipeline stage 1 */
- lookup2_stage1_dosig(t, g, pkts, pkt10_index, pkt11_index);
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
- pkt20_index = pkt10_index;
- pkt21_index = pkt11_index;
-
- /* Pipeline stage 2 */
- lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Pipeline feed */
- pkt30_index = pkt20_index;
- pkt31_index = pkt21_index;
-
- /* Pipeline stage 3 */
- lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
- entries);
-
- /* Slow path */
- pkts_mask_match_many &= ~pkts_mask_out;
- if (pkts_mask_match_many) {
- uint64_t pkts_mask_out_slow = 0;
-
- status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
- pkts_mask_match_many, &pkts_mask_out_slow, entries, 1);
+ pkts_mask_match_many, &pkts_mask_out_slow, entries);
pkts_mask_out |= pkts_mask_out_slow;
}
@@ -1089,14 +986,3 @@ struct rte_table_ops rte_table_hash_lru_ops = {
.f_lookup = rte_table_hash_lru_lookup,
.f_stats = rte_table_hash_lru_stats_read,
};
-
-struct rte_table_ops rte_table_hash_lru_dosig_ops = {
- .f_create = rte_table_hash_lru_create,
- .f_free = rte_table_hash_lru_free,
- .f_add = rte_table_hash_lru_entry_add,
- .f_delete = rte_table_hash_lru_entry_delete,
- .f_add_bulk = NULL,
- .f_delete_bulk = NULL,
- .f_lookup = rte_table_hash_lru_lookup_dosig,
- .f_stats = rte_table_hash_lru_stats_read,
-};
diff --git a/lib/librte_table/rte_table_version.map b/lib/librte_table/rte_table_version.map
index e1eaa275..6237252b 100644
--- a/lib/librte_table/rte_table_version.map
+++ b/lib/librte_table/rte_table_version.map
@@ -1,19 +1,16 @@
-DPDK_2.0 {
+DPDK_17.11 {
global:
rte_table_acl_ops;
rte_table_array_ops;
- rte_table_hash_ext_dosig_ops;
+ rte_table_hash_cuckoo_ops;
rte_table_hash_ext_ops;
- rte_table_hash_key8_ext_dosig_ops;
- rte_table_hash_key8_ext_ops;
- rte_table_hash_key8_lru_dosig_ops;
- rte_table_hash_key8_lru_ops;
rte_table_hash_key16_ext_ops;
rte_table_hash_key16_lru_ops;
rte_table_hash_key32_ext_ops;
rte_table_hash_key32_lru_ops;
- rte_table_hash_lru_dosig_ops;
+ rte_table_hash_key8_ext_ops;
+ rte_table_hash_key8_lru_ops;
rte_table_hash_lru_ops;
rte_table_lpm_ipv6_ops;
rte_table_lpm_ops;
@@ -21,18 +18,3 @@ DPDK_2.0 {
local: *;
};
-
-DPDK_2.2 {
- global:
-
- rte_table_hash_key16_ext_dosig_ops;
- rte_table_hash_key16_lru_dosig_ops;
-
-};
-
-DPDK_16.07 {
- global:
-
- rte_table_hash_cuckoo_dosig_ops;
-
-} DPDK_2.0;
diff --git a/lib/librte_timer/Makefile b/lib/librte_timer/Makefile
index 03a15390..eb9c5624 100644
--- a/lib/librte_timer/Makefile
+++ b/lib/librte_timer/Makefile
@@ -35,6 +35,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_timer.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lrte_eal
EXPORT_MAP := rte_timer_version.map
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 5ee08408..28decc39 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -43,7 +43,6 @@
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
@@ -432,7 +431,8 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
uint64_t period;
if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
- !rte_lcore_is_enabled(tim_lcore)))
+ !(rte_lcore_is_enabled(tim_lcore) ||
+ rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
return -1;
if (type == PERIODICAL)
@@ -525,7 +525,7 @@ void rte_timer_manage(void)
return;
cur_time = rte_get_timer_cycles();
-#ifdef RTE_ARCH_X86_64
+#ifdef RTE_ARCH_64
/* on 64-bit the value cached in the pending_head.expired will be
* updated atomically, so we can consult that for a quick check here
* outside the lock */
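
The change from RTE_ARCH_X86_64 to RTE_ARCH_64 above generalizes the optimization: on any 64-bit architecture an aligned 64-bit load is a single atomic read, so the earliest expiry can be checked without taking the lock. A sketch of that pattern using C11 atomics; the DPDK code itself relies on a plain aligned read rather than <stdatomic.h>:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint64_t earliest_expiry;

    /* Lock-free quick check: only fall through to the locked skiplist
     * walk when a timer may actually have expired. */
    static bool timers_maybe_pending(uint64_t cur_time)
    {
        return atomic_load_explicit(&earliest_expiry,
            memory_order_relaxed) <= cur_time;
    }

    int main(void)
    {
        atomic_store_explicit(&earliest_expiry, 1000,
            memory_order_relaxed);
        return timers_maybe_pending(500); /* 0: nothing pending yet */
    }
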
diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index 4a116fe3..be182798 100644
--- a/lib/librte_vhost/Makefile
+++ b/lib/librte_vhost/Makefile
@@ -45,10 +45,11 @@ LDLIBS += -lpthread
ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y)
LDLIBS += -lnuma
endif
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev
# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := fd_man.c socket.c vhost.c vhost_user.c \
- virtio_net.c
+SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := fd_man.c iotlb.c socket.c vhost.c \
+ vhost_user.c virtio_net.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost.h
diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c
index 2ceacc9a..4c6fed41 100644
--- a/lib/librte_vhost/fd_man.c
+++ b/lib/librte_vhost/fd_man.c
@@ -222,6 +222,7 @@ fdset_event_dispatch(void *arg)
int remove1, remove2;
int need_shrink;
struct fdset *pfdset = arg;
+ int val;
if (pfdset == NULL)
return NULL;
@@ -239,7 +240,9 @@ fdset_event_dispatch(void *arg)
numfds = pfdset->num;
pthread_mutex_unlock(&pfdset->fd_mutex);
- poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
+ val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
+ if (val < 0)
+ continue;
need_shrink = 0;
for (i = 0; i < numfds; i++) {
diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
new file mode 100644
index 00000000..b74cc6a7
--- /dev/null
+++ b/lib/librte_vhost/iotlb.c
@@ -0,0 +1,350 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Red Hat, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+#include <numaif.h>
+#endif
+
+#include <rte_tailq.h>
+
+#include "iotlb.h"
+#include "vhost.h"
+
+struct vhost_iotlb_entry {
+ TAILQ_ENTRY(vhost_iotlb_entry) next;
+
+ uint64_t iova;
+ uint64_t uaddr;
+ uint64_t size;
+ uint8_t perm;
+};
+
+#define IOTLB_CACHE_SIZE 2048
+
+static void
+vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+ TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ }
+
+ rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
+
+bool
+vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+ uint8_t perm)
+{
+ struct vhost_iotlb_entry *node;
+ bool found = false;
+
+ rte_rwlock_read_lock(&vq->iotlb_pending_lock);
+
+ TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
+ if ((node->iova == iova) && (node->perm == perm)) {
+ found = true;
+ break;
+ }
+ }
+
+ rte_rwlock_read_unlock(&vq->iotlb_pending_lock);
+
+ return found;
+}
+
+void
+vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+ uint64_t iova, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node;
+ int ret;
+
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+ if (ret) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "IOTLB pool empty, clear pending misses\n");
+ vhost_user_iotlb_pending_remove_all(vq);
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+ return;
+ }
+ }
+
+ node->iova = iova;
+ node->perm = perm;
+
+ rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+ TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
+
+ rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
+
+static void
+vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ rte_rwlock_write_lock(&vq->iotlb_pending_lock);
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
+ if (node->iova < iova)
+ continue;
+ if (node->iova >= iova + size)
+ continue;
+ if ((node->perm & perm) != node->perm)
+ continue;
+ TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ }
+
+ rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
+}
+
+static void
+vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+ TAILQ_REMOVE(&vq->iotlb_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ }
+
+ vq->iotlb_cache_nr = 0;
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+static void
+vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+ int entry_idx;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ entry_idx = rte_rand() % vq->iotlb_cache_nr;
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+ if (!entry_idx) {
+ TAILQ_REMOVE(&vq->iotlb_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ vq->iotlb_cache_nr--;
+ break;
+ }
+ entry_idx--;
+ }
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+void
+vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
+ uint64_t uaddr, uint64_t size, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node, *new_node;
+ int ret;
+
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
+ if (ret) {
+ RTE_LOG(DEBUG, VHOST_CONFIG, "IOTLB pool empty, evicting one entry\n");
+ vhost_user_iotlb_cache_random_evict(vq);
+ ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
+ return;
+ }
+ }
+
+ new_node->iova = iova;
+ new_node->uaddr = uaddr;
+ new_node->size = size;
+ new_node->perm = perm;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ TAILQ_FOREACH(node, &vq->iotlb_list, next) {
+ /*
+ * Entries must be invalidated before being updated.
+ * So if the iova is already in the list, assume it is identical.
+ */
+ if (node->iova == new_node->iova) {
+ rte_mempool_put(vq->iotlb_pool, new_node);
+ goto unlock;
+ } else if (node->iova > new_node->iova) {
+ TAILQ_INSERT_BEFORE(node, new_node, next);
+ vq->iotlb_cache_nr++;
+ goto unlock;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
+ vq->iotlb_cache_nr++;
+
+unlock:
+ vhost_user_iotlb_pending_remove(vq, iova, size, perm);
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+
+}
+
+void
+vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size)
+{
+ struct vhost_iotlb_entry *node, *temp_node;
+
+ if (unlikely(!size))
+ return;
+
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+
+ TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+ /* Sorted list */
+ if (unlikely(iova + size < node->iova))
+ break;
+
+ if (iova < node->iova + node->size) {
+ TAILQ_REMOVE(&vq->iotlb_list, node, next);
+ rte_mempool_put(vq->iotlb_pool, node);
+ vq->iotlb_cache_nr--;
+ }
+ }
+
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+uint64_t
+vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
+ uint64_t *size, uint8_t perm)
+{
+ struct vhost_iotlb_entry *node;
+ uint64_t offset, vva = 0, mapped = 0;
+
+ if (unlikely(!*size))
+ goto out;
+
+ TAILQ_FOREACH(node, &vq->iotlb_list, next) {
+ /* List sorted by iova */
+ if (unlikely(iova < node->iova))
+ break;
+
+ if (iova >= node->iova + node->size)
+ continue;
+
+ if (unlikely((perm & node->perm) != perm)) {
+ vva = 0;
+ break;
+ }
+
+ offset = iova - node->iova;
+ if (!vva)
+ vva = node->uaddr + offset;
+
+ mapped += node->size - offset;
+ iova = node->iova + node->size;
+
+ if (mapped >= *size)
+ break;
+ }
+
+out:
+ /* Only part of the requested chunk is mapped */
+ if (unlikely(mapped < *size))
+ *size = mapped;
+
+ return vva;
+}
+
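The lookup above walks the iova-sorted list and accumulates contiguously mapped entries, truncating *size as soon as it hits a gap, a permission mismatch ((perm & node->perm) != perm) or the end of the list. A minimal sketch of the resulting semantics, assuming two RW entries and hypothetical host addresses uaddr0/uaddr1 (the find itself runs with vq->iotlb_lock read-locked, see __vhost_iova_to_vva() in vhost.c):

    vhost_user_iotlb_cache_insert(vq, 0x1000, uaddr0, 0x1000, VHOST_ACCESS_RW);
    vhost_user_iotlb_cache_insert(vq, 0x2000, uaddr1, 0x1000, VHOST_ACCESS_RW);

    uint64_t len = 0x1800;
    uint64_t vva = vhost_user_iotlb_cache_find(vq, 0x1800, &len, VHOST_ACCESS_RO);
    /* vva == uaddr0 + 0x800 and len stays 0x1800: the two entries are
     * iova-contiguous, so the whole chunk counts as mapped. Without the
     * second entry, len would be truncated to 0x800 and the caller would
     * fault in iova 0x2000 via an IOTLB miss. The RO lookup succeeds
     * against RW entries because RO is a subset of the RW permission. */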
+int
+vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
+{
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
+ int socket = 0;
+
+ if (vq->iotlb_pool) {
+ /*
+ * The cache has already been initialized,
+ * just drop all cached and pending entries.
+ */
+ vhost_user_iotlb_cache_remove_all(vq);
+ vhost_user_iotlb_pending_remove_all(vq);
+ }
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ if (get_mempolicy(&socket, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR) != 0)
+ socket = 0;
+#endif
+
+ rte_rwlock_init(&vq->iotlb_lock);
+ rte_rwlock_init(&vq->iotlb_pending_lock);
+
+ TAILQ_INIT(&vq->iotlb_list);
+ TAILQ_INIT(&vq->iotlb_pending_list);
+
+ snprintf(pool_name, sizeof(pool_name), "iotlb_cache_%d_%d",
+ dev->vid, vq_index);
+
+ /* If already created, free it and recreate */
+ vq->iotlb_pool = rte_mempool_lookup(pool_name);
+ if (vq->iotlb_pool)
+ rte_mempool_free(vq->iotlb_pool);
+
+ vq->iotlb_pool = rte_mempool_create(pool_name,
+ IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
+ 0, 0, NULL, NULL, NULL, socket,
+ MEMPOOL_F_NO_CACHE_ALIGN |
+ MEMPOOL_F_SP_PUT |
+ MEMPOOL_F_SC_GET);
+ if (!vq->iotlb_pool) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to create IOTLB cache pool (%s)\n",
+ pool_name);
+ return -1;
+ }
+
+ vq->iotlb_cache_nr = 0;
+
+ return 0;
+}
+
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
new file mode 100644
index 00000000..f1a050e4
--- /dev/null
+++ b/lib/librte_vhost/iotlb.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Red Hat, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _VHOST_IOTLB_H_
+#define _VHOST_IOTLB_H_
+
+#include <stdbool.h>
+
+#include "vhost.h"
+
+static __rte_always_inline void
+vhost_user_iotlb_rd_lock(struct vhost_virtqueue *vq)
+{
+ rte_rwlock_read_lock(&vq->iotlb_lock);
+}
+
+static __rte_always_inline void
+vhost_user_iotlb_rd_unlock(struct vhost_virtqueue *vq)
+{
+ rte_rwlock_read_unlock(&vq->iotlb_lock);
+}
+
+static __rte_always_inline void
+vhost_user_iotlb_wr_lock(struct vhost_virtqueue *vq)
+{
+ rte_rwlock_write_lock(&vq->iotlb_lock);
+}
+
+static __rte_always_inline void
+vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
+{
+ rte_rwlock_write_unlock(&vq->iotlb_lock);
+}
+
+void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
+ uint64_t uaddr, uint64_t size,
+ uint8_t perm);
+void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size);
+uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
+ uint64_t *size, uint8_t perm);
+bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
+ uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
+ uint8_t perm);
+int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
+
+#endif /* _VHOST_IOTLB_H_ */
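The read-lock helpers above are meant to be taken once per packet burst rather than once per descriptor. A sketch of the datapath pattern this header supports, mirroring the virtio_net.c hunks later in this patch (translate_burst() is a hypothetical wrapper, not a function from the patch):

    static void
    translate_burst(struct virtio_net *dev, struct vhost_virtqueue *vq)
    {
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
            vhost_user_iotlb_rd_lock(vq);

        /* Re-translate the ring addresses if an invalidate raced with us. */
        if (unlikely(vq->access_ok == 0))
            if (unlikely(vring_translate(dev, vq) < 0))
                goto out;

        /* ... per-descriptor work calling vhost_iova_to_vva() ... */

    out:
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
            vhost_user_iotlb_rd_unlock(vq);
    }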
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 8c974eb1..f6536449 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -56,6 +56,7 @@ extern "C" {
#define RTE_VHOST_USER_CLIENT (1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
#define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
+#define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
/**
* Information relating to memory regions including offsets to
@@ -107,7 +108,10 @@ struct vhost_device_ops {
*/
int (*features_changed)(int vid, uint64_t features);
- void *reserved[4]; /**< Reserved for future extension */
+ int (*new_connection)(int vid);
+ void (*destroy_connection)(int vid);
+
+ void *reserved[2]; /**< Reserved for future extension */
};
/**
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 41aa3f9b..422da002 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -68,6 +68,7 @@ struct vhost_user_socket {
bool is_server;
bool reconnect;
bool dequeue_zero_copy;
+ bool iommu_support;
/*
* The "supported_features" indicates the feature bits the
@@ -217,9 +218,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
vid = vhost_new_device();
if (vid == -1) {
- close(fd);
- free(conn);
- return;
+ goto err;
}
size = strnlen(vsocket->path, PATH_MAX);
@@ -230,24 +229,40 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
+ if (vsocket->notify_ops->new_connection) {
+ ret = vsocket->notify_ops->new_connection(vid);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to add vhost user connection with fd %d\n",
+ fd);
+ goto err;
+ }
+ }
+
conn->connfd = fd;
conn->vsocket = vsocket;
conn->vid = vid;
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
NULL, conn);
if (ret < 0) {
- conn->connfd = -1;
- free(conn);
- close(fd);
RTE_LOG(ERR, VHOST_CONFIG,
"failed to add fd %d into vhost server fdset\n",
fd);
- return;
+
+ if (vsocket->notify_ops->destroy_connection)
+ vsocket->notify_ops->destroy_connection(conn->vid);
+
+ goto err;
}
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
+ return;
+
+err:
+ free(conn);
+ close(fd);
}
/* call back when there is new vhost-user connection from client */
@@ -277,6 +292,9 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
*remove = 1;
vhost_destroy_device(conn->vid);
+ if (vsocket->notify_ops->destroy_connection)
+ vsocket->notify_ops->destroy_connection(conn->vid);
+
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
@@ -652,6 +670,11 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
+ if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
+ vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+ vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+ }
+
if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 0b6aa1cc..4f8b73a0 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -47,11 +47,49 @@
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
+#include <rte_rwlock.h>
+#include "iotlb.h"
#include "vhost.h"
+#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+/* Called with iotlb_lock read-locked */
+uint64_t
+__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ uint64_t vva, tmp_size;
+
+ if (unlikely(!size))
+ return 0;
+
+ tmp_size = size;
+
+ vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
+ if (tmp_size == size)
+ return vva;
+
+ if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
+ /*
+ * iotlb_lock is read-locked for a full burst,
+ * but it only protects the iotlb cache.
+ * In case of IOTLB miss, we might block on the socket,
+ * which could cause a deadlock with QEMU if an IOTLB update
+ * is being handled. We can safely unlock here to avoid it.
+ */
+ vhost_user_iotlb_rd_unlock(vq);
+
+ vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
+ vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+
+ vhost_user_iotlb_rd_lock(vq);
+ }
+
+ return 0;
+}
+
struct virtio_net *
get_device(int vid)
{
@@ -102,40 +140,108 @@ free_device(struct virtio_net *dev)
vq = dev->virtqueue[i];
rte_free(vq->shadow_used_ring);
-
+ rte_free(vq->batch_copy_elems);
+ rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
}
rte_free(dev);
}
+int
+vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ uint64_t size;
+
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ goto out;
+
+ size = sizeof(struct vring_desc) * vq->size;
+ vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.desc_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->desc)
+ return -1;
+
+ size = sizeof(struct vring_avail);
+ size += sizeof(uint16_t) * vq->size;
+ vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.avail_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->avail)
+ return -1;
+
+ size = sizeof(struct vring_used);
+ size += sizeof(struct vring_used_elem) * vq->size;
+ vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.used_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->used)
+ return -1;
+
+out:
+ vq->access_ok = 1;
+
+ return 0;
+}
+
+void
+vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_wr_lock(vq);
+
+ vq->access_ok = 0;
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_wr_unlock(vq);
+}
+
static void
-init_vring_queue(struct vhost_virtqueue *vq)
+init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
+ struct vhost_virtqueue *vq;
+
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed not init vring, out of bound (%d)\n",
+ vring_idx);
+ return;
+ }
+
+ vq = dev->virtqueue[vring_idx];
+
memset(vq, 0, sizeof(struct vhost_virtqueue));
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vhost_user_iotlb_init(dev, vring_idx);
/* Backends are set to -1 indicating an inactive device. */
vq->backend = -1;
- /*
- * always set the vq to enabled; this is to keep compatibility
- * with the old QEMU, whereas there is no SET_VRING_ENABLE message.
- */
- vq->enabled = 1;
-
TAILQ_INIT(&vq->zmbuf_list);
}
static void
-reset_vring_queue(struct vhost_virtqueue *vq)
+reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
+ struct vhost_virtqueue *vq;
int callfd;
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed not init vring, out of bound (%d)\n",
+ vring_idx);
+ return;
+ }
+
+ vq = dev->virtqueue[vring_idx];
callfd = vq->callfd;
- init_vring_queue(vq);
+ init_vring_queue(dev, vring_idx);
vq->callfd = callfd;
}
@@ -152,7 +258,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
}
dev->virtqueue[vring_idx] = vq;
- init_vring_queue(vq);
+ init_vring_queue(dev, vring_idx);
dev->nr_vring += 1;
@@ -174,7 +280,7 @@ reset_device(struct virtio_net *dev)
dev->flags = 0;
for (i = 0; i < dev->nr_vring; i++)
- reset_vring_queue(dev->virtqueue[i]);
+ reset_vring_queue(dev, i);
}
/*
@@ -207,6 +313,7 @@ vhost_new_device(void)
vhost_devices[i] = dev;
dev->vid = i;
+ dev->slave_req_fd = -1;
return i;
}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 6fe72aeb..1cc81c17 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -45,6 +45,7 @@
#include <rte_log.h>
#include <rte_ether.h>
+#include <rte_rwlock.h>
#include "rte_vhost.h"
@@ -81,6 +82,16 @@ struct zcopy_mbuf {
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
+/*
+ * Structure contains the info for each batched memory copy.
+ */
+struct batch_copy_elem {
+ void *dst;
+ void *src;
+ uint32_t len;
+ uint64_t log_addr;
+};
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
@@ -102,6 +113,7 @@ struct vhost_virtqueue {
/* Currently unused as polling mode is enabled */
int kickfd;
int enabled;
+ int access_ok;
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
@@ -114,6 +126,17 @@ struct vhost_virtqueue {
struct vring_used_elem *shadow_used_ring;
uint16_t shadow_used_idx;
+ struct vhost_vring_addr ring_addrs;
+
+ struct batch_copy_elem *batch_copy_elems;
+ uint16_t batch_copy_nb_elems;
+
+ rte_rwlock_t iotlb_lock;
+ rte_rwlock_t iotlb_pending_lock;
+ struct rte_mempool *iotlb_pool;
+ TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+ int iotlb_cache_nr;
+ TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;
/* Old kernels have no such macros defined */
@@ -132,6 +155,37 @@ struct vhost_virtqueue {
#define VIRTIO_NET_F_MTU 3
#endif
+/* Declare IOMMU related bits for older kernels */
+#ifndef VIRTIO_F_IOMMU_PLATFORM
+
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+struct vhost_iotlb_msg {
+ __u64 iova;
+ __u64 size;
+ __u64 uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ __u8 perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+ __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+#endif
+
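These compat definitions mirror the kernel vhost UAPI for builds against older headers. For illustration, a VHOST_IOTLB_UPDATE entry as the master would populate it (all values hypothetical); in vhost-user it travels in VhostUserMsg.payload.iotlb rather than in struct vhost_msg:

    struct vhost_iotlb_msg update = {
        .iova  = 0x1000,               /* guest IO virtual address */
        .size  = 0x1000,
        .uaddr = 0x7f24be800000ULL,    /* QEMU virtual address backing it */
        .perm  = VHOST_ACCESS_RW,
        .type  = VHOST_IOTLB_UPDATE,
    };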
/*
* Define virtio 1.0 for older kernels
*/
@@ -157,7 +211,8 @@ struct vhost_virtqueue {
(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
- (1ULL << VIRTIO_NET_F_MTU))
+ (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM))
struct guest_page {
@@ -196,6 +251,8 @@ struct virtio_net {
uint32_t nr_guest_pages;
uint32_t max_guest_pages;
struct guest_page *guest_pages;
+
+ int slave_req_fd;
} __rte_cache_aligned;
@@ -281,7 +338,7 @@ extern uint64_t VHOST_FEATURES;
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
-static __rte_always_inline phys_addr_t
+static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
@@ -321,4 +378,19 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
*/
void vhost_backend_cleanup(struct virtio_net *dev);
+uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+
+static __rte_always_inline uint64_t
+vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ return rte_vhost_gpa_to_vva(dev->mem, iova);
+
+ return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+}
+
#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index ad2e8d38..f4c7ce46 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -48,6 +48,7 @@
#include <rte_malloc.h>
#include <rte_log.h>
+#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"
@@ -76,6 +77,8 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
[VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
+ [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
+ [VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG",
};
static uint64_t
@@ -122,6 +125,11 @@ vhost_backend_cleanup(struct virtio_net *dev)
munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
dev->log_addr = 0;
}
+
+ if (dev->slave_req_fd >= 0) {
+ close(dev->slave_req_fd);
+ dev->slave_req_fd = -1;
+ }
}
/*
@@ -230,6 +238,15 @@ vhost_user_set_vring_num(struct virtio_net *dev,
return -1;
}
+ vq->batch_copy_elems = rte_malloc(NULL,
+ vq->size * sizeof(struct batch_copy_elem),
+ RTE_CACHE_LINE_SIZE);
+ if (!vq->batch_copy_elems) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for batching copy.\n");
+ return -1;
+ }
+
return 0;
}
@@ -297,6 +314,9 @@ out:
dev->virtqueue[index] = vq;
vhost_devices[dev->vid] = dev;
+ if (old_vq != vq)
+ vhost_user_iotlb_init(dev, index);
+
return dev;
}
#else
@@ -307,10 +327,7 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
}
#endif
-/*
- * Converts QEMU virtual address to Vhost virtual address. This function is
- * used to convert the ring addresses to our address space.
- */
+/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
@@ -331,50 +348,69 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
return 0;
}
+
/*
- * The virtio device sends us the desc, used and avail ring addresses.
- * This function then converts these to our address space.
+ * Converts ring address to Vhost virtual address.
+ * If IOMMU is enabled, the ring address is a guest IO virtual address,
+ * else it is a QEMU virtual address.
*/
-static int
-vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
+static uint64_t
+ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t ra, uint64_t size)
{
- struct vhost_virtqueue *vq;
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+ uint64_t vva;
- if (dev->mem == NULL)
- return -1;
+ vva = vhost_user_iotlb_cache_find(vq, ra,
+ &size, VHOST_ACCESS_RW);
+ if (!vva)
+ vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
- /* addr->index refers to the queue index. The txq 1, rxq is 0. */
- vq = dev->virtqueue[msg->payload.addr.index];
+ return vva;
+ }
+
+ return qva_to_vva(dev, ra);
+}
+
+static struct virtio_net *
+translate_ring_addresses(struct virtio_net *dev, int vq_index)
+{
+ struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
+ struct vhost_vring_addr *addr = &vq->ring_addrs;
/* The addresses are converted from QEMU virtual to Vhost virtual. */
- vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.desc_user_addr);
+ if (vq->desc && vq->avail && vq->used)
+ return dev;
+
+ vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
+ vq, addr->desc_user_addr, sizeof(struct vring_desc));
if (vq->desc == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
dev->vid);
- return -1;
+ return dev;
}
- dev = numa_realloc(dev, msg->payload.addr.index);
- vq = dev->virtqueue[msg->payload.addr.index];
+ dev = numa_realloc(dev, vq_index);
+ vq = dev->virtqueue[vq_index];
+ addr = &vq->ring_addrs;
- vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.avail_user_addr);
+ vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
+ vq, addr->avail_user_addr, sizeof(struct vring_avail));
if (vq->avail == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
dev->vid);
- return -1;
+ return dev;
}
- vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.used_user_addr);
+ vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
+ vq, addr->used_user_addr, sizeof(struct vring_used));
if (vq->used == 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
dev->vid);
- return -1;
+ return dev;
}
if (vq->last_used_idx != vq->used->idx) {
@@ -386,7 +422,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
vq->last_avail_idx = vq->used->idx;
}
- vq->log_guest_addr = msg->payload.addr.log_guest_addr;
+ vq->log_guest_addr = addr->log_guest_addr;
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
@@ -397,6 +433,43 @@ vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
dev->vid, vq->log_guest_addr);
+ return dev;
+}
+
+/*
+ * The virtio device sends us the desc, used and avail ring addresses.
+ * This function then converts these to our address space.
+ */
+static int
+vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_addr *addr = &msg->payload.addr;
+ struct virtio_net *dev = *pdev;
+
+ if (dev->mem == NULL)
+ return -1;
+
+ /* addr->index refers to the queue index. The txq is 1, the rxq is 0. */
+ vq = dev->virtqueue[msg->payload.addr.index];
+
+ /*
+ * Ring addresses should not be interpreted as long as the ring has not
+ * been started and enabled.
+ */
+ memcpy(&vq->ring_addrs, addr, sizeof(*addr));
+
+ vring_invalidate(dev, vq);
+
+ if (vq->enabled && (dev->features &
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
+ dev = translate_ring_addresses(dev, msg->payload.state.index);
+ if (!dev)
+ return -1;
+
+ *pdev = dev;
+ }
+
return 0;
}
@@ -453,7 +526,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t host_phys_addr;
uint64_t size;
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
@@ -464,7 +537,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
while (reg_size > 0) {
size = RTE_MIN(reg_size, page_size);
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
@@ -620,7 +693,7 @@ err_mmap:
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
- return vq && vq->desc &&
+ return vq && vq->desc && vq->avail && vq->used &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}
@@ -668,10 +741,11 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
}
static void
-vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
+vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
+ struct virtio_net *dev = *pdev;
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -681,7 +755,23 @@ vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
RTE_LOG(INFO, VHOST_CONFIG,
"vring kick idx:%d file:%d\n", file.index, file.fd);
+ /* Interpret ring addresses only when ring is started. */
+ dev = translate_ring_addresses(dev, file.index);
+ if (!dev)
+ return;
+
+ *pdev = dev;
+
vq = dev->virtqueue[file.index];
+
+ /*
+ * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
+ * the ring starts already enabled. Otherwise, it is enabled via
+ * the SET_VRING_ENABLE message.
+ */
+ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+ vq->enabled = 1;
+
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
@@ -741,6 +831,9 @@ vhost_user_get_vring_base(struct virtio_net *dev,
rte_free(vq->shadow_used_ring);
vq->shadow_used_ring = NULL;
+ rte_free(vq->batch_copy_elems);
+ vq->batch_copy_elems = NULL;
+
return 0;
}
@@ -768,6 +861,27 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
}
static void
+vhost_user_get_protocol_features(struct virtio_net *dev,
+ struct VhostUserMsg *msg)
+{
+ uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+
+ rte_vhost_driver_get_features(dev->ifname, &features);
+
+ /*
+ * For now, the REPLY_ACK protocol feature is only mandatory for
+ * the IOMMU feature. If IOMMU is explicitly disabled by the
+ * application, also disable the REPLY_ACK feature to work around
+ * buggy QEMU versions (v2.7.0 to v2.9.0).
+ */
+ if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ msg->payload.u64 = protocol_features;
+ msg->size = sizeof(msg->payload.u64);
+}
+
+static void
vhost_user_set_protocol_features(struct virtio_net *dev,
uint64_t protocol_features)
{
@@ -874,6 +988,116 @@ vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
return 0;
}
+static int
+vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+ int fd = msg->fds[0];
+
+ if (fd < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Invalid file descriptor for slave channel (%d)\n",
+ fd);
+ return -1;
+ }
+
+ dev->slave_req_fd = fd;
+
+ return 0;
+}
+
+static int
+is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+{
+ struct vhost_vring_addr *ra;
+ uint64_t start, end;
+
+ start = imsg->iova;
+ end = start + imsg->size;
+
+ ra = &vq->ring_addrs;
+ if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
+ return 1;
+ if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
+ return 1;
+ if (ra->used_user_addr >= start && ra->used_user_addr < end)
+ return 1;
+
+ return 0;
+}
+
+static int
+is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
+ struct vhost_iotlb_msg *imsg)
+{
+ uint64_t istart, iend, vstart, vend;
+
+ istart = imsg->iova;
+ iend = istart + imsg->size - 1;
+
+ vstart = (uintptr_t)vq->desc;
+ vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
+ if (vstart <= iend && istart <= vend)
+ return 1;
+
+ vstart = (uintptr_t)vq->avail;
+ vend = vstart + sizeof(struct vring_avail);
+ vend += sizeof(uint16_t) * vq->size - 1;
+ if (vstart <= iend && istart <= vend)
+ return 1;
+
+ vstart = (uintptr_t)vq->used;
+ vend = vstart + sizeof(struct vring_used);
+ vend += sizeof(struct vring_used_elem) * vq->size - 1;
+ if (vstart <= iend && istart <= vend)
+ return 1;
+
+ return 0;
+}
+
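The invalidate check above is the standard closed-interval overlap test, applied to each ring against the invalidated iova range; the update check before it only needs to know whether a ring base address falls inside the half-open updated range. The underlying predicate, extracted as a sketch:

    /* Closed intervals [a0, a1] and [b0, b1] intersect
     * iff a0 <= b1 && b0 <= a1. */
    static int
    ranges_overlap(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
    {
        return a0 <= b1 && b0 <= a1;
    }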
+static int
+vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
+{
+ struct virtio_net *dev = *pdev;
+ struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
+ uint16_t i;
+ uint64_t vva;
+
+ switch (imsg->type) {
+ case VHOST_IOTLB_UPDATE:
+ vva = qva_to_vva(dev, imsg->uaddr);
+ if (!vva)
+ return -1;
+
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
+ imsg->size, imsg->perm);
+
+ if (is_vring_iotlb_update(vq, imsg))
+ *pdev = dev = translate_ring_addresses(dev, i);
+ }
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ vhost_user_iotlb_cache_remove(vq, imsg->iova,
+ imsg->size);
+
+ if (is_vring_iotlb_invalidate(vq, imsg))
+ vring_invalidate(dev, vq);
+ }
+ break;
+ default:
+ RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
+ imsg->type);
+ return -1;
+ }
+
+ return 0;
+}
+
/* return bytes# of read on success or negative val on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -907,8 +1131,16 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
- int ret;
+ if (!msg)
+ return 0;
+
+ return send_fd_message(sockfd, (char *)msg,
+ VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
+}
+static int
+send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
+{
if (!msg)
return 0;
@@ -917,10 +1149,7 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
msg->flags |= VHOST_USER_VERSION;
msg->flags |= VHOST_USER_REPLY_MASK;
- ret = send_fd_message(sockfd, (char *)msg,
- VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
-
- return ret;
+ return send_vhost_message(sockfd, msg);
}
/*
@@ -931,7 +1160,7 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
uint16_t vring_idx;
- switch (msg->request) {
+ switch (msg->request.master) {
case VHOST_USER_SET_VRING_KICK:
case VHOST_USER_SET_VRING_CALL:
case VHOST_USER_SET_VRING_ERR:
@@ -983,7 +1212,7 @@ vhost_user_msg_handler(int vid, int fd)
}
ret = read_vhost_message(fd, &msg);
- if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
+ if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
if (ret < 0)
RTE_LOG(ERR, VHOST_CONFIG,
"vhost read message failed\n");
@@ -998,8 +1227,12 @@ vhost_user_msg_handler(int vid, int fd)
}
ret = 0;
- RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request]);
+ if (msg.request.master != VHOST_USER_IOTLB_MSG)
+ RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[msg.request.master]);
+ else
+ RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[msg.request.master]);
ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
if (ret < 0) {
@@ -1008,20 +1241,19 @@ vhost_user_msg_handler(int vid, int fd)
return -1;
}
- switch (msg.request) {
+ switch (msg.request.master) {
case VHOST_USER_GET_FEATURES:
msg.payload.u64 = vhost_user_get_features(dev);
msg.size = sizeof(msg.payload.u64);
- send_vhost_message(fd, &msg);
+ send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_FEATURES:
vhost_user_set_features(dev, msg.payload.u64);
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
- msg.payload.u64 = VHOST_USER_PROTOCOL_FEATURES;
- msg.size = sizeof(msg.payload.u64);
- send_vhost_message(fd, &msg);
+ vhost_user_get_protocol_features(dev, &msg);
+ send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_PROTOCOL_FEATURES:
vhost_user_set_protocol_features(dev, msg.payload.u64);
@@ -1043,7 +1275,7 @@ vhost_user_msg_handler(int vid, int fd)
/* it needs a reply */
msg.size = sizeof(msg.payload.u64);
- send_vhost_message(fd, &msg);
+ send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_LOG_FD:
close(msg.fds[0]);
@@ -1054,7 +1286,7 @@ vhost_user_msg_handler(int vid, int fd)
vhost_user_set_vring_num(dev, &msg);
break;
case VHOST_USER_SET_VRING_ADDR:
- vhost_user_set_vring_addr(dev, &msg);
+ vhost_user_set_vring_addr(&dev, &msg);
break;
case VHOST_USER_SET_VRING_BASE:
vhost_user_set_vring_base(dev, &msg);
@@ -1063,11 +1295,11 @@ vhost_user_msg_handler(int vid, int fd)
case VHOST_USER_GET_VRING_BASE:
vhost_user_get_vring_base(dev, &msg);
msg.size = sizeof(msg.payload.state);
- send_vhost_message(fd, &msg);
+ send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_VRING_KICK:
- vhost_user_set_vring_kick(dev, &msg);
+ vhost_user_set_vring_kick(&dev, &msg);
break;
case VHOST_USER_SET_VRING_CALL:
vhost_user_set_vring_call(dev, &msg);
@@ -1082,7 +1314,7 @@ vhost_user_msg_handler(int vid, int fd)
case VHOST_USER_GET_QUEUE_NUM:
msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
msg.size = sizeof(msg.payload.u64);
- send_vhost_message(fd, &msg);
+ send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_VRING_ENABLE:
@@ -1096,6 +1328,14 @@ vhost_user_msg_handler(int vid, int fd)
ret = vhost_user_net_set_mtu(dev, &msg);
break;
+ case VHOST_USER_SET_SLAVE_REQ_FD:
+ ret = vhost_user_set_req_fd(dev, &msg);
+ break;
+
+ case VHOST_USER_IOTLB_MSG:
+ ret = vhost_user_iotlb_msg(&dev, &msg);
+ break;
+
default:
ret = -1;
break;
@@ -1105,7 +1345,7 @@ vhost_user_msg_handler(int vid, int fd)
if (msg.flags & VHOST_USER_NEED_REPLY) {
msg.payload.u64 = !!ret;
msg.size = sizeof(msg.payload.u64);
- send_vhost_message(fd, &msg);
+ send_vhost_reply(fd, &msg);
}
if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
@@ -1124,3 +1364,29 @@ vhost_user_msg_handler(int vid, int fd)
return 0;
}
+
+int
+vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
+{
+ int ret;
+ struct VhostUserMsg msg = {
+ .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
+ .flags = VHOST_USER_VERSION,
+ .size = sizeof(msg.payload.iotlb),
+ .payload.iotlb = {
+ .iova = iova,
+ .perm = perm,
+ .type = VHOST_IOTLB_MISS,
+ },
+ };
+
+ ret = send_vhost_message(dev->slave_req_fd, &msg);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to send IOTLB miss message (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
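Together with vhost_user_iotlb_msg() above, this completes the miss/update round trip over the new slave request channel. As a sketch (message names from this patch; the reply direction is hedged from the vhost-user specification):

    /*
     * datapath thread                    QEMU (master)
     *   vhost_user_iotlb_miss()  ---->   VHOST_USER_SLAVE_IOTLB_MSG
     *                                      (type = VHOST_IOTLB_MISS)
     *                            <----   VHOST_USER_IOTLB_MSG
     *                                      (type = VHOST_IOTLB_UPDATE)
     *   vhost_user_iotlb_msg() caches the new mapping and, if it covers
     *   a ring address, re-translates via translate_ring_addresses().
     */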
diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
index 35ebd719..76d9fe2f 100644
--- a/lib/librte_vhost/vhost_user.h
+++ b/lib/librte_vhost/vhost_user.h
@@ -48,16 +48,14 @@
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
+#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
-/*
- * disable REPLY_ACK feature to workaround the buggy QEMU implementation.
- * Proved buggy QEMU includes v2.7 - v2.9.
- */
#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
(1ULL << VHOST_USER_PROTOCOL_F_RARP) | \
- (0ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
- (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))
+ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ))
typedef enum VhostUserRequest {
VHOST_USER_NONE = 0,
@@ -81,9 +79,17 @@ typedef enum VhostUserRequest {
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
VHOST_USER_NET_SET_MTU = 20,
+ VHOST_USER_SET_SLAVE_REQ_FD = 21,
+ VHOST_USER_IOTLB_MSG = 22,
VHOST_USER_MAX
} VhostUserRequest;
+typedef enum VhostUserSlaveRequest {
+ VHOST_USER_SLAVE_NONE = 0,
+ VHOST_USER_SLAVE_IOTLB_MSG = 1,
+ VHOST_USER_SLAVE_MAX
+} VhostUserSlaveRequest;
+
typedef struct VhostUserMemoryRegion {
uint64_t guest_phys_addr;
uint64_t memory_size;
@@ -103,7 +109,10 @@ typedef struct VhostUserLog {
} VhostUserLog;
typedef struct VhostUserMsg {
- VhostUserRequest request;
+ union {
+ VhostUserRequest master;
+ VhostUserSlaveRequest slave;
+ } request;
#define VHOST_USER_VERSION_MASK 0x3
#define VHOST_USER_REPLY_MASK (0x1 << 2)
@@ -118,6 +127,7 @@ typedef struct VhostUserMsg {
struct vhost_vring_addr addr;
VhostUserMemory memory;
VhostUserLog log;
+ struct vhost_iotlb_msg iotlb;
} payload;
int fds[VHOST_MEMORY_MAX_NREGIONS];
} __attribute((packed)) VhostUserMsg;
@@ -130,6 +140,7 @@ typedef struct VhostUserMsg {
/* vhost_user.c */
int vhost_user_msg_handler(int vid, int fd);
+int vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm);
/* socket.c */
int read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index a5f0eeba..6fee16e5 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -45,10 +45,13 @@
#include <rte_sctp.h>
#include <rte_arp.h>
+#include "iotlb.h"
#include "vhost.h"
#define MAX_PKT_BURST 32
+#define MAX_BATCH_LEN 256
+
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
@@ -105,6 +108,31 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
vq->shadow_used_ring[i].len = len;
}
+static inline void
+do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ struct batch_copy_elem *elem = vq->batch_copy_elems;
+ uint16_t count = vq->batch_copy_nb_elems;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
+ vhost_log_write(dev, elem[i].log_addr, elem[i].len);
+ PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
+ }
+}
+
+static inline void
+do_data_copy_dequeue(struct vhost_virtqueue *vq)
+{
+ struct batch_copy_elem *elem = vq->batch_copy_elems;
+ uint16_t count = vq->batch_copy_nb_elems;
+ int i;
+
+ for (i = 0; i < count; i++)
+ rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
+}
+
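Both flush helpers drain the per-virtqueue batch that the enqueue and dequeue copy paths below fill in. The deferral rule those paths apply, extracted here as a sketch (dst, src, cpy_len, copy_nb and batch_copy stand for the per-descriptor copy state in copy_mbuf_to_desc() and friends):

    if (cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size) {
        /* Large copy, or batch array full: copy immediately. */
        rte_memcpy(dst, src, cpy_len);
    } else {
        /* Small copy: defer, so one flush per burst amortizes the cost. */
        batch_copy[copy_nb].dst = dst;
        batch_copy[copy_nb].src = src;
        batch_copy[copy_nb].len = cpy_len;
        copy_nb++;
    }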
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
if ((var) != (val)) \
@@ -168,8 +196,9 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
- struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct vring_desc *descs, struct rte_mbuf *m,
+ uint16_t desc_idx, uint32_t size)
{
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
@@ -178,16 +207,22 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
uint64_t desc_addr;
/* A counter to avoid desc dead loop chain */
uint16_t nr_desc = 1;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
desc = &descs[desc_idx];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+ desc->len, VHOST_ACCESS_RW);
/*
* Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
* performance issue with some versions of gcc (4.8.4 and 5.3.0) which
* otherwise stores offset on the stack instead of in a register.
*/
- if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr)
- return -1;
+ if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+ error = -1;
+ goto out;
+ }
rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -213,27 +248,45 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
if (desc_avail == 0) {
if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
/* Room in vring buffer is not enough */
- return -1;
+ error = -1;
+ goto out;
+ }
+ if (unlikely(desc->next >= size || ++nr_desc > size)) {
+ error = -1;
+ goto out;
}
- if (unlikely(desc->next >= size || ++nr_desc > size))
- return -1;
desc = &descs[desc->next];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+ desc->len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
desc_offset = 0;
desc_avail = desc->len;
}
cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
- cpy_len, 0);
+ if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
+ } else {
+ batch_copy[copy_nb].dst =
+ (void *)((uintptr_t)(desc_addr + desc_offset));
+ batch_copy[copy_nb].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[copy_nb].log_addr = desc->addr + desc_offset;
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
@@ -241,7 +294,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
desc_offset += cpy_len;
}
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
/**
@@ -273,17 +329,29 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(vq->enabled == 0))
return 0;
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0)) {
+ if (unlikely(vring_translate(dev, vq) < 0)) {
+ count = 0;
+ goto out;
+ }
+ }
+
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
start_idx = vq->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
if (count == 0)
- return 0;
+ goto out;
LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
dev->vid, start_idx, start_idx + count);
+ vq->batch_copy_nb_elems = 0;
+
/* Retrieve all of the desc indexes first to avoid caching issues. */
rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
for (i = 0; i < count; i++) {
@@ -304,8 +372,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
descs = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem,
- vq->desc[desc_idx].addr);
+ vhost_iova_to_vva(dev,
+ vq, vq->desc[desc_idx].addr,
+ vq->desc[desc_idx].len,
+ VHOST_ACCESS_RO);
if (unlikely(!descs)) {
count = i;
break;
@@ -318,19 +388,18 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
sz = vq->size;
}
- err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
+ err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
if (unlikely(err)) {
- used_idx = (start_idx + i) & (vq->size - 1);
- vq->used->ring[used_idx].len = dev->vhost_hlen;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
+ count = i;
+ break;
}
if (i + 1 < count)
rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
}
+ do_data_copy_enqueue(dev, vq);
+
rte_smp_wmb();
*(volatile uint16_t *)&vq->used->idx += count;
@@ -346,6 +415,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
&& (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
return count;
}
@@ -364,7 +437,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
descs = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+ vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
+ vq->desc[idx].len,
+ VHOST_ACCESS_RO);
if (unlikely(!descs))
return -1;
@@ -439,8 +514,9 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
static __rte_always_inline int
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
- struct buf_vector *buf_vec, uint16_t num_buffers)
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t num_buffers)
{
uint32_t vec_idx = 0;
uint64_t desc_addr;
@@ -449,13 +525,22 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
uint32_t cpy_len;
uint64_t hdr_addr, hdr_phys_addr;
struct rte_mbuf *hdr_mbuf;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
- if (unlikely(m == NULL))
- return -1;
+ if (unlikely(m == NULL)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
- if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
- return -1;
+ desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
+ buf_vec[vec_idx].buf_len,
+ VHOST_ACCESS_RW);
+ if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+ error = -1;
+ goto out;
+ }
hdr_mbuf = m;
hdr_addr = desc_addr;
@@ -474,10 +559,15 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
/* done with current desc buf, get the next one */
if (desc_avail == 0) {
vec_idx++;
- desc_addr = rte_vhost_gpa_to_vva(dev->mem,
- buf_vec[vec_idx].buf_addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_addr =
+ vhost_iova_to_vva(dev, vq,
+ buf_vec[vec_idx].buf_addr,
+ buf_vec[vec_idx].buf_len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
/* Prefetch buffer address. */
rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -509,13 +599,27 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
}
cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset,
- cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
- cpy_len, 0);
+
+ if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_write(dev,
+ buf_vec[vec_idx].buf_addr + desc_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
+ } else {
+ batch_copy[copy_nb].dst =
+ (void *)((uintptr_t)(desc_addr + desc_offset));
+ batch_copy[copy_nb].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[copy_nb].log_addr =
+ buf_vec[vec_idx].buf_addr + desc_offset;
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
@@ -523,7 +627,10 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
desc_offset += cpy_len;
}
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
static __rte_always_inline uint32_t
@@ -547,9 +654,18 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(vq->enabled == 0))
return 0;
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
if (count == 0)
- return 0;
+ goto out;
+
+ vq->batch_copy_nb_elems = 0;
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -572,7 +688,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
+ if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
buf_vec, num_buffers) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
@@ -581,6 +697,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
vq->last_avail_idx += num_buffers;
}
+ do_data_copy_enqueue(dev, vq);
+
if (likely(vq->shadow_used_idx)) {
flush_shadow_used_ring(dev, vq);
@@ -593,6 +711,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
eventfd_write(vq->callfd, (eventfd_t)1);
}
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
return pkt_idx;
}
@@ -766,8 +888,9 @@ put_zmbuf(struct zcopy_mbuf *zmbuf)
}
static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
- uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
+copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct vring_desc *descs, uint16_t max_desc,
+ struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
{
struct vring_desc *desc;
@@ -779,15 +902,25 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
struct virtio_net_hdr *hdr = NULL;
/* A counter to avoid desc dead loop chain */
uint32_t nr_desc = 1;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
desc = &descs[desc_idx];
if (unlikely((desc->len < dev->vhost_hlen)) ||
- (desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ (desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc->addr,
+ desc->len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
if (virtio_net_with_host_offload(dev)) {
hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
@@ -802,12 +935,19 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
if (likely((desc->len == dev->vhost_hlen) &&
(desc->flags & VRING_DESC_F_NEXT) != 0)) {
desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc->addr,
+ desc->len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
desc_offset = 0;
desc_avail = desc->len;
@@ -838,7 +978,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
cur->data_len = cpy_len;
cur->data_off = 0;
cur->buf_addr = (void *)(uintptr_t)desc_addr;
- cur->buf_physaddr = hpa;
+ cur->buf_iova = hpa;
/*
* In zero copy mode, one mbuf can only reference data
@@ -846,10 +986,24 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
*/
mbuf_avail = cpy_len;
} else {
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset),
- (void *)((uintptr_t)(desc_addr + desc_offset)),
- cpy_len);
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ copy_nb >= vq->size ||
+ (hdr && cur == m))) {
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ cpy_len);
+ } else {
+ batch_copy[copy_nb].dst =
+ rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset);
+ batch_copy[copy_nb].src =
+ (void *)((uintptr_t)(desc_addr +
+ desc_offset));
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
}
mbuf_avail -= cpy_len;
@@ -863,15 +1017,24 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
break;
if (unlikely(desc->next >= max_desc ||
- ++nr_desc > max_desc))
- return -1;
+ ++nr_desc > max_desc)) {
+ error = -1;
+ goto out;
+ }
desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc->addr,
+ desc->len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -890,7 +1053,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
if (unlikely(cur == NULL)) {
RTE_LOG(ERR, VHOST_DATA, "Failed to "
"allocate memory for mbuf.\n");
- return -1;
+ error = -1;
+ goto out;
}
if (unlikely(dev->dequeue_zero_copy))
rte_mbuf_refcnt_update(cur, 1);
@@ -912,7 +1076,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
if (hdr)
vhost_dequeue_offload(hdr, m);
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
static __rte_always_inline void
@@ -1016,6 +1183,15 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
if (unlikely(vq->enabled == 0))
return 0;
+ vq->batch_copy_nb_elems = 0;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
if (unlikely(dev->dequeue_zero_copy)) {
struct zcopy_mbuf *zmbuf, *next;
int nr_updated = 0;
@@ -1115,8 +1291,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
desc = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem,
- vq->desc[desc_indexes[i]].addr);
+ vhost_iova_to_vva(dev, vq,
+ vq->desc[desc_indexes[i]].addr,
+ sizeof(*desc),
+ VHOST_ACCESS_RO);
if (unlikely(!desc))
break;
@@ -1136,7 +1314,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
break;
}
- err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool);
+ err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx,
+ mbuf_pool);
if (unlikely(err)) {
rte_pktmbuf_free(pkts[i]);
break;
@@ -1168,11 +1347,15 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vq->last_avail_idx += i;
if (likely(dev->dequeue_zero_copy == 0)) {
+ do_data_copy_dequeue(vq);
vq->last_used_idx += i;
update_used_idx(dev, vq, i);
}
out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
if (unlikely(rarp_mbuf != NULL)) {
/*
* Inject it to the head of "pkts" array, so that switch's mac
diff --git a/mk/machine/dpaa/rte.vars.mk b/mk/machine/dpaa/rte.vars.mk
new file mode 100644
index 00000000..356a6af5
--- /dev/null
+++ b/mk/machine/dpaa/rte.vars.mk
@@ -0,0 +1,61 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# machine:
+#
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+MACHINE_CFLAGS += -march=armv8-a+crc
+
+ifdef CONFIG_RTE_ARCH_ARM_TUNE
+MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
+endif
diff --git a/mk/machine/atm/rte.vars.mk b/mk/machine/silvermont/rte.vars.mk
index cfed1108..2079d54d 100644
--- a/mk/machine/atm/rte.vars.mk
+++ b/mk/machine/silvermont/rte.vars.mk
@@ -55,4 +55,4 @@
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-MACHINE_CFLAGS = -march=atom
+MACHINE_CFLAGS = -march=silvermont
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index c25fdd9f..6a6a7452 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -58,6 +58,7 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
#
# Order is important: from higher level to lower level
#
+_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += -lrte_flow_classify
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
@@ -66,8 +67,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
_LDLIBS-$(CONFIG_RTE_LIBRTE_GRO) += -lrte_gro
+_LDLIBS-$(CONFIG_RTE_LIBRTE_GSO) += -lrte_gso
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lrte_meter
-_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
_LDLIBS-$(CONFIG_RTE_LIBRTE_LPM) += -lrte_lpm
# librte_acl needs --whole-archive because of weak functions
_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += --whole-archive
@@ -81,29 +82,36 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_POWER) += -lrte_power
_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
_LDLIBS-$(CONFIG_RTE_LIBRTE_EFD) += -lrte_efd
-_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
_LDLIBS-y += --whole-archive
+_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
_LDLIBS-$(CONFIG_RTE_LIBRTE_HASH) += -lrte_hash
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lrte_member
_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lrte_vhost
_LDLIBS-$(CONFIG_RTE_LIBRTE_KVARGS) += -lrte_kvargs
_LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF) += -lrte_mbuf
_LDLIBS-$(CONFIG_RTE_LIBRTE_NET) += -lrte_net
_LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lrte_ethdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_SECURITY) += -lrte_security
_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool
_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += -lrte_mempool_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_RING) += -lrte_ring
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PCI) += -lrte_pci
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrte_eal
_LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE) += -lrte_cmdline
_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
+_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
endif
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += -lrte_bus_pci
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += -lrte_bus_vdev
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# plugins (link only if static libraries)
@@ -116,6 +124,11 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
_LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += -lrte_pmd_cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += -lrte_bus_dpaa
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += -lrte_mempool_dpaa
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += -lrte_pmd_dpaa
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2
_LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
@@ -128,13 +141,17 @@ ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += -lrte_pmd_kni
endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += -lrte_pmd_lio
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs -lmlx4
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs -lmlx5
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += -lrte_pmd_mrvl -L$(LIBMUSDK_PATH)/lib -lmusdk
_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += -lrte_pmd_null
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap -lpcap
_LDLIBS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += -lrte_pmd_qede
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_RING) += -lrte_pmd_ring
+ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += -lrte_pmd_softnic
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += -lrte_pmd_sfc_efx
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2 -lsze2
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += -lrte_pmd_tap
@@ -144,7 +161,6 @@ ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
endif # $(CONFIG_RTE_LIBRTE_VHOST)
_LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
@@ -162,12 +178,19 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -lrte_pmd_zuc
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -lrte_pmd_armv8
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += -L$(LIBMUSDK_PATH)/lib -lrte_pmd_mrvl_crypto -lmusdk
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += -lrte_pmd_crypto_scheduler
ifeq ($(CONFIG_RTE_LIBRTE_FSLMC_BUS),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_pmd_dpaa2_sec
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_mempool_dpaa2
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_bus_fslmc
endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_bus_dpaa
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_pmd_dpaa_sec
+endif # CONFIG_RTE_LIBRTE_DPAA_BUS
+
endif # CONFIG_RTE_LIBRTE_CRYPTODEV
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
@@ -175,6 +198,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += -lrte_pmd_sw_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += -lrte_pmd_octeontx_ssovf
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += -lrte_pmd_dpaa2_event
+_LDLIBS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += -lrte_mempool_octeontx
+_LDLIBS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += -lrte_pmd_octeontx
endif # CONFIG_RTE_LIBRTE_EVENTDEV
ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
@@ -195,6 +220,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lnuma
@@ -270,8 +296,8 @@ LDLIBS_NAMES += $(patsubst -Wl$(comma)-l%,lib%.a,$(filter -Wl$(comma)-l%,$(LDLIB
# list of found libraries files (useful for deps). If not found, the
# library is silently ignored and dep won't be checked
-LDLIBS_FILES := $(wildcard $(foreach dir,$(LDLIBS_PATH),\
- $(addprefix $(dir)/,$(LDLIBS_NAMES))))
+LDLIBS_FILES := $(sort $(wildcard $(foreach dir,$(LDLIBS_PATH),\
+ $(addprefix $(dir)/,$(LDLIBS_NAMES)))))
#
# Compile executable file if needed
diff --git a/mk/rte.combinedlib.mk b/mk/rte.combinedlib.mk
index 449358b3..2ab7ee8a 100644
--- a/mk/rte.combinedlib.mk
+++ b/mk/rte.combinedlib.mk
@@ -42,7 +42,7 @@ endif
RTE_LIBNAME := dpdk
COMBINEDLIB := lib$(RTE_LIBNAME)$(EXT)
-LIBS := $(filter-out $(COMBINEDLIB), $(notdir $(wildcard $(RTE_OUTPUT)/lib/*$(EXT))))
+LIBS := $(filter-out $(COMBINEDLIB), $(sort $(notdir $(wildcard $(RTE_OUTPUT)/lib/*$(EXT)))))
all: FORCE
$(Q)echo "GROUP ( $(LIBS) )" > $(RTE_OUTPUT)/lib/$(COMBINEDLIB)
diff --git a/mk/rte.extsubdir.mk b/mk/rte.extsubdir.mk
index d21791b0..e2ef0132 100644
--- a/mk/rte.extsubdir.mk
+++ b/mk/rte.extsubdir.mk
@@ -52,7 +52,6 @@ $(DIRS-y):
BASE_OUTPUT=$(BASE_OUTPUT) \
CUR_SUBDIR=$(CUR_SUBDIR)/$(@) \
S=$(CURDIR)/$(@) \
- DEPDIRS="$(DEPDIRS-$@)" \
$(filter-out $(DIRS-y),$(MAKECMDGOALS))
define depdirs_rule
diff --git a/mk/rte.hostapp.mk b/mk/rte.hostapp.mk
index 5cb4909c..f58173c3 100644
--- a/mk/rte.hostapp.mk
+++ b/mk/rte.hostapp.mk
@@ -69,9 +69,9 @@ O_TO_EXE_DO = @set -e; \
-include .$(HOSTAPP).cmd
# list of .a files that are linked to this application
-LDLIBS_FILES := $(wildcard \
+LDLIBS_FILES := $(sort $(wildcard \
$(addprefix $(RTE_OUTPUT)/lib/, \
- $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS)))))
+ $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS))))))
#
# Compile executable file if needed
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index 13115d14..d0979a52 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -59,14 +59,20 @@ endif
_BUILD = $(LIB)
-_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/lib/$(LIB)
+PREINSTALL = $(SYMLINK-FILES-y)
+_INSTALL = $(INSTALL-FILES-y) $(RTE_OUTPUT)/lib/$(LIB)
_CLEAN = doclean
.PHONY: all
all: install
.PHONY: install
+ifeq ($(SYMLINK-FILES-y),)
install: build _postinstall
+else
+install: _preinstall build _postinstall
+build: _preinstall
+endif
_postinstall: build
@@ -84,13 +90,6 @@ else
_CPU_LDFLAGS := $(CPU_LDFLAGS)
endif
-# Translate DEPDIRS into LDLIBS
-# Ignore (sub)directory dependencies which do not provide an actual library
-_IGNORE_DIRS = librte_eal/% librte_compat
-_DEPDIRS = $(filter-out $(_IGNORE_DIRS),$(DEPDIRS))
-_LDDIRS = $(subst librte_ether,librte_ethdev,$(_DEPDIRS))
-LDLIBS += $(subst lib,-l,$(_LDDIRS))
-
O_TO_A = $(AR) crDs $(LIB) $(OBJS-y)
O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight
O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)"," AR $(@)")
diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk
index c0eaa350..de31b78c 100644
--- a/mk/rte.sdkdoc.mk
+++ b/mk/rte.sdkdoc.mk
@@ -93,7 +93,7 @@ $(API_EXAMPLES): api-html-clean
$(Q)mkdir -p $(@D)
@printf '/**\n' > $(API_EXAMPLES)
@printf '@page examples DPDK Example Programs\n\n' >> $(API_EXAMPLES)
- @find examples -type f -name '*.c' -printf '@example %p\n' >> $(API_EXAMPLES)
+ @find examples -type f -name '*.c' -printf '@example %p\n' | LC_ALL=C sort >> $(API_EXAMPLES)
@printf '*/\n' >> $(API_EXAMPLES)
guides-pdf-clean: guides-pdf-img-clean
diff --git a/mk/rte.shared.mk b/mk/rte.shared.mk
index 87ccf0ba..4e680bc0 100644
--- a/mk/rte.shared.mk
+++ b/mk/rte.shared.mk
@@ -85,8 +85,8 @@ LDLIBS_NAMES += $(patsubst -Wl$(comma)-l%,lib%.a,$(filter -Wl$(comma)-l%,$(LDLIB
# list of found libraries files (useful for deps). If not found, the
# library is silently ignored and dep won't be checked
-LDLIBS_FILES := $(wildcard $(foreach dir,$(LDLIBS_PATH),\
- $(addprefix $(dir)/,$(LDLIBS_NAMES))))
+LDLIBS_FILES := $(sort $(wildcard $(foreach dir,$(LDLIBS_PATH),\
+ $(addprefix $(dir)/,$(LDLIBS_NAMES)))))
#
# Archive objects in .so file if needed
diff --git a/mk/rte.subdir.mk b/mk/rte.subdir.mk
index 92f5de4c..6c393eb7 100644
--- a/mk/rte.subdir.mk
+++ b/mk/rte.subdir.mk
@@ -62,8 +62,7 @@ build: _postbuild
$(DIRS-y):
@[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@
@echo "== Build $S/$@"
- @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ \
- DEPDIRS="$(DEPDIRS-$@)" all
+ @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ all
.PHONY: clean
clean: _postclean
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index 5ba34313..fd1b5ef7 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -52,9 +52,6 @@ ExclusiveArch: i686 x86_64 aarch64
%endif
BuildRequires: kernel-devel, kernel-headers, libpcap-devel
-%ifarch i686 x86_64
-BuildRequires: xen-devel
-%endif
BuildRequires: doxygen, python-sphinx, inkscape
BuildRequires: texlive-collection-latexextra
@@ -90,9 +87,6 @@ sed -ri 's,(RTE_BUILD_SHARED_LIB=).*,\1y,' %{target}/.config
sed -ri 's,(RTE_NEXT_ABI=).*,\1n,' %{target}/.config
sed -ri 's,(LIBRTE_VHOST=).*,\1y,' %{target}/.config
sed -ri 's,(LIBRTE_PMD_PCAP=).*,\1y,' %{target}/.config
-%ifarch i686 x86_64
-sed -ri 's,(LIBRTE_PMD_XENVIRT=).*,\1y,' %{target}/.config
-%endif
make O=%{target} %{?_smp_mflags}
make O=%{target} doc
diff --git a/test/test-pipeline/config.c b/test/test-pipeline/config.c
index eebb8102..741bd307 100644
--- a/test/test-pipeline/config.c
+++ b/test/test-pipeline/config.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
diff --git a/test/test-pipeline/init.c b/test/test-pipeline/init.c
index 1457c789..79536609 100644
--- a/test/test-pipeline/init.c
+++ b/test/test-pipeline/init.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
@@ -194,9 +193,9 @@ app_ports_check_link(void)
for (i = 0; i < app.n_ports; i++) {
struct rte_eth_link link;
- uint8_t port;
+ uint16_t port;
- port = (uint8_t) app.ports[i];
+ port = app.ports[i];
memset(&link, 0, sizeof(link));
rte_eth_link_get_nowait(port, &link);
RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
@@ -219,10 +218,10 @@ app_init_ports(void)
/* Init NIC ports, then start the ports */
for (i = 0; i < app.n_ports; i++) {
- uint8_t port;
+ uint16_t port;
int ret;
- port = (uint8_t) app.ports[i];
+ port = app.ports[i];
RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);
/* Init port */
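These test-pipeline changes track the 17.11 ethdev API, where port ids widened from uint8_t to uint16_t. A hedged sketch of the link-polling loop with the wider type; nb_ports stands in for the application's port count:

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static void
    poll_links(uint16_t nb_ports)
    {
        uint16_t port;                  /* 16-bit port id as of 17.11 */
        struct rte_eth_link link;

        for (port = 0; port < nb_ports; port++) {
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(port, &link);
            printf("Port %u: %s\n", port,
                   link.link_status ? "up" : "down");
        }
    }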
diff --git a/test/test-pipeline/main.c b/test/test-pipeline/main.c
index 795b1aeb..a710173b 100644
--- a/test/test-pipeline/main.c
+++ b/test/test-pipeline/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
diff --git a/test/test-pipeline/main.h b/test/test-pipeline/main.h
index 36858492..26395a35 100644
--- a/test/test-pipeline/main.h
+++ b/test/test-pipeline/main.h
@@ -131,7 +131,10 @@ enum {
void app_main_loop_rx(void);
void app_main_loop_rx_metadata(void);
-uint64_t test_hash(void *key, uint32_t key_size, uint64_t seed);
+uint64_t test_hash(void *key,
+ void *key_mask,
+ uint32_t key_size,
+ uint64_t seed);
void app_main_loop_worker(void);
void app_main_loop_worker_pipeline_stub(void);
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 991e381e..edc1663c 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -169,23 +169,23 @@ app_main_loop_worker_pipeline_hash(void) {
"ring %d\n", i);
}
+ struct rte_table_hash_params table_hash_params = {
+ .name = "TABLE",
+ .key_size = key_size,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 24,
+ .n_buckets = 1 << 22,
+ .f_hash = test_hash,
+ .seed = 0,
+ };
+
/* Table configuration */
switch (app.pipeline_type) {
case e_APP_PIPELINE_HASH_KEY8_EXT:
case e_APP_PIPELINE_HASH_KEY16_EXT:
case e_APP_PIPELINE_HASH_KEY32_EXT:
{
- struct rte_table_hash_ext_params table_hash_params = {
- .key_size = key_size,
- .n_keys = 1 << 24,
- .n_buckets = 1 << 22,
- .n_buckets_ext = 1 << 21,
- .f_hash = test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_ext_ops,
.arg_create = &table_hash_params,
@@ -204,16 +204,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_KEY16_LRU:
case e_APP_PIPELINE_HASH_KEY32_LRU:
{
- struct rte_table_hash_lru_params table_hash_params = {
- .key_size = key_size,
- .n_keys = 1 << 24,
- .n_buckets = 1 << 22,
- .f_hash = test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_lru_ops,
.arg_create = &table_hash_params,
@@ -230,16 +220,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY8_EXT:
{
- struct rte_table_hash_key8_ext_params table_hash_params = {
- .n_entries = 1 << 24,
- .n_entries_ext = 1 << 23,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .key_mask = NULL,
- .f_hash = test_hash,
- .seed = 0,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key8_ext_ops,
.arg_create = &table_hash_params,
@@ -256,15 +236,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY8_LRU:
{
- struct rte_table_hash_key8_lru_params table_hash_params = {
- .n_entries = 1 << 24,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .key_mask = NULL,
- .f_hash = test_hash,
- .seed = 0,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key8_lru_ops,
.arg_create = &table_hash_params,
@@ -281,16 +252,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY16_EXT:
{
- struct rte_table_hash_key16_ext_params table_hash_params = {
- .n_entries = 1 << 24,
- .n_entries_ext = 1 << 23,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .f_hash = test_hash,
- .seed = 0,
- .key_mask = NULL,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key16_ext_ops,
.arg_create = &table_hash_params,
@@ -307,15 +268,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY16_LRU:
{
- struct rte_table_hash_key16_lru_params table_hash_params = {
- .n_entries = 1 << 24,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .f_hash = test_hash,
- .seed = 0,
- .key_mask = NULL,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key16_lru_ops,
.arg_create = &table_hash_params,
@@ -332,15 +284,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY32_EXT:
{
- struct rte_table_hash_key32_ext_params table_hash_params = {
- .n_entries = 1 << 24,
- .n_entries_ext = 1 << 23,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .f_hash = test_hash,
- .seed = 0,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key32_ext_ops,
.arg_create = &table_hash_params,
@@ -358,14 +301,6 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_SPEC_KEY32_LRU:
{
- struct rte_table_hash_key32_lru_params table_hash_params = {
- .n_entries = 1 << 24,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .f_hash = test_hash,
- .seed = 0,
- };
-
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key32_lru_ops,
.arg_create = &table_hash_params,
@@ -390,23 +325,8 @@ app_main_loop_worker_pipeline_hash(void) {
case e_APP_PIPELINE_HASH_CUCKOO_KEY112:
case e_APP_PIPELINE_HASH_CUCKOO_KEY128:
{
- char hash_name[RTE_HASH_NAMESIZE];
-
- snprintf(hash_name, sizeof(hash_name), "RTE_TH_CUCKOO_%d",
- app.pipeline_type);
-
- struct rte_table_hash_cuckoo_params table_hash_params = {
- .key_size = key_size,
- .n_keys = (1 << 24) + 1,
- .f_hash = test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .name = hash_name,
- };
-
struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_hash_cuckoo_dosig_ops,
+ .ops = &rte_table_hash_cuckoo_ops,
.arg_create = &table_hash_params,
.f_action_hit = NULL,
.f_action_miss = NULL,
@@ -477,6 +397,7 @@ app_main_loop_worker_pipeline_hash(void) {
uint64_t test_hash(
void *key,
+ __attribute__((unused)) void *key_mask,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
@@ -539,7 +460,7 @@ app_main_loop_rx_metadata(void) {
} else
continue;
- *signature = test_hash(key, 0, 0);
+ *signature = test_hash(key, NULL, 0, 0);
}
do {
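The 17.11 table-hash rework funnels every hash-table flavour through the single rte_table_hash_params seen above and adds a key_mask argument to the hash callback, which is why test_hash() grows a parameter and the rx-metadata caller now passes NULL. A sketch of a callback matching the new four-argument shape; the XOR fold is illustrative only, not the library's hash:

    #include <stdint.h>

    /* key_mask may be NULL when the table is configured without a mask. */
    static uint64_t
    example_hash(void *key, void *key_mask, uint32_t key_size, uint64_t seed)
    {
        uint64_t *k = key;
        uint64_t *m = key_mask;
        uint64_t h = seed;
        uint32_t i;

        for (i = 0; i < key_size / sizeof(uint64_t); i++)
            h ^= (m != NULL) ? (k[i] & m[i]) : k[i];

        return h;
    }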
diff --git a/test/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index cb02fff0..c7c45379 100644
--- a/test/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
diff --git a/test/test/Makefile b/test/test/Makefile
index 42d9a49e..bb54c980 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -94,6 +94,7 @@ SRCS-y += test_cycles.c
SRCS-y += test_spinlock.c
SRCS-y += test_memory.c
SRCS-y += test_memzone.c
+SRCS-y += test_bitmap.c
SRCS-y += test_ring.c
SRCS-y += test_ring_perf.c
@@ -106,6 +107,7 @@ SRCS-y += test_table_tables.c
SRCS-y += test_table_ports.c
SRCS-y += test_table_combined.c
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_table_acl.c
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += test_flow_classify.c
endif
SRCS-y += test_rwlock.c
@@ -123,6 +125,10 @@ SRCS-y += test_logs.c
SRCS-y += test_memcpy.c
SRCS-y += test_memcpy_perf.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_MEMBER) += test_member.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMBER) += test_member_perf.c
+
SRCS-$(CONFIG_RTE_LIBRTE_EFD) += test_efd.c
SRCS-$(CONFIG_RTE_LIBRTE_EFD) += test_efd_perf.c
@@ -198,12 +204,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c
-SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
SRCS-y += test_eventdev.c
SRCS-y += test_event_ring.c
+SRCS-y += test_event_eth_rx_adapter.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += test_eventdev_sw.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += test_eventdev_octeontx.c
endif
diff --git a/test/test/packet_burst_generator.c b/test/test/packet_burst_generator.c
index a93c3b59..8f4ddcc4 100644
--- a/test/test/packet_burst_generator.c
+++ b/test/test/packet_burst_generator.c
@@ -134,6 +134,36 @@ initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
return pkt_len;
}
+uint16_t
+initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port,
+ uint16_t dst_port, uint16_t pkt_data_len)
+{
+ uint16_t pkt_len;
+
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct tcp_hdr));
+
+ memset(tcp_hdr, 0, sizeof(struct tcp_hdr));
+ tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
+ tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
+
+ return pkt_len;
+}
+
+uint16_t
+initialize_sctp_header(struct sctp_hdr *sctp_hdr, uint16_t src_port,
+ uint16_t dst_port, uint16_t pkt_data_len)
+{
+ uint16_t pkt_len;
+
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct sctp_hdr));
+
+ sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
+ sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
+ sctp_hdr->tag = 0;
+ sctp_hdr->cksum = 0; /* No SCTP checksum. */
+
+ return pkt_len;
+}
uint16_t
initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
@@ -198,7 +228,53 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
return pkt_len;
}
+uint16_t
+initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+ uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
+{
+ uint16_t pkt_len;
+ unaligned_uint16_t *ptr16;
+ uint32_t ip_cksum;
+
+ /*
+ * Initialize IP header.
+ */
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));
+
+ ip_hdr->version_ihl = IP_VHL_DEF;
+ ip_hdr->type_of_service = 0;
+ ip_hdr->fragment_offset = 0;
+ ip_hdr->time_to_live = IP_DEFTTL;
+ ip_hdr->next_proto_id = proto;
+ ip_hdr->packet_id = 0;
+ ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
+ ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
+ ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);
+
+ /*
+ * Compute IP header checksum.
+ */
+ ptr16 = (unaligned_uint16_t *)ip_hdr;
+ ip_cksum = 0;
+ ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
+ ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
+ ip_cksum += ptr16[4];
+ ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
+ ip_cksum += ptr16[8]; ip_cksum += ptr16[9];
+ /*
+ * Reduce 32 bit checksum to 16 bits and complement it.
+ */
+ ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
+ (ip_cksum & 0x0000FFFF);
+ ip_cksum %= 65536;
+ ip_cksum = (~ip_cksum) & 0x0000FFFF;
+ if (ip_cksum == 0)
+ ip_cksum = 0xFFFF;
+ ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
+
+ return pkt_len;
+}
/*
* The maximum number of segments per packet is used when creating
@@ -283,3 +359,118 @@ nomore_mbuf:
return nb_pkt;
}
+
+int
+generate_packet_burst_proto(struct rte_mempool *mp,
+ struct rte_mbuf **pkts_burst,
+ struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
+ uint8_t ipv4, uint8_t proto, void *proto_hdr,
+ int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
+{
+ int i, nb_pkt = 0;
+ size_t eth_hdr_size;
+
+ struct rte_mbuf *pkt_seg;
+ struct rte_mbuf *pkt;
+
+ for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+ pkt = rte_pktmbuf_alloc(mp);
+ if (pkt == NULL) {
+nomore_mbuf:
+ if (nb_pkt == 0)
+ return -1;
+ break;
+ }
+
+ pkt->data_len = pkt_len;
+ pkt_seg = pkt;
+ for (i = 1; i < nb_pkt_segs; i++) {
+ pkt_seg->next = rte_pktmbuf_alloc(mp);
+ if (pkt_seg->next == NULL) {
+ pkt->nb_segs = i;
+ rte_pktmbuf_free(pkt);
+ goto nomore_mbuf;
+ }
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = pkt_len;
+ }
+ pkt_seg->next = NULL; /* Last segment of packet. */
+
+ /*
+ * Copy headers in first packet segment(s).
+ */
+ if (vlan_enabled)
+ eth_hdr_size = sizeof(struct ether_hdr) +
+ sizeof(struct vlan_hdr);
+ else
+ eth_hdr_size = sizeof(struct ether_hdr);
+
+ copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);
+
+ if (ipv4) {
+ copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt,
+ eth_hdr_size);
+ switch (proto) {
+ case IPPROTO_UDP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct udp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv4_hdr));
+ break;
+ case IPPROTO_TCP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct tcp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv4_hdr));
+ break;
+ case IPPROTO_SCTP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct sctp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv4_hdr));
+ break;
+ default:
+ break;
+ }
+ } else {
+ copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt,
+ eth_hdr_size);
+ switch (proto) {
+ case IPPROTO_UDP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct udp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv6_hdr));
+ break;
+ case IPPROTO_TCP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct tcp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv6_hdr));
+ break;
+ case IPPROTO_SCTP:
+ copy_buf_to_pkt(proto_hdr,
+ sizeof(struct sctp_hdr), pkt,
+ eth_hdr_size + sizeof(struct ipv6_hdr));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Complete first mbuf of packet and append it to the
+ * burst of packets to be transmitted.
+ */
+ pkt->nb_segs = nb_pkt_segs;
+ pkt->pkt_len = pkt_len;
+ pkt->l2_len = eth_hdr_size;
+
+ if (ipv4) {
+ pkt->vlan_tci = ETHER_TYPE_IPv4;
+ pkt->l3_len = sizeof(struct ipv4_hdr);
+ } else {
+ pkt->vlan_tci = ETHER_TYPE_IPv6;
+ pkt->l3_len = sizeof(struct ipv6_hdr);
+ }
+
+ pkts_burst[nb_pkt] = pkt;
+ }
+
+ return nb_pkt;
+}
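A hedged usage sketch for the new _proto generator above, building a small UDP-over-IPv4 burst; the mempool mp and the zeroed Ethernet header are placeholders, and only functions added in this patch are called:

    struct rte_mbuf *burst[4];
    struct ether_hdr eth;
    struct ipv4_hdr ip;
    struct udp_hdr udp;
    uint16_t len;
    int n;

    memset(&eth, 0, sizeof(eth));       /* MAC addresses left zeroed */
    eth.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

    len = initialize_udp_header(&udp, 1024, 2048, 64);
    len = initialize_ipv4_header_proto(&ip, IPV4_ADDR(10, 0, 0, 1),
            IPV4_ADDR(10, 0, 0, 2), len, IPPROTO_UDP);

    n = generate_packet_burst_proto(mp, burst, &eth, 0 /* no VLAN */,
            &ip, 1 /* IPv4 */, IPPROTO_UDP, &udp,
            4 /* packets */, 128 /* segment length */, 1 /* one segment */);
    if (n < 4)
        printf("mbuf pool exhausted, built %d packets\n", n);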
diff --git a/test/test/packet_burst_generator.h b/test/test/packet_burst_generator.h
index edc10441..3315bfa6 100644
--- a/test/test/packet_burst_generator.h
+++ b/test/test/packet_burst_generator.h
@@ -43,7 +43,8 @@ extern "C" {
#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
-
+#include <rte_tcp.h>
+#include <rte_sctp.h>
#define IPV4_ADDR(a, b, c, d)(((a & 0xff) << 24) | ((b & 0xff) << 16) | \
((c & 0xff) << 8) | (d & 0xff))
@@ -65,6 +66,13 @@ uint16_t
initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len);
+uint16_t
+initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port,
+ uint16_t dst_port, uint16_t pkt_data_len);
+
+uint16_t
+initialize_sctp_header(struct sctp_hdr *sctp_hdr, uint16_t src_port,
+ uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
@@ -74,15 +82,25 @@ uint16_t
initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len);
+uint16_t
+initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+ uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto);
+
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
uint8_t pkt_len, uint8_t nb_pkt_segs);
+int
+generate_packet_burst_proto(struct rte_mempool *mp,
+ struct rte_mbuf **pkts_burst,
+ struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
+ uint8_t ipv4, uint8_t proto, void *proto_hdr,
+ int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
+
#ifdef __cplusplus
}
#endif
-
#endif /* PACKET_BURST_GENERATOR_H_ */
diff --git a/test/test/process.h b/test/test/process.h
index 4f8d1211..51ced122 100644
--- a/test/test/process.h
+++ b/test/test/process.h
@@ -52,11 +52,7 @@ static inline int
process_dup(const char *const argv[], int numargs, const char *env_value)
{
int num;
-#ifdef RTE_LIBRTE_XEN_DOM0
- char *argv_cpy[numargs + 2];
-#else
char *argv_cpy[numargs + 1];
-#endif
int i, fd, status;
char path[32];
@@ -67,14 +63,8 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
/* make a copy of the arguments to be passed to exec */
for (i = 0; i < numargs; i++)
argv_cpy[i] = strdup(argv[i]);
-#ifdef RTE_LIBRTE_XEN_DOM0
- argv_cpy[i] = strdup("--xen-dom0");
- argv_cpy[i + 1] = NULL;
- num = numargs + 1;
-#else
argv_cpy[i] = NULL;
num = numargs;
-#endif
/* close all open file descriptors, check /proc/self/fd to only
* call close on open fds. Exclude fds 0, 1 and 2*/
diff --git a/test/test/test.c b/test/test/test.c
index c561eb56..0e6ff7ce 100644
--- a/test/test/test.c
+++ b/test/test/test.c
@@ -50,7 +50,6 @@ extern cmdline_parse_ctx_t main_ctx[];
#endif
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_log.h>
@@ -87,11 +86,7 @@ do_recursive_call(void)
{ "test_invalid_b_flag", no_action },
{ "test_invalid_vdev_flag", no_action },
{ "test_invalid_r_flag", no_action },
-#ifdef RTE_LIBRTE_XEN_DOM0
- { "test_dom0_misc_flags", no_action },
-#else
{ "test_misc_flags", no_action },
-#endif
{ "test_memory_flags", no_action },
{ "test_file_prefix", no_action },
{ "test_no_huge_flag", no_action },
diff --git a/test/test/test_atomic.c b/test/test/test_atomic.c
index c68723ae..39c9a595 100644
--- a/test/test/test_atomic.c
+++ b/test/test/test_atomic.c
@@ -37,7 +37,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
diff --git a/test/test/test_bitmap.c b/test/test/test_bitmap.c
new file mode 100644
index 00000000..5c9eee96
--- /dev/null
+++ b/test/test/test_bitmap.c
@@ -0,0 +1,192 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+
+#include "test.h"
+
+#define MAX_BITS 1000
+
+static int
+test_bitmap_scan_operations(struct rte_bitmap *bmp)
+{
+ uint32_t pos = 0;
+ uint64_t slab1_magic = 0xBADC0FFEEBADF00D;
+ uint64_t slab2_magic = 0xFEEDDEADDEADF00D;
+ uint64_t out_slab = 0;
+
+ rte_bitmap_reset(bmp);
+
+ rte_bitmap_set_slab(bmp, pos, slab1_magic);
+ rte_bitmap_set_slab(bmp, pos + RTE_BITMAP_SLAB_BIT_SIZE, slab2_magic);
+
+ if (!rte_bitmap_scan(bmp, &pos, &out_slab)) {
+ printf("Failed to get slab from bitmap.\n");
+ return TEST_FAILED;
+ }
+
+ if (slab1_magic != out_slab) {
+ printf("Scan operation sanity failed.\n");
+ return TEST_FAILED;
+ }
+
+ if (!rte_bitmap_scan(bmp, &pos, &out_slab)) {
+ printf("Failed to get slab from bitmap.\n");
+ return TEST_FAILED;
+ }
+
+ if (slab2_magic != out_slab) {
+ printf("Scan operation sanity failed.\n");
+ return TEST_FAILED;
+ }
+
+ /* Wrap around */
+ if (!rte_bitmap_scan(bmp, &pos, &out_slab)) {
+ printf("Failed to get slab from bitmap.\n");
+ return TEST_FAILED;
+ }
+
+ if (slab1_magic != out_slab) {
+ printf("Scan operation wrap around failed.\n");
+ return TEST_FAILED;
+ }
+
+ /* Scan reset check. */
+ __rte_bitmap_scan_init(bmp);
+
+ if (!rte_bitmap_scan(bmp, &pos, &out_slab)) {
+ printf("Failed to get slab from bitmap.\n");
+ return TEST_FAILED;
+ }
+
+ if (slab1_magic != out_slab) {
+ printf("Scan reset operation failed.\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_bitmap_slab_set_get(struct rte_bitmap *bmp)
+{
+ uint32_t pos = 0;
+ uint64_t slab_magic = 0xBADC0FFEEBADF00D;
+ uint64_t out_slab = 0;
+
+ rte_bitmap_reset(bmp);
+ rte_bitmap_set_slab(bmp, pos, slab_magic);
+
+ if (!rte_bitmap_scan(bmp, &pos, &out_slab)) {
+ printf("Failed to get slab from bitmap.\n");
+ return TEST_FAILED;
+ }
+
+
+ if (slab_magic != out_slab) {
+ printf("Invalid slab in bitmap.\n");
+ return TEST_FAILED;
+ }
+
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_bitmap_set_get_clear(struct rte_bitmap *bmp)
+{
+ int i;
+
+ rte_bitmap_reset(bmp);
+ for (i = 0; i < MAX_BITS; i++)
+ rte_bitmap_set(bmp, i);
+
+ for (i = 0; i < MAX_BITS; i++) {
+ if (!rte_bitmap_get(bmp, i)) {
+ printf("Failed to get set bit.\n");
+ return TEST_FAILED;
+ }
+ }
+
+ for (i = 0; i < MAX_BITS; i++)
+ rte_bitmap_clear(bmp, i);
+
+ for (i = 0; i < MAX_BITS; i++) {
+ if (rte_bitmap_get(bmp, i)) {
+ printf("Failed to clear set bit.\n");
+ return TEST_FAILED;
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_bitmap(void)
+{
+ void *mem;
+ uint32_t bmp_size;
+ struct rte_bitmap *bmp;
+
+ bmp_size =
+ rte_bitmap_get_memory_footprint(MAX_BITS);
+
+ mem = rte_zmalloc("test_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ printf("Failed to allocate memory for bitmap\n");
+ return TEST_FAILED;
+ }
+
+ bmp = rte_bitmap_init(MAX_BITS, mem, bmp_size);
+ if (bmp == NULL) {
+ printf("Failed to init bitmap\n");
+ return TEST_FAILED;
+ }
+
+ if (test_bitmap_set_get_clear(bmp) < 0)
+ return TEST_FAILED;
+
+ if (test_bitmap_slab_set_get(bmp) < 0)
+ return TEST_FAILED;
+
+ if (test_bitmap_scan_operations(bmp) < 0)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+REGISTER_TEST_COMMAND(bitmap_test, test_bitmap);
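rte_bitmap_scan(), exercised above, yields whole 64-bit slabs: pos is the bit index of the slab's first bit and the out parameter carries the raw slab value, with automatic wrap-around. A short sketch of expanding one scanned slab into absolute bit positions:

    uint32_t pos;
    uint64_t slab;

    if (rte_bitmap_scan(bmp, &pos, &slab)) {
        while (slab != 0) {
            uint32_t bit = __builtin_ctzll(slab);  /* lowest set bit */
            printf("bit %u is set\n", pos + bit);
            slab &= slab - 1;                      /* clear that bit */
        }
    }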
diff --git a/test/test/test_cfgfile.c b/test/test/test_cfgfile.c
index 4cc9b14d..37435b39 100644
--- a/test/test/test_cfgfile.c
+++ b/test/test/test_cfgfile.c
@@ -154,6 +154,43 @@ test_cfgfile_sample2(void)
}
static int
+test_cfgfile_realloc_sections(void)
+{
+ struct rte_cfgfile *cfgfile;
+ int ret;
+ const char *value;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/realloc_sections.ini", 0);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to load config file");
+
+ ret = rte_cfgfile_num_sections(cfgfile, NULL, 0);
+ TEST_ASSERT(ret == 9, "Unexpected number of sections: %d", ret);
+
+ ret = rte_cfgfile_has_section(cfgfile, "section9");
+ TEST_ASSERT(ret, "section9 missing");
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "section3");
+ TEST_ASSERT(ret == 21,
+ "section3 unexpected number of entries: %d", ret);
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "section9");
+ TEST_ASSERT(ret == 8, "section9 unexpected number of entries: %d", ret);
+
+ value = rte_cfgfile_get_entry(cfgfile, "section9", "key8");
+ TEST_ASSERT(strcmp("value8_section9", value) == 0,
+ "key unexpected value: %s", value);
+
+ ret = rte_cfgfile_save(cfgfile, "/tmp/cfgfile_save.ini");
+ TEST_ASSERT_SUCCESS(ret, "Failed to save *.ini file");
+ remove("/tmp/cfgfile_save.ini");
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
test_cfgfile_invalid_section_header(void)
{
struct rte_cfgfile *cfgfile;
@@ -292,6 +329,9 @@ test_cfgfile(void)
if (test_cfgfile_sample2())
return -1;
+ if (test_cfgfile_realloc_sections())
+ return -1;
+
if (test_cfgfile_invalid_section_header())
return -1;
diff --git a/test/test/test_cfgfiles/etc/realloc_sections.ini b/test/test/test_cfgfiles/etc/realloc_sections.ini
new file mode 100644
index 00000000..e653e40c
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/realloc_sections.ini
@@ -0,0 +1,128 @@
+[section1]
+key1=value1_section1
+key2=value2_section1
+key3=value3_section1
+key4=value4_section1
+key5=value5_section1
+key6=value6_section1
+key7=value7_section1
+key8=value8_section1
+key9=value9_section1
+key10=value10_section1
+key11=value11_section1
+key12=value12_section1
+key13=value13_section1
+key14=value14_section1
+key15=value15_section1
+key16=value16_section1
+key17=value17_section1
+key18=value18_section1
+key19=value19_section1
+key20=value20_section1
+key21=value21_section1
+
+[section2]
+key1=value1_section2
+key2=value2_section2
+key3=value3_section2
+key4=value4_section2
+key5=value5_section2
+key6=value6_section2
+key7=value7_section2
+key8=value8_section2
+key9=value9_section2
+key10=value10_section2
+key11=value11_section2
+key12=value12_section2
+key13=value13_section2
+key14=value14_section2
+key15=value15_section2
+key16=value16_section2
+key17=value17_section2
+key18=value18_section2
+key19=value19_section2
+key20=value20_section2
+key21=value21_section2
+
+[section3]
+key1=value1_section3
+key2=value2_section3
+key3=value3_section3
+key4=value4_section3
+key5=value5_section3
+key6=value6_section3
+key7=value7_section3
+key8=value8_section3
+key9=value9_section3
+key10=value10_section3
+key11=value11_section3
+key12=value12_section3
+key13=value13_section3
+key14=value14_section3
+key15=value15_section3
+key16=value16_section3
+key17=value17_section3
+key18=value18_section3
+key19=value19_section3
+key20=value20_section3
+key21=value21_section3
+
+[section4]
+key1=value1_section4
+key2=value2_section4
+key3=value3_section4
+key4=value4_section4
+key5=value5_section4
+key6=value6_section4
+key7=value7_section4
+key8=value8_section4
+
+[section5]
+key1=value1_section5
+key2=value2_section5
+key3=value3_section5
+key4=value4_section5
+key5=value5_section5
+key6=value6_section5
+key7=value7_section5
+key8=value8_section5
+
+[section6]
+key1=value1_section6
+key2=value2_section6
+key3=value3_section6
+key4=value4_section6
+key5=value5_section6
+key6=value6_section6
+key7=value7_section6
+key8=value8_section6
+
+[section7]
+key1=value1_section7
+key2=value2_section7
+key3=value3_section7
+key4=value4_section7
+key5=value5_section7
+key6=value6_section7
+key7=value7_section7
+key8=value8_section7
+
+[section8]
+key1=value1_section8
+key2=value2_section8
+key3=value3_section8
+key4=value4_section8
+key5=value5_section8
+key6=value6_section8
+key7=value7_section8
+key8=value8_section8
+
+[section9]
+key1=value1_section9
+key2=value2_section9
+key3=value3_section9
+key4=value4_section9
+key5=value5_section9
+key6=value6_section9
+key7=value7_section9
+key8=value8_section9
diff --git a/test/test/test_cpuflags.c b/test/test/test_cpuflags.c
index 0e5ebe78..08c16613 100644
--- a/test/test/test_cpuflags.c
+++ b/test/test/test_cpuflags.c
@@ -171,6 +171,9 @@ test_cpuflags(void)
printf("Check for AVX2:\t\t");
CHECK_FOR_FLAG(RTE_CPUFLAG_AVX2);
+ printf("Check for AVX512F:\t");
+ CHECK_FOR_FLAG(RTE_CPUFLAG_AVX512F);
+
printf("Check for TRBOBST:\t");
CHECK_FOR_FLAG(RTE_CPUFLAG_TRBOBST);
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index a4116c65..1bed65da 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -36,6 +36,7 @@
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_pause.h>
+#include <rte_bus_vdev.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -58,7 +59,7 @@
#include "test_cryptodev_snow3g_test_vectors.h"
#include "test_cryptodev_snow3g_hash_test_vectors.h"
#include "test_cryptodev_zuc_test_vectors.h"
-#include "test_cryptodev_gcm_test_vectors.h"
+#include "test_cryptodev_aead_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
static int gbl_driver_id;
@@ -341,6 +342,28 @@ testsuite_setup(void)
}
}
+ /* Create a MRVL device if required */
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_MRVL_PMD))) {
+#ifndef RTE_LIBRTE_PMD_MRVL_CRYPTO
+ RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO must be"
+ " enabled in config file to run this testsuite.\n");
+ return TEST_FAILED;
+#endif
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)));
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ }
+ }
+
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
if (gbl_driver_id == rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1310,7 +1333,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Set crypto operation authentication parameters */
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
@@ -1462,7 +1485,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
sym_op->m_src = ut_params->ibuf;
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
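The digest.phys_addr changes here and throughout this file are part of the 17.11 IOVA rework: rte_pktmbuf_mtophys*() becomes rte_pktmbuf_iova*() with unchanged semantics. The new spelling, with mbuf and offset as placeholders:

    /* Bus (IO virtual) address of mbuf data at a byte offset. */
    rte_iova_t addr = rte_pktmbuf_iova_offset(mbuf, offset);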
@@ -1768,6 +1791,63 @@ test_AES_cipheronly_qat_all(void)
}
static int
+test_AES_chain_dpaa_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)),
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_dpaa_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_authonly_dpaa_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_dpaa2_sec_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1816,7 +1896,7 @@ test_authonly_dpaa2_sec_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(RTE_CRYPTODEV_DPAA2_SEC_PMD)),
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1862,6 +1942,101 @@ test_AES_chain_armv8_all(void)
return TEST_SUCCESS;
}
+static int
+test_AES_chain_mrvl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_mrvl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_authonly_mrvl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_chain_mrvl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ BLKCIPHER_3DES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_mrvl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
/* ***** SNOW 3G Tests ***** */
static int
create_wireless_algo_hash_session(uint8_t dev_id,
@@ -2211,7 +2386,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2267,7 +2442,7 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2334,7 +2509,7 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2392,7 +2567,7 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -4672,6 +4847,25 @@ test_DES_cipheronly_qat_all(void)
}
static int
+test_DES_cipheronly_openssl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)),
+ BLKCIPHER_DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_DES_docsis_openssl_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4691,6 +4885,82 @@ test_DES_docsis_openssl_all(void)
}
static int
+test_DES_cipheronly_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
+ BLKCIPHER_DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_DES_docsis_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
+ BLKCIPHER_DES_DOCSIS_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_chain_dpaa_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)),
+ BLKCIPHER_3DES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_dpaa_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_3DES_chain_dpaa2_sec_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4785,10 +5055,11 @@ test_3DES_cipheronly_openssl_all(void)
return TEST_SUCCESS;
}
-/* ***** AES-GCM Tests ***** */
+/* ***** AEAD algorithm Tests ***** */
static int
-create_gcm_session(uint8_t dev_id, enum rte_crypto_aead_operation op,
+create_aead_session(uint8_t dev_id, enum rte_crypto_aead_algorithm algo,
+ enum rte_crypto_aead_operation op,
const uint8_t *key, const uint8_t key_len,
const uint16_t aad_len, const uint8_t auth_len,
uint8_t iv_len)
@@ -4803,7 +5074,7 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_aead_operation op,
/* Setup AEAD Parameters */
ut_params->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
ut_params->aead_xform.next = NULL;
- ut_params->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ ut_params->aead_xform.aead.algo = algo;
ut_params->aead_xform.aead.op = op;
ut_params->aead_xform.aead.key.data = aead_key;
ut_params->aead_xform.aead.key.length = key_len;
@@ -4827,7 +5098,8 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_aead_operation op,
}
static int
-create_gcm_xforms(struct rte_crypto_op *op,
+create_aead_xform(struct rte_crypto_op *op,
+ enum rte_crypto_aead_algorithm algo,
enum rte_crypto_aead_operation aead_op,
uint8_t *key, const uint8_t key_len,
const uint8_t aad_len, const uint8_t auth_len,
@@ -4841,7 +5113,7 @@ create_gcm_xforms(struct rte_crypto_op *op,
/* Setup AEAD Parameters */
sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
sym_op->xform->next = NULL;
- sym_op->xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ sym_op->xform->aead.algo = algo;
sym_op->xform->aead.op = aead_op;
sym_op->xform->aead.key.data = key;
sym_op->xform->aead.key.length = key_len;
@@ -4856,8 +5128,8 @@ create_gcm_xforms(struct rte_crypto_op *op,
}
static int
-create_gcm_operation(enum rte_crypto_aead_operation op,
- const struct gcm_test_data *tdata)
+create_aead_operation(enum rte_crypto_aead_operation op,
+ const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -4874,25 +5146,49 @@ create_gcm_operation(enum rte_crypto_aead_operation op,
struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
/* Append aad data */
- aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
- sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- aad_pad_len);
- TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
- "no room to append aad");
-
- sym_op->aead.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
- memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
- TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
- tdata->aad.len);
-
- /* Append IV at the end of the crypto operation*/
- uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
- uint8_t *, IV_OFFSET);
-
- rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
- TEST_HEXDUMP(stdout, "iv:", iv_ptr,
- tdata->iv.len);
+ if (tdata->algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len + 18, 16);
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ aad_pad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
+ "no room to append aad");
+
+ sym_op->aead.aad.phys_addr =
+ rte_pktmbuf_iova(ut_params->ibuf);
+ /* Copy AAD 18 bytes after the AAD pointer, according to the API */
+ memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len);
+ TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
+ tdata->aad.len);
+
+ /* Append IV at the end of the crypto operation*/
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
+
+ /* Copy IV 1 byte after the IV pointer, according to the API */
+ rte_memcpy(iv_ptr + 1, tdata->iv.data, tdata->iv.len);
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr,
+ tdata->iv.len);
+ } else {
+ aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ aad_pad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
+ "no room to append aad");
+
+ sym_op->aead.aad.phys_addr =
+ rte_pktmbuf_iova(ut_params->ibuf);
+ memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
+ TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
+ tdata->aad.len);
+
+ /* Append IV at the end of the crypto operation*/
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
+
+ rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr,
+ tdata->iv.len);
+ }
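The +1 and +18 offsets above encode the AES-CCM layout expected by the cryptodev API: byte 0 of the IV region is reserved for the CCM flags byte, and the first 18 bytes of the AAD region are reserved for the 16-byte B0 block plus the 2-byte encoded AAD length. A minimal sketch of that convention (an illustrative helper under those assumptions, not code from the patch):

        #include <string.h>
        #include <stdint.h>

        /* Place the nonce and AAD where the CCM API expects them:
         * iv_buf[0] is left free for the flags byte, and aad_buf[0..17]
         * is left free for the B0 block (16 bytes) plus the encoded
         * AAD length (2 bytes). */
        static void
        ccm_place_nonce_and_aad(uint8_t *iv_buf, const uint8_t *nonce,
                        size_t nonce_len, uint8_t *aad_buf,
                        const uint8_t *aad, size_t aad_len)
        {
                memcpy(iv_buf + 1, nonce, nonce_len);   /* skip flags byte */
                memcpy(aad_buf + 18, aad, aad_len);     /* skip B0 + length */
        }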
/* Append plaintext/ciphertext */
if (op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
@@ -4948,7 +5244,7 @@ create_gcm_operation(enum rte_crypto_aead_operation op,
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
memset(sym_op->aead.digest.data, 0, tdata->auth_tag.len);
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->obuf ? ut_params->obuf :
ut_params->ibuf,
plaintext_pad_len +
@@ -4958,7 +5254,7 @@ create_gcm_operation(enum rte_crypto_aead_operation op,
ut_params->ibuf, tdata->auth_tag.len);
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf,
plaintext_pad_len + aad_pad_len);
@@ -4976,7 +5272,7 @@ create_gcm_operation(enum rte_crypto_aead_operation op,
}
static int
-test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
+test_authenticated_encryption(const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -4986,8 +5282,9 @@ test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
uint16_t plaintext_pad_len;
uint32_t i;
- /* Create GCM session */
- retval = create_gcm_session(ts_params->valid_devs[0],
+ /* Create AEAD session */
+ retval = create_aead_session(ts_params->valid_devs[0],
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -4998,7 +5295,7 @@ test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
if (tdata->aad.len > MBUF_SIZE) {
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
/* Populate full size of aad data */
- for (i = 32; i < GCM_MAX_AAD_LENGTH; i += 32)
+ for (i = 32; i < MAX_AAD_LENGTH; i += 32)
memcpy(&tdata->aad.data[i], &tdata->aad.data[0], 32);
} else
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -5007,8 +5304,8 @@ test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
@@ -5045,13 +5342,13 @@ test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
ciphertext,
tdata->ciphertext.data,
tdata->ciphertext.len,
- "GCM Ciphertext data not as expected");
+ "Ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
auth_tag,
tdata->auth_tag.data,
tdata->auth_tag.len,
- "GCM Generated auth tag not as expected");
+ "Generated auth tag not as expected");
return 0;
@@ -5060,143 +5357,143 @@ test_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
static int
test_AES_GCM_authenticated_encryption_test_case_1(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_1);
+ return test_authenticated_encryption(&gcm_test_case_1);
}
static int
test_AES_GCM_authenticated_encryption_test_case_2(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_2);
+ return test_authenticated_encryption(&gcm_test_case_2);
}
static int
test_AES_GCM_authenticated_encryption_test_case_3(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_3);
+ return test_authenticated_encryption(&gcm_test_case_3);
}
static int
test_AES_GCM_authenticated_encryption_test_case_4(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_4);
+ return test_authenticated_encryption(&gcm_test_case_4);
}
static int
test_AES_GCM_authenticated_encryption_test_case_5(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_5);
+ return test_authenticated_encryption(&gcm_test_case_5);
}
static int
test_AES_GCM_authenticated_encryption_test_case_6(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_6);
+ return test_authenticated_encryption(&gcm_test_case_6);
}
static int
test_AES_GCM_authenticated_encryption_test_case_7(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_7);
+ return test_authenticated_encryption(&gcm_test_case_7);
}
static int
test_AES_GCM_auth_encryption_test_case_192_1(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_1);
+ return test_authenticated_encryption(&gcm_test_case_192_1);
}
static int
test_AES_GCM_auth_encryption_test_case_192_2(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_2);
+ return test_authenticated_encryption(&gcm_test_case_192_2);
}
static int
test_AES_GCM_auth_encryption_test_case_192_3(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_3);
+ return test_authenticated_encryption(&gcm_test_case_192_3);
}
static int
test_AES_GCM_auth_encryption_test_case_192_4(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_4);
+ return test_authenticated_encryption(&gcm_test_case_192_4);
}
static int
test_AES_GCM_auth_encryption_test_case_192_5(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_5);
+ return test_authenticated_encryption(&gcm_test_case_192_5);
}
static int
test_AES_GCM_auth_encryption_test_case_192_6(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_6);
+ return test_authenticated_encryption(&gcm_test_case_192_6);
}
static int
test_AES_GCM_auth_encryption_test_case_192_7(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_192_7);
+ return test_authenticated_encryption(&gcm_test_case_192_7);
}
static int
test_AES_GCM_auth_encryption_test_case_256_1(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_1);
+ return test_authenticated_encryption(&gcm_test_case_256_1);
}
static int
test_AES_GCM_auth_encryption_test_case_256_2(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_2);
+ return test_authenticated_encryption(&gcm_test_case_256_2);
}
static int
test_AES_GCM_auth_encryption_test_case_256_3(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_3);
+ return test_authenticated_encryption(&gcm_test_case_256_3);
}
static int
test_AES_GCM_auth_encryption_test_case_256_4(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_4);
+ return test_authenticated_encryption(&gcm_test_case_256_4);
}
static int
test_AES_GCM_auth_encryption_test_case_256_5(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_5);
+ return test_authenticated_encryption(&gcm_test_case_256_5);
}
static int
test_AES_GCM_auth_encryption_test_case_256_6(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_6);
+ return test_authenticated_encryption(&gcm_test_case_256_6);
}
static int
test_AES_GCM_auth_encryption_test_case_256_7(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_256_7);
+ return test_authenticated_encryption(&gcm_test_case_256_7);
}
static int
test_AES_GCM_auth_encryption_test_case_aad_1(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_aad_1);
+ return test_authenticated_encryption(&gcm_test_case_aad_1);
}
static int
test_AES_GCM_auth_encryption_test_case_aad_2(void)
{
- return test_AES_GCM_authenticated_encryption(&gcm_test_case_aad_2);
+ return test_authenticated_encryption(&gcm_test_case_aad_2);
}
static int
-test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
+test_authenticated_decryption(const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5205,8 +5502,9 @@ test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
uint8_t *plaintext;
uint32_t i;
- /* Create GCM session */
- retval = create_gcm_session(ts_params->valid_devs[0],
+ /* Create AEAD session */
+ retval = create_aead_session(ts_params->valid_devs[0],
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_DECRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -5218,7 +5516,7 @@ test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
if (tdata->aad.len > MBUF_SIZE) {
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
/* Populate full size of aad data */
- for (i = 32; i < GCM_MAX_AAD_LENGTH; i += 32)
+ for (i = 32; i < MAX_AAD_LENGTH; i += 32)
memcpy(&tdata->aad.data[i], &tdata->aad.data[0], 32);
} else
ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -5226,8 +5524,8 @@ test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
@@ -5257,154 +5555,154 @@ test_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
plaintext,
tdata->plaintext.data,
tdata->plaintext.len,
- "GCM plaintext data not as expected");
+ "Plaintext data not as expected");
TEST_ASSERT_EQUAL(ut_params->op->status,
RTE_CRYPTO_OP_STATUS_SUCCESS,
- "GCM authentication failed");
+ "Authentication failed");
return 0;
}
static int
test_AES_GCM_authenticated_decryption_test_case_1(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_1);
+ return test_authenticated_decryption(&gcm_test_case_1);
}
static int
test_AES_GCM_authenticated_decryption_test_case_2(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_2);
+ return test_authenticated_decryption(&gcm_test_case_2);
}
static int
test_AES_GCM_authenticated_decryption_test_case_3(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_3);
+ return test_authenticated_decryption(&gcm_test_case_3);
}
static int
test_AES_GCM_authenticated_decryption_test_case_4(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_4);
+ return test_authenticated_decryption(&gcm_test_case_4);
}
static int
test_AES_GCM_authenticated_decryption_test_case_5(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_5);
+ return test_authenticated_decryption(&gcm_test_case_5);
}
static int
test_AES_GCM_authenticated_decryption_test_case_6(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_6);
+ return test_authenticated_decryption(&gcm_test_case_6);
}
static int
test_AES_GCM_authenticated_decryption_test_case_7(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_7);
+ return test_authenticated_decryption(&gcm_test_case_7);
}
static int
test_AES_GCM_auth_decryption_test_case_192_1(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_1);
+ return test_authenticated_decryption(&gcm_test_case_192_1);
}
static int
test_AES_GCM_auth_decryption_test_case_192_2(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_2);
+ return test_authenticated_decryption(&gcm_test_case_192_2);
}
static int
test_AES_GCM_auth_decryption_test_case_192_3(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_3);
+ return test_authenticated_decryption(&gcm_test_case_192_3);
}
static int
test_AES_GCM_auth_decryption_test_case_192_4(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_4);
+ return test_authenticated_decryption(&gcm_test_case_192_4);
}
static int
test_AES_GCM_auth_decryption_test_case_192_5(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_5);
+ return test_authenticated_decryption(&gcm_test_case_192_5);
}
static int
test_AES_GCM_auth_decryption_test_case_192_6(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_6);
+ return test_authenticated_decryption(&gcm_test_case_192_6);
}
static int
test_AES_GCM_auth_decryption_test_case_192_7(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_192_7);
+ return test_authenticated_decryption(&gcm_test_case_192_7);
}
static int
test_AES_GCM_auth_decryption_test_case_256_1(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_1);
+ return test_authenticated_decryption(&gcm_test_case_256_1);
}
static int
test_AES_GCM_auth_decryption_test_case_256_2(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_2);
+ return test_authenticated_decryption(&gcm_test_case_256_2);
}
static int
test_AES_GCM_auth_decryption_test_case_256_3(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_3);
+ return test_authenticated_decryption(&gcm_test_case_256_3);
}
static int
test_AES_GCM_auth_decryption_test_case_256_4(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_4);
+ return test_authenticated_decryption(&gcm_test_case_256_4);
}
static int
test_AES_GCM_auth_decryption_test_case_256_5(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_5);
+ return test_authenticated_decryption(&gcm_test_case_256_5);
}
static int
test_AES_GCM_auth_decryption_test_case_256_6(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_6);
+ return test_authenticated_decryption(&gcm_test_case_256_6);
}
static int
test_AES_GCM_auth_decryption_test_case_256_7(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_256_7);
+ return test_authenticated_decryption(&gcm_test_case_256_7);
}
static int
test_AES_GCM_auth_decryption_test_case_aad_1(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_aad_1);
+ return test_authenticated_decryption(&gcm_test_case_aad_1);
}
static int
test_AES_GCM_auth_decryption_test_case_aad_2(void)
{
- return test_AES_GCM_authenticated_decryption(&gcm_test_case_aad_2);
+ return test_authenticated_decryption(&gcm_test_case_aad_2);
}
static int
-test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
+test_authenticated_encryption_oop(const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5413,8 +5711,9 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
uint8_t *ciphertext, *auth_tag;
uint16_t plaintext_pad_len;
- /* Create GCM session */
- retval = create_gcm_session(ts_params->valid_devs[0],
+ /* Create AEAD session */
+ retval = create_aead_session(ts_params->valid_devs[0],
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -5431,8 +5730,8 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->obuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
@@ -5462,13 +5761,13 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
ciphertext,
tdata->ciphertext.data,
tdata->ciphertext.len,
- "GCM Ciphertext data not as expected");
+ "Ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
auth_tag,
tdata->auth_tag.data,
tdata->auth_tag.len,
- "GCM Generated auth tag not as expected");
+ "Generated auth tag not as expected");
return 0;
@@ -5477,11 +5776,11 @@ test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
static int
test_AES_GCM_authenticated_encryption_oop_test_case_1(void)
{
- return test_AES_GCM_authenticated_encryption_oop(&gcm_test_case_5);
+ return test_authenticated_encryption_oop(&gcm_test_case_5);
}
static int
-test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
+test_authenticated_decryption_oop(const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5489,8 +5788,9 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
int retval;
uint8_t *plaintext;
- /* Create GCM session */
- retval = create_gcm_session(ts_params->valid_devs[0],
+ /* Create AEAD session */
+ retval = create_aead_session(ts_params->valid_devs[0],
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_DECRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -5507,8 +5807,8 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->obuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
@@ -5534,23 +5834,23 @@ test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
plaintext,
tdata->plaintext.data,
tdata->plaintext.len,
- "GCM plaintext data not as expected");
+ "Plaintext data not as expected");
TEST_ASSERT_EQUAL(ut_params->op->status,
RTE_CRYPTO_OP_STATUS_SUCCESS,
- "GCM authentication failed");
+ "Authentication failed");
return 0;
}
static int
test_AES_GCM_authenticated_decryption_oop_test_case_1(void)
{
- return test_AES_GCM_authenticated_decryption_oop(&gcm_test_case_5);
+ return test_authenticated_decryption_oop(&gcm_test_case_5);
}
static int
-test_AES_GCM_authenticated_encryption_sessionless(
- const struct gcm_test_data *tdata)
+test_authenticated_encryption_sessionless(
+ const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5566,14 +5866,15 @@ test_AES_GCM_authenticated_encryption_sessionless(
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
- /* Create GCM xforms */
+ /* Create AEAD xform */
memcpy(key, tdata->key.data, tdata->key.len);
- retval = create_gcm_xforms(ut_params->op,
+ retval = create_aead_xform(ut_params->op,
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_ENCRYPT,
key, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -5610,13 +5911,13 @@ test_AES_GCM_authenticated_encryption_sessionless(
ciphertext,
tdata->ciphertext.data,
tdata->ciphertext.len,
- "GCM Ciphertext data not as expected");
+ "Ciphertext data not as expected");
TEST_ASSERT_BUFFERS_ARE_EQUAL(
auth_tag,
tdata->auth_tag.data,
tdata->auth_tag.len,
- "GCM Generated auth tag not as expected");
+ "Generated auth tag not as expected");
return 0;
@@ -5625,13 +5926,13 @@ test_AES_GCM_authenticated_encryption_sessionless(
static int
test_AES_GCM_authenticated_encryption_sessionless_test_case_1(void)
{
- return test_AES_GCM_authenticated_encryption_sessionless(
+ return test_authenticated_encryption_sessionless(
&gcm_test_case_5);
}
static int
-test_AES_GCM_authenticated_decryption_sessionless(
- const struct gcm_test_data *tdata)
+test_authenticated_decryption_sessionless(
+ const struct aead_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -5646,14 +5947,15 @@ test_AES_GCM_authenticated_decryption_sessionless(
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Create GCM operation */
- retval = create_gcm_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
+ /* Create AEAD operation */
+ retval = create_aead_operation(RTE_CRYPTO_AEAD_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
- /* Create GCM xforms */
+ /* Create AEAD xform */
memcpy(key, tdata->key.data, tdata->key.len);
- retval = create_gcm_xforms(ut_params->op,
+ retval = create_aead_xform(ut_params->op,
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_DECRYPT,
key, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -5686,22 +5988,130 @@ test_AES_GCM_authenticated_decryption_sessionless(
plaintext,
tdata->plaintext.data,
tdata->plaintext.len,
- "GCM plaintext data not as expected");
+ "Plaintext data not as expected");
TEST_ASSERT_EQUAL(ut_params->op->status,
RTE_CRYPTO_OP_STATUS_SUCCESS,
- "GCM authentication failed");
+ "Authentication failed");
return 0;
}
static int
test_AES_GCM_authenticated_decryption_sessionless_test_case_1(void)
{
- return test_AES_GCM_authenticated_decryption_sessionless(
+ return test_authenticated_decryption_sessionless(
&gcm_test_case_5);
}
static int
+test_AES_CCM_authenticated_encryption_test_case_128_1(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_128_1);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_128_2(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_128_2);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_128_3(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_128_3);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_128_1(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_128_1);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_128_2(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_128_2);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_128_3(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_128_3);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_192_1(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_192_1);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_192_2(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_192_2);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_192_3(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_192_3);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_192_1(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_192_1);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_192_2(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_192_2);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_192_3(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_192_3);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_256_1(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_256_1);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_256_2(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_256_2);
+}
+
+static int
+test_AES_CCM_authenticated_encryption_test_case_256_3(void)
+{
+ return test_authenticated_encryption(&ccm_test_case_256_3);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_256_1(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_256_1);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_256_2(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_256_2);
+}
+
+static int
+test_AES_CCM_authenticated_decryption_test_case_256_3(void)
+{
+ return test_authenticated_decryption(&ccm_test_case_256_3);
+}
+
+static int
test_stats(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -5817,7 +6227,7 @@ static int MD5_HMAC_create_op(struct crypto_unittest_params *ut_params,
ut_params->ibuf, MD5_DIGEST_LEN);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -6553,7 +6963,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -7075,7 +7485,7 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->plaintext.len);
if (auth_generate)
@@ -7122,7 +7532,7 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
@@ -7175,7 +7585,7 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
@@ -7406,8 +7816,8 @@ test_authenticated_decryption_fail_when_corruption(
}
static int
-create_gcm_operation_SGL(enum rte_crypto_aead_operation op,
- const struct gcm_test_data *tdata,
+create_aead_operation_SGL(enum rte_crypto_aead_operation op,
+ const struct aead_test_data *tdata,
void *digest_mem, uint64_t digest_phys)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -7415,7 +7825,7 @@ create_gcm_operation_SGL(enum rte_crypto_aead_operation op,
const unsigned int auth_tag_len = tdata->auth_tag.len;
const unsigned int iv_len = tdata->iv.len;
- const unsigned int aad_len = tdata->aad.len;
+ unsigned int aad_len = tdata->aad.len;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -7440,24 +7850,50 @@ create_gcm_operation_SGL(enum rte_crypto_aead_operation op,
auth_tag_len);
}
- uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
- uint8_t *, IV_OFFSET);
+ /* Append aad data */
+ if (tdata->algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
- rte_memcpy(iv_ptr, tdata->iv.data, iv_len);
+ /* Copy IV 1 byte after the IV pointer, according to the API */
+ rte_memcpy(iv_ptr + 1, tdata->iv.data, iv_len);
- sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_len);
- TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
- "no room to prepend aad");
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
+ aad_len = RTE_ALIGN_CEIL(aad_len + 18, 16);
- memset(sym_op->aead.aad.data, 0, aad_len);
- rte_memcpy(sym_op->aead.aad.data, tdata->aad.data, aad_len);
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, aad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
+ "no room to prepend aad");
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
+ ut_params->ibuf);
- TEST_HEXDUMP(stdout, "iv:", iv_ptr, iv_len);
- TEST_HEXDUMP(stdout, "aad:",
- sym_op->aead.aad.data, aad_len);
+ memset(sym_op->aead.aad.data, 0, aad_len);
+ /* Copy AAD 18 bytes after the AAD pointer, according to the API */
+ rte_memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len);
+
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr, iv_len);
+ TEST_HEXDUMP(stdout, "aad:",
+ sym_op->aead.aad.data, aad_len);
+ } else {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+ uint8_t *, IV_OFFSET);
+
+ rte_memcpy(iv_ptr, tdata->iv.data, iv_len);
+
+ sym_op->aead.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, aad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
+ "no room to prepend aad");
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
+ ut_params->ibuf);
+
+ memset(sym_op->aead.aad.data, 0, aad_len);
+ rte_memcpy(sym_op->aead.aad.data, tdata->aad.data, aad_len);
+
+ TEST_HEXDUMP(stdout, "iv:", iv_ptr, iv_len);
+ TEST_HEXDUMP(stdout, "aad:",
+ sym_op->aead.aad.data, aad_len);
+ }
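For the largest CCM AAD exercised by these vectors (the 22-byte ccm_aad_test_2), the padding arithmetic above works out as follows (a worked example, not code from the patch):

        /* aad_len = 22; reserving 18 bytes for B0 plus the encoded AAD
         * length, then rounding up to a 16-byte boundary:
         *   RTE_ALIGN_CEIL(22 + 18, 16) = RTE_ALIGN_CEIL(40, 16) = 48
         * so 48 bytes are prepended and the AEAD data offset becomes 48. */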
sym_op->aead.data.length = tdata->plaintext.len;
sym_op->aead.data.offset = aad_len;
@@ -7468,7 +7904,7 @@ create_gcm_operation_SGL(enum rte_crypto_aead_operation op,
#define SGL_MAX_NO 16
static int
-test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
+test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
const int oop, uint32_t fragsz, uint32_t fragsz_oop)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -7513,8 +7949,9 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
buf_oop = ut_params->obuf;
}
- /* Create GCM session */
- retval = create_gcm_session(ts_params->valid_devs[0],
+ /* Create AEAD session */
+ retval = create_aead_session(ts_params->valid_devs[0],
+ tdata->algo,
RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->auth_tag.len,
@@ -7594,7 +8031,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
digest_mem = rte_pktmbuf_append(ut_params->obuf,
tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(
+ digest_phys = rte_pktmbuf_iova_offset(
ut_params->obuf,
tdata->plaintext.len + prepend_len);
}
@@ -7632,19 +8069,19 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
* Place digest at the end of the last buffer
*/
if (!digest_phys)
- digest_phys = rte_pktmbuf_mtophys(buf) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf) + to_trn;
if (oop && buf_last_oop)
- digest_phys = rte_pktmbuf_mtophys(buf_last_oop) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf_last_oop) + to_trn;
if (!digest_mem && !oop) {
digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(ut_params->ibuf,
+ digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
tdata->plaintext.len);
}
- /* Create GCM opertaion */
- retval = create_gcm_operation_SGL(RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ /* Create AEAD operation */
+ retval = create_aead_operation_SGL(RTE_CRYPTO_AEAD_OP_ENCRYPT,
tdata, digest_mem, digest_phys);
if (retval < 0)
@@ -7678,7 +8115,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
ciphertext,
tdata->ciphertext.data,
fragsz,
- "GCM Ciphertext data not as expected");
+ "Ciphertext data not as expected");
buf = ut_params->op->sym->m_src->next;
if (oop)
@@ -7695,7 +8132,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
ciphertext,
tdata->ciphertext.data + off,
to_trn_tbl[ecx],
- "GCM Ciphertext data not as expected");
+ "Ciphertext data not as expected");
off += to_trn_tbl[ecx++];
buf = buf->next;
@@ -7706,7 +8143,7 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
auth_tag,
tdata->auth_tag.data,
tdata->auth_tag.len,
- "GCM Generated auth tag not as expected");
+ "Generated auth tag not as expected");
return 0;
}
@@ -7717,21 +8154,21 @@ test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
static int
test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B(void)
{
- return test_AES_GCM_authenticated_encryption_SGL(
+ return test_authenticated_encryption_SGL(
&gcm_test_case_SGL_1, OUT_OF_PLACE, 400, 400);
}
static int
test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B(void)
{
- return test_AES_GCM_authenticated_encryption_SGL(
+ return test_authenticated_encryption_SGL(
&gcm_test_case_SGL_1, OUT_OF_PLACE, 1500, 2000);
}
static int
test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg(void)
{
- return test_AES_GCM_authenticated_encryption_SGL(
+ return test_authenticated_encryption_SGL(
&gcm_test_case_8, OUT_OF_PLACE, 400,
gcm_test_case_8.plaintext.len);
}
@@ -7740,7 +8177,7 @@ static int
test_AES_GCM_auth_encrypt_SGL_in_place_1500B(void)
{
- return test_AES_GCM_authenticated_encryption_SGL(
+ return test_authenticated_encryption_SGL(
&gcm_test_case_SGL_1, IN_PLACE, 1500, 0);
}
@@ -8037,6 +8474,22 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_stats),
+ /** AES CCM Authenticated Encryption 128 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_3),
+
+ /** AES CCM Authenticated Decryption 128 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_3),
+
/** AES GCM Authenticated Encryption */
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
@@ -8289,6 +8742,10 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_mb_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_docsis_mb_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_cipheronly_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_docsis_mb_all),
TEST_CASES_END() /**< NULL terminate unit test array */
}
@@ -8311,6 +8768,8 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_3DES_cipheronly_openssl_all),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_cipheronly_openssl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_DES_docsis_openssl_all),
TEST_CASE_ST(ut_setup, ut_teardown,
test_authonly_openssl_all),
@@ -8430,6 +8889,54 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GMAC_authentication_verify_test_case_4),
+ /** AES CCM Authenticated Encryption 128 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_3),
+
+ /** AES CCM Authenticated Decryption 128 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_3),
+
+ /** AES CCM Authenticated Encryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_192_3),
+
+ /** AES CCM Authenticated Decryption 192 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_192_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_192_3),
+
+ /** AES CCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_256_3),
+
+ /** AES CCM Authenticated Decryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_256_3),
+
/** Scatter-Gather */
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
@@ -8779,6 +9286,101 @@ static struct unit_test_suite cryptodev_sw_zuc_testsuite = {
}
};
+static struct unit_test_suite cryptodev_dpaa_sec_testsuite = {
+ .suite_name = "Crypto DPAA_SEC Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_device_configure_invalid_dev_id),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_multi_session),
+
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_dpaa_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_dpaa_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_dpaa_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_dpaa_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_dpaa_sec_all),
+
+ /** AES GCM Authenticated Encryption */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_7),
+
+ /** AES GCM Authenticated Decryption */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_7),
+
+ /** AES GCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_7),
+
+ /** AES GCM Authenticated Decryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_7),
+
+ /** Out of place tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_oop_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_oop_test_case_1),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
.suite_name = "Crypto DPAA2_SEC Unit Test Suite",
.setup = testsuite_setup,
@@ -8896,6 +9498,12 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GCM_auth_decryption_test_case_256_7),
+ /** Out of place tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_oop_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_oop_test_case_1),
+
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -8939,6 +9547,40 @@ static struct unit_test_suite cryptodev_armv8_testsuite = {
}
};
+static struct unit_test_suite cryptodev_mrvl_testsuite = {
+ .suite_name = "Crypto Device Marvell Component Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_multi_session_random_usage),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_mrvl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_mrvl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_mrvl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_mrvl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_mrvl_all),
+
+ /** Negative tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
@@ -9083,6 +9725,22 @@ test_cryptodev_armv8(void)
return unit_test_suite_runner(&cryptodev_armv8_testsuite);
}
+static int
+test_cryptodev_mrvl(void)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "MRVL PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_mrvl_testsuite);
+}
+
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
static int
@@ -9127,6 +9785,22 @@ test_cryptodev_dpaa2_sec(void /*argv __rte_unused, int argc __rte_unused*/)
return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
}
+static int
+test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "DPAA SEC PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_DPAA_SEC is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
+}
+
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9136,4 +9810,6 @@ REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g);
REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi);
REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc);
REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
+REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
+REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
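Each REGISTER_TEST_COMMAND entry above exposes its suite at the test binary's interactive RTE>> prompt (for example, cryptodev_dpaa_sec_autotest), and the matching test_cryptodev_* wrapper returns TEST_FAILED when the corresponding PMD is not loaded.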
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 4509a09a..26bfbe60 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -88,6 +88,7 @@
#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
/**
* Write (spread) data from buffer to mbuf data
@@ -152,18 +153,18 @@ pktmbuf_mtod_offset(struct rte_mbuf *mbuf, int offset) {
return rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
}
-static inline phys_addr_t
-pktmbuf_mtophys_offset(struct rte_mbuf *mbuf, int offset) {
+static inline rte_iova_t
+pktmbuf_iova_offset(struct rte_mbuf *mbuf, int offset) {
struct rte_mbuf *m;
for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
offset -= m->data_len;
if (m == NULL) {
- printf("pktmbuf_mtophys_offset: offset out of buffer\n");
+ printf("pktmbuf_iova_offset: offset out of buffer\n");
return 0;
}
- return rte_pktmbuf_mtophys_offset(m, offset);
+ return rte_pktmbuf_iova_offset(m, offset);
}
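A usage sketch for the renamed helper, assuming a two-segment mbuf chain built from a pool called mbuf_pool (illustrative names, not from the patch):

        /* Resolve the IOVA of a byte that lives in the second segment. */
        struct rte_mbuf *head = rte_pktmbuf_alloc(mbuf_pool);
        struct rte_mbuf *tail = rte_pktmbuf_alloc(mbuf_pool);
        rte_pktmbuf_append(head, 100);
        rte_pktmbuf_append(tail, 100);
        rte_pktmbuf_chain(head, tail);          /* head->next = tail */
        /* Offset 150 is past head's 100 bytes, so the helper walks into
         * tail and returns the IOVA of byte 50 within it. */
        rte_iova_t iova = pktmbuf_iova_offset(head, 150);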
static inline struct rte_mbuf *
diff --git a/test/test/test_cryptodev_gcm_test_vectors.h b/test/test/test_cryptodev_aead_test_vectors.h
index 7879c35b..e760ef93 100644
--- a/test/test/test_cryptodev_gcm_test_vectors.h
+++ b/test/test/test_cryptodev_aead_test_vectors.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,23 +30,34 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
-#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#ifndef TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_
#define GMAC_LARGE_PLAINTEXT_LENGTH 65344
-#define GCM_MAX_AAD_LENGTH 65536
+#define MAX_AAD_LENGTH 65536
#define GCM_LARGE_AAD_LENGTH 65296
-static uint8_t gcm_aad_zero_text[GCM_MAX_AAD_LENGTH] = { 0 };
+static uint8_t gcm_aad_zero_text[MAX_AAD_LENGTH] = { 0 };
-static uint8_t gcm_aad_text[GCM_MAX_AAD_LENGTH] = {
+static uint8_t gcm_aad_text[MAX_AAD_LENGTH] = {
0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
+static uint8_t ccm_aad_test_1[8] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+};
+
+static uint8_t ccm_aad_test_2[22] = {
+ 0x08, 0x40, 0x0F, 0xD2, 0xE1, 0x28, 0xA5, 0x7C,
+ 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xAB, 0xAE,
+ 0xA5, 0xB8, 0xFC, 0xBA, 0x00, 0x00
+};
+
+struct aead_test_data {
+ enum rte_crypto_aead_algorithm algo;
-struct gcm_test_data {
struct {
uint8_t data[64];
unsigned len;
@@ -101,8 +112,9 @@ struct gmac_test_data {
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_1 = {
+/** AES-GCM-128 Test Vectors */
+static const struct aead_test_data gcm_test_case_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -138,8 +150,8 @@ static const struct gcm_test_data gcm_test_case_1 = {
}
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_2 = {
+static const struct aead_test_data gcm_test_case_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -176,8 +188,8 @@ static const struct gcm_test_data gcm_test_case_2 = {
}
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_3 = {
+static const struct aead_test_data gcm_test_case_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -227,8 +239,8 @@ static const struct gcm_test_data gcm_test_case_3 = {
}
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_4 = {
+static const struct aead_test_data gcm_test_case_4 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -282,8 +294,8 @@ static const struct gcm_test_data gcm_test_case_4 = {
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_5 = {
+static const struct aead_test_data gcm_test_case_5 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -337,8 +349,8 @@ static const struct gcm_test_data gcm_test_case_5 = {
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_6 = {
+static const struct aead_test_data gcm_test_case_6 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -392,8 +404,8 @@ static const struct gcm_test_data gcm_test_case_6 = {
}
};
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_7 = {
+static const struct aead_test_data gcm_test_case_7 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -447,7 +459,8 @@ static const struct gcm_test_data gcm_test_case_7 = {
}
};
-static const struct gcm_test_data gcm_test_case_8 = {
+static const struct aead_test_data gcm_test_case_8 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -997,8 +1010,9 @@ static const struct gcm_test_data gcm_test_case_8 = {
}
};
-/** AES-192 Test Vectors */
-static const struct gcm_test_data gcm_test_case_192_1 = {
+/** AES-GCM-192 Test Vectors */
+static const struct aead_test_data gcm_test_case_192_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1035,7 +1049,8 @@ static const struct gcm_test_data gcm_test_case_192_1 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_2 = {
+static const struct aead_test_data gcm_test_case_192_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1079,7 +1094,8 @@ static const struct gcm_test_data gcm_test_case_192_2 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_3 = {
+static const struct aead_test_data gcm_test_case_192_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
@@ -1134,7 +1150,8 @@ static const struct gcm_test_data gcm_test_case_192_3 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_4 = {
+static const struct aead_test_data gcm_test_case_192_4 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
@@ -1189,7 +1206,8 @@ static const struct gcm_test_data gcm_test_case_192_4 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_5 = {
+static const struct aead_test_data gcm_test_case_192_5 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
@@ -1244,7 +1262,8 @@ static const struct gcm_test_data gcm_test_case_192_5 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_6 = {
+static const struct aead_test_data gcm_test_case_192_6 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
@@ -1299,7 +1318,8 @@ static const struct gcm_test_data gcm_test_case_192_6 = {
}
};
-static const struct gcm_test_data gcm_test_case_192_7 = {
+static const struct aead_test_data gcm_test_case_192_7 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xFE, 0xFF, 0xE9, 0x92, 0x86, 0x65, 0x73, 0x1C,
@@ -1354,8 +1374,9 @@ static const struct gcm_test_data gcm_test_case_192_7 = {
}
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_1 = {
+/** AES-GCM-256 Test Vectors */
+static const struct aead_test_data gcm_test_case_256_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1390,8 +1411,8 @@ static const struct gcm_test_data gcm_test_case_256_1 = {
}
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_2 = {
+static const struct aead_test_data gcm_test_case_256_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1430,8 +1451,8 @@ static const struct gcm_test_data gcm_test_case_256_2 = {
}
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_3 = {
+static const struct aead_test_data gcm_test_case_256_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1482,8 +1503,8 @@ static const struct gcm_test_data gcm_test_case_256_3 = {
}
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_4 = {
+static const struct aead_test_data gcm_test_case_256_4 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1535,8 +1556,8 @@ static const struct gcm_test_data gcm_test_case_256_4 = {
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_5 = {
+static const struct aead_test_data gcm_test_case_256_5 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1588,8 +1609,8 @@ static const struct gcm_test_data gcm_test_case_256_5 = {
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_6 = {
+static const struct aead_test_data gcm_test_case_256_6 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1640,8 +1661,8 @@ static const struct gcm_test_data gcm_test_case_256_6 = {
}
};
-/** AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_256_7 = {
+static const struct aead_test_data gcm_test_case_256_7 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1692,8 +1713,9 @@ static const struct gcm_test_data gcm_test_case_256_7 = {
}
};
-/** variable AAD AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_aad_1 = {
+/** variable AAD AES-GCM-128 Test Vectors */
+static const struct aead_test_data gcm_test_case_aad_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -1744,8 +1766,9 @@ static const struct gcm_test_data gcm_test_case_aad_1 = {
}
};
-/** variable AAD AES-256 Test Vectors */
-static const struct gcm_test_data gcm_test_case_aad_2 = {
+/** variable AAD AES-GCM-256 Test Vectors */
+static const struct aead_test_data gcm_test_case_aad_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -2567,7 +2590,8 @@ static const struct gmac_test_data gmac_test_case_4 = {
}
};
-static const struct gcm_test_data gcm_test_case_SGL_1 = {
+static const struct aead_test_data gcm_test_case_SGL_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
.key = {
.data = {
0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
@@ -3384,4 +3408,396 @@ static const struct gcm_test_data gcm_test_case_SGL_1 = {
}
};
-#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
+/** AES-CCM-128 Test Vectors */
+static const struct aead_test_data ccm_test_case_128_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+ },
+ .len = 7
+ },
+ .aad = {
+ .data = ccm_aad_test_1,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x20, 0x21, 0x22, 0x23
+ },
+ .len = 4
+ },
+ .ciphertext = {
+ .data = {
+ 0x71, 0x62, 0x01, 0x5B
+ },
+ .len = 4
+ },
+ .auth_tag = {
+ .data = {
+ 0x4D, 0xAC, 0x25, 0x5D
+ },
+ .len = 4
+ }
+};
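The key, nonce, AAD, payload, and 4-byte tag in this first vector appear to match NIST SP 800-38C, Appendix C, Example 1 (an observation for cross-checking; the patch itself does not cite a source).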
+
+static const struct aead_test_data ccm_test_case_128_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = ccm_aad_test_2,
+ .len = 22
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0x78, 0x45, 0xCE, 0x0B, 0x16, 0xF9, 0x76, 0x23
+ },
+ .len = 8
+ }
+};
+
+static const struct aead_test_data ccm_test_case_128_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0xF3, 0xD0, 0xA2, 0xFE, 0x9A, 0x3D, 0xBF, 0x23,
+ 0x42, 0xA6, 0x43, 0xE4, 0x32, 0x46, 0xE8, 0x0C,
+ 0x3C, 0x04, 0xD0, 0x19
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0x41, 0x83, 0x21, 0x89, 0xA3, 0xD3, 0x1B, 0x43
+ },
+ .len = 8
+ }
+};
+
+/** AES-CCM-192 Test Vectors */
+static const struct aead_test_data ccm_test_case_192_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+ },
+ .len = 7
+ },
+ .aad = {
+ .data = ccm_aad_test_1,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x20, 0x21, 0x22, 0x23
+ },
+ .len = 4
+ },
+ .ciphertext = {
+ .data = {
+ 0x18, 0xEE, 0x17, 0x30
+ },
+ .len = 4
+ },
+ .auth_tag = {
+ .data = {
+ 0xC8, 0xC3, 0x26, 0xD5
+ },
+ .len = 4
+ }
+};
+
+static const struct aead_test_data ccm_test_case_192_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = ccm_aad_test_2,
+ .len = 22
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0x41, 0xC6, 0x2D, 0xD5, 0x31, 0xF2, 0xD5, 0xC8,
+ 0xCC, 0x57, 0x01, 0x2E, 0x7E, 0x2B, 0xF1, 0x26,
+ 0x6A, 0xC7, 0xCB, 0xA5
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0x77, 0xA3, 0x41, 0xD5, 0x2A, 0xE3, 0x25, 0x37
+ },
+ .len = 8
+ }
+};
+
+static const struct aead_test_data ccm_test_case_192_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0x41, 0xC6, 0x2D, 0xD5, 0x31, 0xF2, 0xD5, 0xC8,
+ 0xCC, 0x57, 0x01, 0x2E, 0x7E, 0x2B, 0xF1, 0x26,
+ 0x6A, 0xC7, 0xCB, 0xA5
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0x84, 0x72, 0x6E, 0xE7, 0x8E, 0x8E, 0x3A, 0xC6
+ },
+ .len = 8
+ }
+};
+
+/** AES-CCM-256 Test Vectors */
+static const struct aead_test_data ccm_test_case_256_1 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16
+ },
+ .len = 7
+ },
+ .aad = {
+ .data = ccm_aad_test_1,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x20, 0x21, 0x22, 0x23
+ },
+ .len = 4
+ },
+ .ciphertext = {
+ .data = {
+ 0x8A, 0xB1, 0xA8, 0x74
+ },
+ .len = 4
+ },
+ .auth_tag = {
+ .data = {
+ 0x95, 0xFC, 0x08, 0x20
+ },
+ .len = 4
+ }
+};
+
+static const struct aead_test_data ccm_test_case_256_2 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = ccm_aad_test_2,
+ .len = 22
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0x25, 0x82, 0x89, 0x09, 0x3E, 0x39, 0x1F, 0x16,
+ 0xD2, 0x82, 0x3D, 0xF6, 0xCE, 0x97, 0x72, 0x07,
+ 0xEC, 0x23, 0x17, 0x98
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0xAB, 0x02, 0xE9, 0xD1, 0x16, 0x69, 0xED, 0x0A
+ },
+ .len = 8
+ }
+};
+
+static const struct aead_test_data ccm_test_case_256_3 = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .key = {
+ .data = {
+ 0xC9, 0x7C, 0x1F, 0x67, 0xCE, 0x37, 0x11, 0x85,
+ 0x51, 0x4A, 0x8A, 0x19, 0xF2, 0xBD, 0xD5, 0x2F,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x50, 0x30, 0xF1, 0x84, 0x44, 0x08, 0xB5,
+ 0x03, 0x97, 0x76, 0xE7, 0x0C
+ },
+ .len = 13
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 0xAE,
+ 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 0xA8, 0xEB,
+ 0x7E, 0x78, 0xA0, 0x50
+ },
+ .len = 20
+ },
+ .ciphertext = {
+ .data = {
+ 0x25, 0x82, 0x89, 0x09, 0x3E, 0x39, 0x1F, 0x16,
+ 0xD2, 0x82, 0x3D, 0xF6, 0xCE, 0x97, 0x72, 0x07,
+ 0xEC, 0x23, 0x17, 0x98
+ },
+ .len = 20
+ },
+ .auth_tag = {
+ .data = {
+ 0x15, 0x80, 0xC4, 0xC9, 0x3F, 0xAB, 0x2A, 0xFD
+ },
+ .len = 8
+ }
+};
+#endif /* TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_ */
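The renamed aead_test_data layout above folds the GCM and new CCM vectors into one structure, with the added .algo member selecting the cipher. A minimal sketch of a consumer keying off that field — the helper is hypothetical and not part of the patch; only the RTE_CRYPTO_AEAD_* enum values and the struct fields come from the code above. Note that the CCM vectors use 7- and 13-byte nonces, which sit at the RFC 3610 limits:

#include <rte_crypto_sym.h>

/* Hypothetical per-vector sanity check; illustrative only. */
static int
aead_vector_sanity(const struct aead_test_data *td)
{
	switch (td->algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		/* CCM nonces must be 7..13 bytes (RFC 3610). */
		return td->iv.len >= 7 && td->iv.len <= 13;
	case RTE_CRYPTO_AEAD_AES_GCM:
		/* most GCM vectors in this file use the standard 12-byte IV */
		return td->iv.len == 12;
	default:
		return 0;
	}
}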
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index e410018f..9c13041d 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1197,7 +1197,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1208,7 +1210,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1245,7 +1249,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1256,7 +1262,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1267,14 +1275,17 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
"(short buffers)",
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1298,14 +1309,17 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
"Verify (short buffers)",
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest",
@@ -1316,14 +1330,17 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
"(short buffers)",
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1335,14 +1352,17 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
"Verify (short buffers)",
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest",
@@ -1352,7 +1372,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1383,7 +1405,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1421,7 +1445,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1431,7 +1457,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Encryption Digest",
@@ -1441,7 +1469,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1452,7 +1481,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1462,7 +1492,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1473,7 +1505,9 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1505,7 +1539,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CBC Decryption",
@@ -1515,7 +1551,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-192-CBC Encryption",
@@ -1525,7 +1563,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1543,7 +1582,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-256-CBC Encryption",
@@ -1553,7 +1593,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-256-CBC Decryption",
@@ -1563,7 +1605,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-256-CBC OOP Encryption",
@@ -1571,7 +1615,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-256-CBC OOP Decryption",
@@ -1579,7 +1625,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-128-CTR Encryption",
@@ -1589,7 +1637,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CTR Decryption",
@@ -1599,7 +1649,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-192-CTR Encryption",
@@ -1609,7 +1661,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-192-CTR Decryption",
@@ -1619,7 +1672,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "AES-256-CTR Encryption",
@@ -1629,7 +1683,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-256-CTR Decryption",
@@ -1639,7 +1695,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 6089af4e..20f3296d 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -92,6 +92,10 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+ int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
+ int mrvl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
int nb_segs = 1;
@@ -114,9 +118,11 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
tdata->auth_key.len);
if (driver_id == dpaa2_sec_pmd ||
+ driver_id == dpaa_sec_pmd ||
driver_id == qat_pmd ||
driver_id == openssl_pmd ||
- driver_id == armv8_pmd) { /* Fall through */
+ driver_id == armv8_pmd ||
+ driver_id == mrvl_pmd) { /* Fall through */
digest_len = tdata->digest.len;
} else if (driver_id == aesni_mb_pmd ||
driver_id == scheduler_pmd) {
@@ -323,14 +329,14 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
sym_op->auth.digest.data = pktmbuf_mtod_offset
(iobuf, digest_offset);
sym_op->auth.digest.phys_addr =
- pktmbuf_mtophys_offset(iobuf,
+ pktmbuf_iova_offset(iobuf,
digest_offset);
} else {
auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
sym_op->auth.digest.data = pktmbuf_mtod_offset
(sym_op->m_src, digest_offset);
sym_op->auth.digest.phys_addr =
- pktmbuf_mtophys_offset(sym_op->m_src,
+ pktmbuf_iova_offset(sym_op->m_src,
digest_offset);
}
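Both call sites above switch from pktmbuf_mtophys_offset() to pktmbuf_iova_offset(), tracking 17.11's rename of physical-address accessors to IOVA terminology; the returned value is unchanged. A condensed sketch of the pattern, assuming the public rte_mbuf.h macro (the wrapper function itself is illustrative):

#include <rte_mbuf.h>

/* IO virtual address of a byte inside the mbuf data area --
 * functionally identical to the old mtophys accessor. */
static rte_iova_t
digest_iova(struct rte_mbuf *m, uint32_t digest_offset)
{
	return rte_pktmbuf_iova_offset(m, digest_offset);
}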
@@ -452,25 +458,13 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
struct rte_mbuf *mbuf;
uint8_t value;
- uint32_t head_unchanged_len = 0, changed_len = 0;
+ uint32_t head_unchanged_len, changed_len = 0;
uint32_t i;
mbuf = sym_op->m_src;
- if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
- /* white-box test: PMDs use some of the
- * tailroom as temp storage in verify case
- */
- head_unchanged_len = rte_pktmbuf_headroom(mbuf)
- + rte_pktmbuf_data_len(mbuf);
- changed_len = digest_len;
- } else {
- head_unchanged_len = mbuf->buf_len;
- changed_len = 0;
- }
+ head_unchanged_len = mbuf->buf_len;
for (i = 0; i < mbuf->buf_len; i++) {
- if (i == head_unchanged_len)
- i += changed_len;
value = *((uint8_t *)(mbuf->buf_addr)+i);
if (value != tmp_src_buf[i]) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
@@ -531,19 +525,6 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
changed_len += digest_len;
- if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
- /* white-box test: PMDs use some of the
- * tailroom as temp storage in verify case
- */
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- /* This is simplified, not checking digest*/
- changed_len += digest_len*2;
- } else {
- head_unchanged_len += digest_len;
- changed_len += digest_len;
- }
- }
-
for (i = 0; i < mbuf->buf_len; i++) {
if (i == head_unchanged_len)
i += changed_len;
@@ -602,8 +583,10 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
int openssl_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
- int dpaa2_pmd = rte_cryptodev_driver_id_get(
+ int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+ int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
int scheduler_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -612,6 +595,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
int qat_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+ int mrvl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
switch (test_type) {
case BLKCIPHER_AES_CHAIN_TYPE:
@@ -668,8 +653,12 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8;
else if (driver_id == scheduler_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
- else if (driver_id == dpaa2_pmd)
+ else if (driver_id == dpaa2_sec_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+ else if (driver_id == dpaa_sec_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
+ else if (driver_id == mrvl_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MRVL;
else
TEST_ASSERT(0, "Unrecognized cryptodev type");
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index 22b8d206..67c78d47 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -53,6 +53,8 @@
#define BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 0x0008 /* ARMv8 flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER 0x0010 /* Scheduler */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_MRVL 0x0080 /* Marvell flag */
#define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
BLOCKCIPHER_TEST_OP_DECRYPT)
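The two new flags extend the existing power-of-two target mask, and every pmd_mask edit in the vector headers above simply ORs them into existing cases. A sketch of how the runner gates a case on those bits (variable names mirror test_blockcipher_all_tests(); the helper itself is illustrative):

/* tc->pmd_mask lists every PMD a case is expected to pass on;
 * target_pmd_mask holds the single bit for the PMD under test. */
static int
case_enabled(const struct blockcipher_test_case *tc,
		uint32_t target_pmd_mask)
{
	return (tc->pmd_mask & target_pmd_mask) != 0;
}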
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0b6e0b8d..d09e23de 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -851,13 +851,19 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
.test_descr = "DES-CBC Encryption",
.test_data = &des_cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "DES-CBC Decryption",
.test_data = &des_cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
};
@@ -965,42 +971,48 @@ static const struct blockcipher_test_case des_docsis_test_cases[] = {
.test_data = &des_test_data_1,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI Runt Block Encryption",
.test_data = &des_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI Uneven Encryption",
.test_data = &des_test_data_3,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI Full Block Decryption",
.test_data = &des_test_data_1,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI Runt Block Decryption",
.test_data = &des_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI Uneven Decryption",
.test_data = &des_test_data_3,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "DES-DOCSIS-BPI OOP Full Block Encryption",
@@ -1059,7 +1071,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1067,7 +1080,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
@@ -1087,7 +1101,9 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1095,19 +1111,23 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1167,7 +1187,9 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest"
@@ -1176,7 +1198,9 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Encryption Digest"
@@ -1204,7 +1228,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-128-CBC Decryption",
@@ -1212,7 +1237,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
},
{
.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1246,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-192-CBC Decryption",
@@ -1228,7 +1256,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_MRVL
},
{
.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index b7fe9d9c..3b840ce6 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -368,6 +368,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -378,6 +379,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -400,6 +402,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -410,6 +413,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -432,6 +436,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -442,6 +447,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -464,6 +470,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -474,6 +481,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -496,6 +504,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -506,6 +515,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -528,6 +538,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -538,6 +549,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
};
diff --git a/test/test/test_cryptodev_perf.c b/test/test/test_cryptodev_perf.c
deleted file mode 100644
index 3b57e6d0..00000000
--- a/test/test/test_cryptodev_perf.c
+++ /dev/null
@@ -1,4932 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_common.h>
-#include <rte_mbuf.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-
-#include <rte_crypto.h>
-#include <rte_cryptodev.h>
-#include <rte_cycles.h>
-
-#include "test.h"
-#include "test_cryptodev.h"
-#include "test_cryptodev_gcm_test_vectors.h"
-
-#define AES_CIPHER_IV_LENGTH 16
-#define TRIPLE_DES_CIPHER_IV_LENGTH 8
-#define AES_GCM_AAD_LENGTH 16
-
-#define PERF_NUM_OPS_INFLIGHT (128)
-#define DEFAULT_NUM_REQS_TO_SUBMIT (10000000)
-
-struct crypto_testsuite_params {
- struct rte_mempool *mbuf_mp;
- struct rte_mempool *op_mpool;
- struct rte_mempool *sess_mp;
-
- uint16_t nb_queue_pairs;
-
- struct rte_cryptodev_config conf;
- struct rte_cryptodev_qp_conf qp_conf;
- uint8_t dev_id;
-};
-
-enum chain_mode {
- CIPHER_HASH,
- HASH_CIPHER,
- CIPHER_ONLY,
- HASH_ONLY,
- AEAD
-};
-
-
-struct symmetric_op {
- const uint8_t *aad_data;
-
- const uint8_t *p_data;
- uint32_t p_len;
-
- const uint8_t *c_data;
- uint32_t c_len;
-
- const uint8_t *t_data;
- uint32_t t_len;
-
-};
-
-struct symmetric_session_attrs {
- enum rte_crypto_cipher_operation cipher;
- enum rte_crypto_auth_operation auth;
- enum rte_crypto_aead_operation aead;
-
- enum rte_crypto_cipher_algorithm cipher_algorithm;
- const uint8_t *key_cipher_data;
- uint32_t key_cipher_len;
-
- enum rte_crypto_auth_algorithm auth_algorithm;
- const uint8_t *key_auth_data;
- uint32_t key_auth_len;
-
- enum rte_crypto_aead_algorithm aead_algorithm;
- const uint8_t *key_aead_data;
- uint32_t key_aead_len;
-
- const uint8_t *iv_data;
- uint16_t iv_len;
- uint16_t aad_len;
- uint32_t digest_len;
-};
-
-static struct rte_cryptodev_sym_session *test_crypto_session;
-
-#define ALIGN_POW2_ROUNDUP(num, align) \
- (((num) + (align) - 1) & ~((align) - 1))
-
-/*
- * This struct is needed to avoid unnecessary allocation or checking
- * of allocation of crypto params with current alloc on the fly
- * implementation.
- */
-
-struct crypto_params {
- uint8_t *aad;
- uint8_t *digest;
-};
-
-struct perf_test_params {
-
- unsigned total_operations;
- unsigned burst_size;
- unsigned buf_size;
-
- enum chain_mode chain;
-
- enum rte_crypto_cipher_algorithm cipher_algo;
- unsigned int key_length;
- enum rte_crypto_auth_algorithm auth_algo;
- enum rte_crypto_aead_algorithm aead_algo;
-
- struct symmetric_session_attrs *session_attrs;
-
- struct symmetric_op *symmetric_op;
-};
-
-#define MAX_NUM_OF_OPS_PER_UT (128)
-
-struct crypto_unittest_params {
- struct rte_crypto_sym_xform cipher_xform;
- struct rte_crypto_sym_xform auth_xform;
-
- struct rte_cryptodev_sym_session *sess;
-
- struct rte_crypto_op *op;
-
- struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
- struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
-
- uint8_t *digest;
-};
-
-static int
-test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo);
-static int
-test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo,
- enum rte_crypto_aead_algorithm aead_algo);
-static int
-test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo);
-
-static struct rte_mbuf *
-test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len);
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain);
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain __rte_unused);
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain __rte_unused);
-static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
-
-
-static const char *chain_mode_name(enum chain_mode mode)
-{
- switch (mode) {
- case CIPHER_HASH: return "cipher_hash"; break;
- case HASH_CIPHER: return "hash_cipher"; break;
- case CIPHER_ONLY: return "cipher_only"; break;
- case HASH_ONLY: return "hash_only"; break;
- case AEAD: return "aead"; break;
- default: return ""; break;
- }
-}
-
-static const char *pmd_name(uint8_t driver_id)
-{
- uint8_t null_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_NULL_PMD));
- uint8_t dpaa2_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
- uint8_t snow3g_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
- uint8_t aesni_gcm_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
- uint8_t aesni_mb_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
- uint8_t qat_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
-
- if (driver_id == null_pmd)
- return RTE_STR(CRYPTODEV_NAME_NULL_PMD);
- else if (driver_id == aesni_gcm_pmd)
- return RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD);
- else if (driver_id == aesni_mb_pmd)
- return RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD);
- else if (driver_id == qat_pmd)
- return RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
- else if (driver_id == snow3g_pmd)
- return RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD);
- else if (driver_id == dpaa2_pmd)
- return RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
- else
- return "";
-}
-
-static struct rte_mbuf *
-setup_test_string(struct rte_mempool *mpool,
- const uint8_t *data, size_t len, uint8_t blocksize)
-{
- struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
- size_t t_len = len - (blocksize ? (len % blocksize) : 0);
-
- if (m) {
- char *dst = rte_pktmbuf_append(m, t_len);
-
- if (!dst) {
- rte_pktmbuf_free(m);
- return NULL;
- }
-
- rte_memcpy(dst, (const void *)data, t_len);
- }
- return m;
-}
-
-static struct crypto_testsuite_params testsuite_params = { NULL };
-static struct crypto_unittest_params unittest_params;
-static int gbl_driver_id;
-
-static int
-testsuite_setup(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_cryptodev_info info;
- unsigned i, nb_devs, valid_dev_id = 0;
- int ret;
- uint16_t qp_id;
-
- ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
- if (ts_params->mbuf_mp == NULL) {
- /* Not already created so create */
- ts_params->mbuf_mp = rte_pktmbuf_pool_create(
- "CRYPTO_PERF_MBUFPOOL",
- NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
- rte_socket_id());
- if (ts_params->mbuf_mp == NULL) {
- RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
- return TEST_FAILED;
- }
- }
-
-
- ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_sym_xform) +
- MAXIMUM_IV_LENGTH,
- rte_socket_id());
- if (ts_params->op_mpool == NULL) {
- RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
- return TEST_FAILED;
- }
-
- /* Create an AESNI MB device if required */
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD))) {
- nb_devs = rte_cryptodev_device_count_by_driver(
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)));
- if (nb_devs < 1) {
- ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
- }
- }
-
- /* Create an AESNI GCM device if required */
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD))) {
- nb_devs = rte_cryptodev_device_count_by_driver(
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD)));
- if (nb_devs < 1) {
- ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
- }
- }
-
- /* Create a SNOW3G device if required */
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD))) {
- nb_devs = rte_cryptodev_device_count_by_driver(
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD)));
- if (nb_devs < 1) {
- ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
- }
- }
-
- /* Create an OPENSSL device if required */
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD))) {
- nb_devs = rte_cryptodev_device_count_by_driver(
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)));
- if (nb_devs < 1) {
- ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
- NULL);
-
- TEST_ASSERT(ret == 0, "Failed to create "
- "instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
- }
- }
-
- /* Create an ARMv8 device if required */
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_ARMV8_PMD))) {
- nb_devs = rte_cryptodev_device_count_by_driver(
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_ARMV8_PMD)));
- if (nb_devs < 1) {
- ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
- NULL);
-
- TEST_ASSERT(ret == 0, "Failed to create "
- "instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
- }
- }
-
- nb_devs = rte_cryptodev_count();
- if (nb_devs < 1) {
- RTE_LOG(ERR, USER1, "No crypto devices found?\n");
- return TEST_FAILED;
- }
-
- /* Search for the first valid */
- for (i = 0; i < nb_devs; i++) {
- rte_cryptodev_info_get(i, &info);
- if (info.driver_id == (uint8_t) gbl_driver_id) {
- ts_params->dev_id = i;
- valid_dev_id = 1;
- break;
- }
- }
-
- if (!valid_dev_id)
- return TEST_FAILED;
-
- /*
- * Using Crypto Device Id 0 by default.
- * Set up all the qps on this device
- */
-
- rte_cryptodev_info_get(ts_params->dev_id, &info);
-
- ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
- ts_params->conf.socket_id = SOCKET_ID_ANY;
-
- unsigned int session_size = sizeof(struct rte_cryptodev_sym_session) +
- rte_cryptodev_get_private_session_size(ts_params->dev_id);
-
- ts_params->sess_mp = rte_mempool_create(
- "test_sess_mp_perf",
- info.sym.max_nb_sessions,
- session_size,
- 0, 0, NULL, NULL, NULL,
- NULL, SOCKET_ID_ANY,
- 0);
-
- TEST_ASSERT_NOT_NULL(ts_params->sess_mp,
- "session mempool allocation failed");
-
- TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
- &ts_params->conf),
- "Failed to configure cryptodev %u",
- ts_params->dev_id);
-
- ts_params->qp_conf.nb_descriptors = PERF_NUM_OPS_INFLIGHT;
- for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
-
- TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
- ts_params->dev_id, qp_id,
- &ts_params->qp_conf,
- rte_cryptodev_socket_id(ts_params->dev_id),
- ts_params->sess_mp),
- "Failed to setup queue pair %u on cryptodev %u",
- qp_id, ts_params->dev_id);
- }
-
- return TEST_SUCCESS;
-}
-static void
-testsuite_teardown(void)
-{
- struct crypto_testsuite_params *ts_params =
- &testsuite_params;
-
- if (ts_params->mbuf_mp != NULL)
- RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_avail_count(ts_params->mbuf_mp));
- if (ts_params->op_mpool != NULL)
- RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_OP POOL count %u\n",
- rte_mempool_avail_count(ts_params->op_mpool));
- /* Free session mempool */
- if (ts_params->sess_mp != NULL) {
- rte_mempool_free(ts_params->sess_mp);
- ts_params->sess_mp = NULL;
- }
-
-}
-
-static int
-ut_setup(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Clear unit test parameters before running test */
- memset(ut_params, 0, sizeof(*ut_params));
-
- rte_cryptodev_stats_reset(ts_params->dev_id);
-
- /* Start the device */
- TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->dev_id),
- "Failed to start cryptodev %u",
- ts_params->dev_id);
-
- return TEST_SUCCESS;
-}
-
-static void
-ut_teardown(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
- struct rte_cryptodev_stats stats;
-
- unsigned i;
-
- /* free crypto session structure */
- if (ut_params->sess) {
- rte_cryptodev_sym_session_clear(ts_params->dev_id,
- ut_params->sess);
- rte_cryptodev_sym_session_free(ut_params->sess);
- }
-
- /* free crypto operation structure */
- if (ut_params->op)
- rte_crypto_op_free(ut_params->op);
-
- for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
- if (ut_params->obuf[i])
- rte_pktmbuf_free(ut_params->obuf[i]);
- else if (ut_params->ibuf[i])
- rte_pktmbuf_free(ut_params->ibuf[i]);
- }
-
- if (ts_params->mbuf_mp != NULL)
- RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_avail_count(ts_params->mbuf_mp));
-
- rte_cryptodev_stats_get(ts_params->dev_id, &stats);
-
- /* Stop the device */
- rte_cryptodev_stop(ts_params->dev_id);
-}
-
-const char plaintext_quote[] =
- "THE COUNT OF MONTE CRISTO by Alexandre Dumas, Pere Chapter 1. "
- "Marseilles--The Arrival. On the 24th of February, 1815, the "
- "look-out at Notre-Dame de la Garde signalled the three-master,"
- " the Pharaon from Smyrna, Trieste, and Naples. As usual, a "
- "pilot put off immediately, and rounding the Chateau d'If, got "
- "on board the vessel between Cape Morgion and Rion island. "
- "Immediately, and according to custom, the ramparts of Fort "
- "Saint-Jean were covered with spectators; it is always an event "
- "at Marseilles for a ship to come into port, especially when "
- "this ship, like the Pharaon, has been built, rigged, and laden"
- " at the old Phocee docks, and belongs to an owner of the city."
- " The ship drew on and had safely passed the strait, which some"
- " volcanic shock has made between the Calasareigne and Jaros "
- "islands; had doubled Pomegue, and approached the harbor under"
- " topsails, jib, and spanker, but so slowly and sedately that"
- " the idlers, with that instinct which is the forerunner of "
- "evil, asked one another what misfortune could have happened "
- "on board. However, those experienced in navigation saw plainly"
- " that if any accident had occurred, it was not to the vessel "
- "herself, for she bore down with all the evidence of being "
- "skilfully handled, the anchor a-cockbill, the jib-boom guys "
- "already eased off, and standing by the side of the pilot, who"
- " was steering the Pharaon towards the narrow entrance of the"
- " inner port, was a young man, who, with activity and vigilant"
- " eye, watched every motion of the ship, and repeated each "
- "direction of the pilot. The vague disquietude which prevailed "
- "among the spectators had so much affected one of the crowd "
- "that he did not await the arrival of the vessel in harbor, but"
- " jumping into a small skiff, desired to be pulled alongside "
- "the Pharaon, which he reached as she rounded into La Reserve "
- "basin. When the young man on board saw this person approach, "
- "he left his station by the pilot, and, hat in hand, leaned "
- "over the ship's bulwarks. He was a fine, tall, slim young "
- "fellow of eighteen or twenty, with black eyes, and hair as "
- "dark as a raven's wing; and his whole appearance bespoke that "
- "calmness and resolution peculiar to men accustomed from their "
- "cradle to contend with danger. \"Ah, is it you, Dantes?\" "
- "cried the man in the skiff. \"What's the matter? and why have "
- "you such an air of sadness aboard?\" \"A great misfortune, M. "
- "Morrel,\" replied the young man,--\"a great misfortune, for me"
- " especially! Off Civita Vecchia we lost our brave Captain "
- "Leclere.\" \"And the cargo?\" inquired the owner, eagerly. "
- "\"Is all safe, M. Morrel; and I think you will be satisfied on"
- " that head. But poor Captain Leclere--\" \"What happened to "
- "him?\" asked the owner, with an air of considerable "
- "resignation. \"What happened to the worthy captain?\" \"He "
- "died.\" \"Fell into the sea?\" \"No, sir, he died of "
- "brain-fever in dreadful agony.\" Then turning to the crew, "
- "he said, \"Bear a hand there, to take in sail!\" All hands "
- "obeyed, and at once the eight or ten seamen who composed the "
- "crew, sprang to their respective stations at the spanker "
- "brails and outhaul, topsail sheets and halyards, the jib "
- "downhaul, and the topsail clewlines and buntlines. The young "
- "sailor gave a look to see that his orders were promptly and "
- "accurately obeyed, and then turned again to the owner. \"And "
- "how did this misfortune occur?\" inquired the latter, resuming"
- " the interrupted conversation. \"Alas, sir, in the most "
- "unexpected manner. After a long talk with the harbor-master, "
- "Captain Leclere left Naples greatly disturbed in mind. In "
- "twenty-four hours he was attacked by a fever, and died three "
- "days afterwards. We performed the usual burial service, and he"
- " is at his rest, sewn up in his hammock with a thirty-six "
- "pound shot at his head and his heels, off El Giglio island. "
- "We bring to his widow his sword and cross of honor. It was "
- "worth while, truly,\" added the young man with a melancholy "
- "smile, \"to make war against the English for ten years, and "
- "to die in his bed at last, like everybody else.";
-
-#define QUOTE_LEN_64B (64)
-#define QUOTE_LEN_128B (128)
-#define QUOTE_LEN_256B (256)
-#define QUOTE_LEN_512B (512)
-#define QUOTE_LEN_768B (768)
-#define QUOTE_LEN_1024B (1024)
-#define QUOTE_LEN_1280B (1280)
-#define QUOTE_LEN_1536B (1536)
-#define QUOTE_LEN_1792B (1792)
-#define QUOTE_LEN_2048B (2048)
-
-
-/* ***** AES-CBC / HMAC-SHA256 Performance Tests ***** */
-
-#define HMAC_KEY_LENGTH_SHA256 (DIGEST_BYTE_LENGTH_SHA256)
-
-#define CIPHER_KEY_LENGTH_AES_CBC (16)
-#define CIPHER_IV_LENGTH_AES_CBC (CIPHER_KEY_LENGTH_AES_CBC)
-
-static uint8_t aes_cbc_128_key[] = {
- 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
- 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA };
-
-static uint8_t aes_cbc_128_iv[] = {
- 0xf5, 0xd3, 0x89, 0x0f, 0x47, 0x00, 0xcb, 0x52,
- 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1 };
-
-static uint8_t hmac_sha256_key[] = {
- 0xff, 0xcb, 0x37, 0x30, 0x1d, 0x4a, 0xc2, 0x41,
- 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
- 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
- 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60 };
-
-
-/* Cipher text output */
-
-static const uint8_t AES_CBC_ciphertext_64B[] = {
- 0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50,
- 0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0,
- 0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36,
- 0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e,
- 0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c,
- 0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e,
- 0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6,
- 0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c
-};
-
-static const uint8_t AES_CBC_ciphertext_128B[] = {
- 0x79, 0x92, 0x65, 0xc8, 0xfb, 0x0a, 0xc7, 0xc4,
- 0x9b, 0x3b, 0xbe, 0x69, 0x7f, 0x7c, 0xf4, 0x4e,
- 0xa5, 0x0d, 0xf6, 0x33, 0xc4, 0xdf, 0xf3, 0x0d,
- 0xdb, 0xb9, 0x68, 0x34, 0xb0, 0x0d, 0xbd, 0xb9,
- 0xa7, 0xf3, 0x86, 0x50, 0x2a, 0xbe, 0x50, 0x5d,
- 0xb3, 0xbe, 0x72, 0xf9, 0x02, 0xb1, 0x69, 0x0b,
- 0x8c, 0x96, 0x4c, 0x3c, 0x0c, 0x1e, 0x76, 0xe5,
- 0x7e, 0x75, 0xdd, 0xd0, 0xa9, 0x75, 0x00, 0x13,
- 0x6b, 0x1e, 0xc0, 0xad, 0xfc, 0x03, 0xb5, 0x99,
- 0xdc, 0x37, 0x35, 0xfc, 0x16, 0x34, 0xfd, 0xb4,
- 0xea, 0x1e, 0xb6, 0x51, 0xdf, 0xab, 0x87, 0xd6,
- 0x87, 0x41, 0xfa, 0x1c, 0xc6, 0x78, 0xa6, 0x3c,
- 0x1d, 0x76, 0xfe, 0xff, 0x65, 0xfc, 0x63, 0x1e,
- 0x1f, 0xe2, 0x7c, 0x9b, 0xa2, 0x72, 0xc3, 0x34,
- 0x23, 0xdf, 0x01, 0xf0, 0xfd, 0x02, 0x8b, 0x97,
- 0x00, 0x2b, 0x97, 0x4e, 0xab, 0x98, 0x21, 0x3c
-};
-
-static const uint8_t AES_CBC_ciphertext_256B[] = {
- 0xc7, 0x71, 0x2b, 0xed, 0x2c, 0x97, 0x59, 0xfa,
- 0xcf, 0x5a, 0xb9, 0x31, 0x92, 0xe0, 0xc9, 0x92,
- 0xc0, 0x2d, 0xd5, 0x9c, 0x84, 0xbf, 0x70, 0x36,
- 0x13, 0x48, 0xe0, 0xb1, 0xbf, 0x6c, 0xcd, 0x91,
- 0xa0, 0xc3, 0x57, 0x6c, 0x3f, 0x0e, 0x34, 0x41,
- 0xe7, 0x9c, 0xc0, 0xec, 0x18, 0x0c, 0x05, 0x52,
- 0x78, 0xe2, 0x3c, 0x6e, 0xdf, 0xa5, 0x49, 0xc7,
- 0xf2, 0x55, 0x00, 0x8f, 0x65, 0x6d, 0x4b, 0xd0,
- 0xcb, 0xd4, 0xd2, 0x0b, 0xea, 0xf4, 0xb0, 0x85,
- 0x61, 0x9e, 0x36, 0xc0, 0x71, 0xb7, 0x80, 0xad,
- 0x40, 0x78, 0xb4, 0x70, 0x2b, 0xe8, 0x80, 0xc5,
- 0x19, 0x35, 0x96, 0x55, 0x3b, 0x40, 0x03, 0xbb,
- 0x9f, 0xa6, 0xc2, 0x82, 0x92, 0x04, 0xc3, 0xa6,
- 0x96, 0xc4, 0x7f, 0x4c, 0x3e, 0x3c, 0x79, 0x82,
- 0x88, 0x8b, 0x3f, 0x8b, 0xc5, 0x9f, 0x44, 0xbe,
- 0x71, 0xe7, 0x09, 0xa2, 0x40, 0xa2, 0x23, 0x4e,
- 0x9f, 0x31, 0xab, 0x6f, 0xdf, 0x59, 0x40, 0xe1,
- 0x12, 0x15, 0x55, 0x4b, 0xea, 0x3f, 0xa1, 0x41,
- 0x4f, 0xaf, 0xcd, 0x27, 0x2a, 0x61, 0xa1, 0x9e,
- 0x82, 0x30, 0x05, 0x05, 0x55, 0xce, 0x99, 0xd3,
- 0x8f, 0x3f, 0x86, 0x79, 0xdc, 0x9f, 0x33, 0x07,
- 0x75, 0x26, 0xc8, 0x72, 0x81, 0x0f, 0x9b, 0xf7,
- 0xb1, 0xfb, 0xd3, 0x91, 0x36, 0x08, 0xab, 0x26,
- 0x70, 0x53, 0x0c, 0x99, 0xfd, 0xa9, 0x07, 0xb4,
- 0xe9, 0xce, 0xc1, 0xd6, 0xd2, 0x2c, 0x71, 0x80,
- 0xec, 0x59, 0x61, 0x0b, 0x24, 0xf0, 0x6d, 0x33,
- 0x73, 0x45, 0x6e, 0x80, 0x03, 0x45, 0xf2, 0x76,
- 0xa5, 0x8a, 0xc9, 0xcf, 0xaf, 0x4a, 0xed, 0x35,
- 0xc0, 0x97, 0x52, 0xc5, 0x00, 0xdf, 0xef, 0xc7,
- 0x9f, 0xf2, 0xe8, 0x15, 0x3e, 0xb3, 0x30, 0xe7,
- 0x00, 0xd0, 0x4e, 0xeb, 0x79, 0xf6, 0xf6, 0xcf,
- 0xf0, 0xe7, 0x61, 0xd5, 0x3d, 0x6a, 0x73, 0x9d
-};
-
-static const uint8_t AES_CBC_ciphertext_512B[] = {
- 0xb4, 0xc6, 0xc6, 0x5f, 0x7e, 0xca, 0x05, 0x70,
- 0x21, 0x7b, 0x92, 0x9e, 0x23, 0xe7, 0x92, 0xb8,
- 0x27, 0x3d, 0x20, 0x29, 0x57, 0xfa, 0x1f, 0x26,
- 0x0a, 0x04, 0x34, 0xa6, 0xf2, 0xdc, 0x44, 0xb6,
- 0x43, 0x40, 0x62, 0xde, 0x0c, 0xde, 0x1c, 0x30,
- 0x43, 0x85, 0x0b, 0xe8, 0x93, 0x1f, 0xa1, 0x2a,
- 0x8a, 0x27, 0x35, 0x39, 0x14, 0x9f, 0x37, 0x64,
- 0x59, 0xb5, 0x0e, 0x96, 0x82, 0x5d, 0x63, 0x45,
- 0xd6, 0x93, 0x89, 0x46, 0xe4, 0x71, 0x31, 0xeb,
- 0x0e, 0xd1, 0x7b, 0xda, 0x90, 0xb5, 0x81, 0xac,
- 0x76, 0x54, 0x54, 0x85, 0x0b, 0xa9, 0x46, 0x9c,
- 0xf0, 0xfd, 0xde, 0x5d, 0xa8, 0xe3, 0xee, 0xe9,
- 0xf4, 0x9d, 0x34, 0x76, 0x39, 0xe7, 0xc3, 0x4a,
- 0x84, 0x38, 0x92, 0x61, 0xf1, 0x12, 0x9f, 0x05,
- 0xda, 0xdb, 0xc1, 0xd4, 0xb0, 0xa0, 0x27, 0x19,
- 0xa0, 0x56, 0x5d, 0x9b, 0xcc, 0x47, 0x7c, 0x15,
- 0x1d, 0x52, 0x66, 0xd5, 0xff, 0xef, 0x12, 0x23,
- 0x86, 0xe2, 0xee, 0x81, 0x2c, 0x3d, 0x7d, 0x28,
- 0xd5, 0x42, 0xdf, 0xdb, 0x75, 0x1c, 0xeb, 0xdf,
- 0x13, 0x23, 0xd5, 0x17, 0x89, 0xea, 0xd7, 0x01,
- 0xff, 0x57, 0x6a, 0x44, 0x61, 0xf4, 0xea, 0xbe,
- 0x97, 0x9b, 0xc2, 0xb1, 0x9c, 0x5d, 0xff, 0x4f,
- 0x73, 0x2d, 0x3f, 0x57, 0x28, 0x38, 0xbf, 0x3d,
- 0x9f, 0xda, 0x49, 0x55, 0x8f, 0xb2, 0x77, 0xec,
- 0x0f, 0xbc, 0xce, 0xb8, 0xc6, 0xe1, 0x03, 0xed,
- 0x35, 0x9c, 0xf2, 0x4d, 0xa4, 0x29, 0x6c, 0xd6,
- 0x6e, 0x05, 0x53, 0x46, 0xc1, 0x41, 0x09, 0x36,
- 0x0b, 0x7d, 0xf4, 0x9e, 0x0f, 0xba, 0x86, 0x33,
- 0xdd, 0xf1, 0xa7, 0xf7, 0xd5, 0x29, 0xa8, 0xa7,
- 0x4d, 0xce, 0x0c, 0xf5, 0xb4, 0x6c, 0xd8, 0x27,
- 0xb0, 0x87, 0x2a, 0x6f, 0x7f, 0x3f, 0x8f, 0xc3,
- 0xe2, 0x3e, 0x94, 0xcf, 0x61, 0x4a, 0x09, 0x3d,
- 0xf9, 0x55, 0x19, 0x31, 0xf2, 0xd2, 0x4a, 0x3e,
- 0xc1, 0xf5, 0xed, 0x7c, 0x45, 0xb0, 0x0c, 0x7b,
- 0xdd, 0xa6, 0x0a, 0x26, 0x66, 0xec, 0x85, 0x49,
- 0x00, 0x38, 0x05, 0x7c, 0x9c, 0x1c, 0x92, 0xf5,
- 0xf7, 0xdb, 0x5d, 0xbd, 0x61, 0x0c, 0xc9, 0xaf,
- 0xfd, 0x57, 0x3f, 0xee, 0x2b, 0xad, 0x73, 0xef,
- 0xa3, 0xc1, 0x66, 0x26, 0x44, 0x5e, 0xf9, 0x12,
- 0x86, 0x66, 0xa9, 0x61, 0x75, 0xa1, 0xbc, 0x40,
- 0x7f, 0xa8, 0x08, 0x02, 0xc0, 0x76, 0x0e, 0x76,
- 0xb3, 0x26, 0x3d, 0x1c, 0x40, 0x65, 0xe4, 0x18,
- 0x0f, 0x62, 0x17, 0x8f, 0x1e, 0x61, 0xb8, 0x08,
- 0x83, 0x54, 0x42, 0x11, 0x03, 0x30, 0x8e, 0xb7,
- 0xc1, 0x9c, 0xec, 0x69, 0x52, 0x95, 0xfb, 0x7b,
- 0x1a, 0x0c, 0x20, 0x24, 0xf7, 0xb8, 0x38, 0x0c,
- 0xb8, 0x7b, 0xb6, 0x69, 0x70, 0xd0, 0x61, 0xb9,
- 0x70, 0x06, 0xc2, 0x5b, 0x20, 0x47, 0xf7, 0xd9,
- 0x32, 0xc2, 0xf2, 0x90, 0xb6, 0x4d, 0xcd, 0x3c,
- 0x6d, 0x74, 0xea, 0x82, 0x35, 0x1b, 0x08, 0x44,
- 0xba, 0xb7, 0x33, 0x82, 0x33, 0x27, 0x54, 0x77,
- 0x6e, 0x58, 0xfe, 0x46, 0x5a, 0xb4, 0x88, 0x53,
- 0x8d, 0x9b, 0xb1, 0xab, 0xdf, 0x04, 0xe1, 0xfb,
- 0xd7, 0x1e, 0xd7, 0x38, 0x64, 0x54, 0xba, 0xb0,
- 0x6c, 0x84, 0x7a, 0x0f, 0xa7, 0x80, 0x6b, 0x86,
- 0xd9, 0xc9, 0xc6, 0x31, 0x95, 0xfa, 0x8a, 0x2c,
- 0x14, 0xe1, 0x85, 0x66, 0x27, 0xfd, 0x63, 0x3e,
- 0xf0, 0xfa, 0x81, 0xc9, 0x89, 0x4f, 0xe2, 0x6a,
- 0x8c, 0x17, 0xb5, 0xc7, 0x9f, 0x5d, 0x3f, 0x6b,
- 0x3f, 0xcd, 0x13, 0x7a, 0x3c, 0xe6, 0x4e, 0xfa,
- 0x7a, 0x10, 0xb8, 0x7c, 0x40, 0xec, 0x93, 0x11,
- 0x1f, 0xd0, 0x9e, 0xc3, 0x56, 0xb9, 0xf5, 0x21,
- 0x18, 0x41, 0x31, 0xea, 0x01, 0x8d, 0xea, 0x1c,
- 0x95, 0x5e, 0x56, 0x33, 0xbc, 0x7a, 0x3f, 0x6f
-};
-
-static const uint8_t AES_CBC_ciphertext_768B[] = {
- 0x3e, 0x7f, 0x9e, 0x4c, 0x88, 0x15, 0x68, 0x69,
- 0x10, 0x09, 0xe1, 0xa7, 0x0f, 0x27, 0x88, 0x2d,
- 0x90, 0x73, 0x4f, 0x67, 0xd3, 0x8b, 0xaf, 0xa1,
- 0x2c, 0x37, 0xa5, 0x6c, 0x7c, 0xbd, 0x95, 0x4c,
- 0x82, 0xcf, 0x05, 0x49, 0x16, 0x5c, 0xe7, 0x06,
- 0xd4, 0xcb, 0x55, 0x65, 0x9a, 0xd0, 0xe1, 0x46,
- 0x3a, 0x37, 0x71, 0xad, 0xb0, 0xb4, 0x99, 0x1e,
- 0x23, 0x57, 0x48, 0x96, 0x9c, 0xc5, 0xc4, 0xdb,
- 0x64, 0x3e, 0xc9, 0x7f, 0x90, 0x5a, 0xa0, 0x08,
- 0x75, 0x4c, 0x09, 0x06, 0x31, 0x6e, 0x59, 0x29,
- 0xfc, 0x2f, 0x72, 0xde, 0xf2, 0x40, 0x5a, 0xfe,
- 0xd3, 0x66, 0x64, 0xb8, 0x9c, 0xc9, 0xa6, 0x1f,
- 0xc3, 0x52, 0xcd, 0xb5, 0xd1, 0x4f, 0x43, 0x3f,
- 0xf4, 0x59, 0x25, 0xc4, 0xdd, 0x3e, 0x58, 0x7c,
- 0x21, 0xd6, 0x21, 0xce, 0xa4, 0xbe, 0x08, 0x23,
- 0x46, 0x68, 0xc0, 0x00, 0x91, 0x47, 0xca, 0x9b,
- 0xe0, 0xb4, 0xe3, 0xab, 0xbf, 0xcf, 0x68, 0x26,
- 0x97, 0x23, 0x09, 0x93, 0x64, 0x8f, 0x57, 0x59,
- 0xe2, 0x41, 0x7c, 0xa2, 0x48, 0x7e, 0xd5, 0x2c,
- 0x54, 0x09, 0x1b, 0x07, 0x94, 0xca, 0x39, 0x83,
- 0xdd, 0xf4, 0x7a, 0x1d, 0x2d, 0xdd, 0x67, 0xf7,
- 0x3c, 0x30, 0x89, 0x3e, 0xc1, 0xdc, 0x1d, 0x8f,
- 0xfc, 0xb1, 0xe9, 0x13, 0x31, 0xb0, 0x16, 0xdb,
- 0x88, 0xf2, 0x32, 0x7e, 0x73, 0xa3, 0xdf, 0x08,
- 0x6b, 0x53, 0x92, 0x08, 0xc9, 0x9d, 0x98, 0xb2,
- 0xf4, 0x8c, 0xb1, 0x95, 0xdc, 0xb6, 0xfc, 0xec,
- 0xf1, 0xc9, 0x0d, 0x6d, 0x42, 0x2c, 0xf5, 0x38,
- 0x29, 0xf4, 0xd8, 0x98, 0x0f, 0xb0, 0x81, 0xa5,
- 0xaa, 0xe6, 0x1f, 0x6e, 0x87, 0x32, 0x1b, 0x02,
- 0x07, 0x57, 0x38, 0x83, 0xf3, 0xe4, 0x54, 0x7c,
- 0xa8, 0x43, 0xdf, 0x3f, 0x42, 0xfd, 0x67, 0x28,
- 0x06, 0x4d, 0xea, 0xce, 0x1f, 0x84, 0x4a, 0xcd,
- 0x8c, 0x61, 0x5e, 0x8f, 0x61, 0xed, 0x84, 0x03,
- 0x53, 0x6a, 0x9e, 0xbf, 0x68, 0x83, 0xa7, 0x42,
- 0x56, 0x57, 0xcd, 0x45, 0x29, 0xfc, 0x7b, 0x07,
- 0xfc, 0xe9, 0xb9, 0x42, 0xfd, 0x29, 0xd5, 0xfd,
- 0x98, 0x11, 0xd1, 0x8d, 0x67, 0x29, 0x47, 0x61,
- 0xd8, 0x27, 0x37, 0x79, 0x29, 0xd1, 0x94, 0x6f,
- 0x8d, 0xf3, 0x1b, 0x3d, 0x6a, 0xb1, 0x59, 0xef,
- 0x1b, 0xd4, 0x70, 0x0e, 0xac, 0xab, 0xa0, 0x2b,
- 0x1f, 0x5e, 0x04, 0xf0, 0x0e, 0x35, 0x72, 0x90,
- 0xfc, 0xcf, 0x86, 0x43, 0xea, 0x45, 0x6d, 0x22,
- 0x63, 0x06, 0x1a, 0x58, 0xd7, 0x2d, 0xc5, 0xb0,
- 0x60, 0x69, 0xe8, 0x53, 0xc2, 0xa2, 0x57, 0x83,
- 0xc4, 0x31, 0xb4, 0xc6, 0xb3, 0xa1, 0x77, 0xb3,
- 0x1c, 0xca, 0x89, 0x3f, 0xf5, 0x10, 0x3b, 0x36,
- 0x31, 0x7d, 0x00, 0x46, 0x00, 0x92, 0xa0, 0xa0,
- 0x34, 0xd8, 0x5e, 0x62, 0xa9, 0xe0, 0x23, 0x37,
- 0x50, 0x85, 0xc7, 0x3a, 0x20, 0xa3, 0x98, 0xc0,
- 0xac, 0x20, 0x06, 0x0f, 0x17, 0x3c, 0xfc, 0x43,
- 0x8c, 0x9d, 0xec, 0xf5, 0x9a, 0x35, 0x96, 0xf7,
- 0xb7, 0x4c, 0xf9, 0x69, 0xf8, 0xd4, 0x1e, 0x9e,
- 0xf9, 0x7c, 0xc4, 0xd2, 0x11, 0x14, 0x41, 0xb9,
- 0x89, 0xd6, 0x07, 0xd2, 0x37, 0x07, 0x5e, 0x5e,
- 0xae, 0x60, 0xdc, 0xe4, 0xeb, 0x38, 0x48, 0x6d,
- 0x95, 0x8d, 0x71, 0xf2, 0xba, 0xda, 0x5f, 0x08,
- 0x9d, 0x4a, 0x0f, 0x56, 0x90, 0x64, 0xab, 0xb6,
- 0x88, 0x22, 0xa8, 0x90, 0x1f, 0x76, 0x2c, 0x83,
- 0x43, 0xce, 0x32, 0x55, 0x45, 0x84, 0x57, 0x43,
- 0xf9, 0xa8, 0xd1, 0x4f, 0xe3, 0xc1, 0x72, 0x9c,
- 0xeb, 0x64, 0xf7, 0xe4, 0x61, 0x2b, 0x93, 0xd1,
- 0x1f, 0xbb, 0x5c, 0xff, 0xa1, 0x59, 0x69, 0xcf,
- 0xf7, 0xaf, 0x58, 0x45, 0xd5, 0x3e, 0x98, 0x7d,
- 0x26, 0x39, 0x5c, 0x75, 0x3c, 0x4a, 0xbf, 0x5e,
- 0x12, 0x10, 0xb0, 0x93, 0x0f, 0x86, 0x82, 0xcf,
- 0xb2, 0xec, 0x70, 0x5c, 0x0b, 0xad, 0x5d, 0x63,
- 0x65, 0x32, 0xa6, 0x04, 0x58, 0x03, 0x91, 0x2b,
- 0xdb, 0x8f, 0xd3, 0xa3, 0x2b, 0x3a, 0xf5, 0xa1,
- 0x62, 0x6c, 0xb6, 0xf0, 0x13, 0x3b, 0x8c, 0x07,
- 0x10, 0x82, 0xc9, 0x56, 0x24, 0x87, 0xfc, 0x56,
- 0xe8, 0xef, 0x90, 0x8b, 0xd6, 0x48, 0xda, 0x53,
- 0x04, 0x49, 0x41, 0xa4, 0x67, 0xe0, 0x33, 0x24,
- 0x6b, 0x9c, 0x07, 0x55, 0x4c, 0x5d, 0xe9, 0x35,
- 0xfa, 0xbd, 0xea, 0xa8, 0x3f, 0xe9, 0xf5, 0x20,
- 0x5c, 0x60, 0x0f, 0x0d, 0x24, 0xcb, 0x1a, 0xd6,
- 0xe8, 0x5c, 0xa8, 0x42, 0xae, 0xd0, 0xd2, 0xf2,
- 0xa8, 0xbe, 0xea, 0x0f, 0x8d, 0xfb, 0x81, 0xa3,
- 0xa4, 0xef, 0xb7, 0x3e, 0x91, 0xbd, 0x26, 0x0f,
- 0x8e, 0xf1, 0xb2, 0xa5, 0x47, 0x06, 0xfa, 0x40,
- 0x8b, 0x31, 0x7a, 0x5a, 0x74, 0x2a, 0x0a, 0x7c,
- 0x62, 0x5d, 0x39, 0xa4, 0xae, 0x14, 0x85, 0x08,
- 0x5b, 0x20, 0x85, 0xf1, 0x57, 0x6e, 0x71, 0x13,
- 0x4e, 0x2b, 0x49, 0x87, 0x01, 0xdf, 0x37, 0xed,
- 0x28, 0xee, 0x4d, 0xa1, 0xf4, 0xb3, 0x3b, 0xba,
- 0x2d, 0xb3, 0x46, 0x17, 0x84, 0x80, 0x9d, 0xd7,
- 0x93, 0x1f, 0x28, 0x7c, 0xf5, 0xf9, 0xd6, 0x85,
- 0x8c, 0xa5, 0x44, 0xe9, 0x2c, 0x65, 0x51, 0x5f,
- 0x53, 0x7a, 0x09, 0xd9, 0x30, 0x16, 0x95, 0x89,
- 0x9c, 0x0b, 0xef, 0x90, 0x6d, 0x23, 0xd3, 0x48,
- 0x57, 0x3b, 0x55, 0x69, 0x96, 0xfc, 0xf7, 0x52,
- 0x92, 0x38, 0x36, 0xbf, 0xa9, 0x0a, 0xbb, 0x68,
- 0x45, 0x08, 0x25, 0xee, 0x59, 0xfe, 0xee, 0xf2,
- 0x2c, 0xd4, 0x5f, 0x78, 0x59, 0x0d, 0x90, 0xf1,
- 0xd7, 0xe4, 0x39, 0x0e, 0x46, 0x36, 0xf5, 0x75,
- 0x03, 0x3c, 0x28, 0xfb, 0xfa, 0x8f, 0xef, 0xc9,
- 0x61, 0x00, 0x94, 0xc3, 0xd2, 0x0f, 0xd9, 0xda
-};
-
-static const uint8_t AES_CBC_ciphertext_1024B[] = {
- 0x7d, 0x01, 0x7e, 0x2f, 0x92, 0xb3, 0xea, 0x72,
- 0x4a, 0x3f, 0x10, 0xf9, 0x2b, 0xb0, 0xd5, 0xb9,
- 0x19, 0x68, 0x94, 0xe9, 0x93, 0xe9, 0xd5, 0x26,
- 0x20, 0x44, 0xe2, 0x47, 0x15, 0x8d, 0x75, 0x48,
- 0x8e, 0xe4, 0x40, 0x81, 0xb5, 0x06, 0xa8, 0xb8,
- 0x0e, 0x0f, 0x3b, 0xbc, 0x5b, 0xbe, 0x3b, 0xa2,
- 0x2a, 0x0c, 0x48, 0x98, 0x19, 0xdf, 0xe9, 0x25,
- 0x75, 0xab, 0x93, 0x44, 0xb1, 0x72, 0x70, 0xbb,
- 0x20, 0xcf, 0x78, 0xe9, 0x4d, 0xc6, 0xa9, 0xa9,
- 0x84, 0x78, 0xc5, 0xc0, 0xc4, 0xc9, 0x79, 0x1a,
- 0xbc, 0x61, 0x25, 0x5f, 0xac, 0x01, 0x03, 0xb7,
- 0xef, 0x07, 0xf2, 0x62, 0x98, 0xee, 0xe3, 0xad,
- 0x94, 0x75, 0x30, 0x67, 0xb9, 0x15, 0x00, 0xe7,
- 0x11, 0x32, 0x2e, 0x6b, 0x55, 0x9f, 0xac, 0x68,
- 0xde, 0x61, 0x05, 0x80, 0x01, 0xf3, 0xad, 0xab,
- 0xaf, 0x45, 0xe0, 0xf4, 0x68, 0x5c, 0xc0, 0x52,
- 0x92, 0xc8, 0x21, 0xb6, 0xf5, 0x8a, 0x1d, 0xbb,
- 0xfc, 0x4a, 0x11, 0x62, 0xa2, 0xc4, 0xf1, 0x2d,
- 0x0e, 0xb2, 0xc7, 0x17, 0x34, 0xb4, 0x2a, 0x54,
- 0x81, 0xc2, 0x1e, 0xcf, 0x51, 0x0a, 0x76, 0x54,
- 0xf1, 0x48, 0x0d, 0x5c, 0xcd, 0x38, 0x3e, 0x38,
- 0x3e, 0xf8, 0x46, 0x1d, 0x00, 0xf5, 0x62, 0xe1,
- 0x5c, 0xb7, 0x8d, 0xce, 0xd0, 0x3f, 0xbb, 0x22,
- 0xf1, 0xe5, 0xb1, 0xa0, 0x58, 0x5e, 0x3c, 0x0f,
- 0x15, 0xd1, 0xac, 0x3e, 0xc7, 0x72, 0xc4, 0xde,
- 0x8b, 0x95, 0x3e, 0x91, 0xf7, 0x1d, 0x04, 0x9a,
- 0xc8, 0xe4, 0xbf, 0xd3, 0x22, 0xca, 0x4a, 0xdc,
- 0xb6, 0x16, 0x79, 0x81, 0x75, 0x2f, 0x6b, 0xa7,
- 0x04, 0x98, 0xa7, 0x4e, 0xc1, 0x19, 0x90, 0x33,
- 0x33, 0x3c, 0x7f, 0xdd, 0xac, 0x09, 0x0c, 0xc3,
- 0x91, 0x34, 0x74, 0xab, 0xa5, 0x35, 0x0a, 0x13,
- 0xc3, 0x56, 0x67, 0x6d, 0x1a, 0x3e, 0xbf, 0x56,
- 0x06, 0x67, 0x15, 0x5f, 0xfc, 0x8b, 0xa2, 0x3c,
- 0x5e, 0xaf, 0x56, 0x1f, 0xe3, 0x2e, 0x9d, 0x0a,
- 0xf9, 0x9b, 0xc7, 0xb5, 0x03, 0x1c, 0x68, 0x99,
- 0xfa, 0x3c, 0x37, 0x59, 0xc1, 0xf7, 0x6a, 0x83,
- 0x22, 0xee, 0xca, 0x7f, 0x7d, 0x49, 0xe6, 0x48,
- 0x84, 0x54, 0x7a, 0xff, 0xb3, 0x72, 0x21, 0xd8,
- 0x7a, 0x5d, 0xb1, 0x4b, 0xcc, 0x01, 0x6f, 0x90,
- 0xc6, 0x68, 0x1c, 0x2c, 0xa1, 0xe2, 0x74, 0x40,
- 0x26, 0x9b, 0x57, 0x53, 0xa3, 0x7c, 0x0b, 0x0d,
- 0xcf, 0x05, 0x5d, 0x62, 0x4f, 0x75, 0x06, 0x62,
- 0x1f, 0x26, 0x32, 0xaa, 0x25, 0xcc, 0x26, 0x8d,
- 0xae, 0x01, 0x47, 0xa3, 0x00, 0x42, 0xe2, 0x4c,
- 0xee, 0x29, 0xa2, 0x81, 0xa0, 0xfd, 0xeb, 0xff,
- 0x9a, 0x66, 0x6e, 0x47, 0x5b, 0xab, 0x93, 0x5a,
- 0x02, 0x6d, 0x6f, 0xf2, 0x6e, 0x02, 0x9d, 0xb1,
- 0xab, 0x56, 0xdc, 0x8b, 0x9b, 0x17, 0xa8, 0xfb,
- 0x87, 0x42, 0x7c, 0x91, 0x1e, 0x14, 0xc6, 0x6f,
- 0xdc, 0xf0, 0x27, 0x30, 0xfa, 0x3f, 0xc4, 0xad,
- 0x57, 0x85, 0xd2, 0xc9, 0x32, 0x2c, 0x13, 0xa6,
- 0x04, 0x04, 0x50, 0x05, 0x2f, 0x72, 0xd9, 0x44,
- 0x55, 0x6e, 0x93, 0x40, 0xed, 0x7e, 0xd4, 0x40,
- 0x3e, 0x88, 0x3b, 0x8b, 0xb6, 0xeb, 0xc6, 0x5d,
- 0x9c, 0x99, 0xa1, 0xcf, 0x30, 0xb2, 0xdc, 0x48,
- 0x8a, 0x01, 0xa7, 0x61, 0x77, 0x50, 0x14, 0xf3,
- 0x0c, 0x49, 0x53, 0xb3, 0xb4, 0xb4, 0x28, 0x41,
- 0x4a, 0x2d, 0xd2, 0x4d, 0x2a, 0x30, 0x31, 0x83,
- 0x03, 0x5e, 0xaa, 0xd3, 0xa3, 0xd1, 0xa1, 0xca,
- 0x62, 0xf0, 0xe1, 0xf2, 0xff, 0xf0, 0x19, 0xa6,
- 0xde, 0x22, 0x47, 0xb5, 0x28, 0x7d, 0xf7, 0x07,
- 0x16, 0x0d, 0xb1, 0x55, 0x81, 0x95, 0xe5, 0x1d,
- 0x4d, 0x78, 0xa9, 0x3e, 0xce, 0xe3, 0x1c, 0xf9,
- 0x47, 0xc8, 0xec, 0xc5, 0xc5, 0x93, 0x4c, 0x34,
- 0x20, 0x6b, 0xee, 0x9a, 0xe6, 0x86, 0x57, 0x58,
- 0xd5, 0x58, 0xf1, 0x33, 0x10, 0x29, 0x9e, 0x93,
- 0x2f, 0xf5, 0x90, 0x00, 0x17, 0x67, 0x4f, 0x39,
- 0x18, 0xe1, 0xcf, 0x55, 0x78, 0xbb, 0xe6, 0x29,
- 0x3e, 0x77, 0xd5, 0x48, 0xb7, 0x42, 0x72, 0x53,
- 0x27, 0xfa, 0x5b, 0xe0, 0x36, 0x14, 0x97, 0xb8,
- 0x9b, 0x3c, 0x09, 0x77, 0xc1, 0x0a, 0xe4, 0xa2,
- 0x63, 0xfc, 0xbe, 0x5c, 0x17, 0xcf, 0x01, 0xf5,
- 0x03, 0x0f, 0x17, 0xbc, 0x93, 0xdd, 0x5f, 0xe2,
- 0xf3, 0x08, 0xa8, 0xb1, 0x85, 0xb6, 0x34, 0x3f,
- 0x87, 0x42, 0xa5, 0x42, 0x3b, 0x0e, 0xd6, 0x83,
- 0x6a, 0xfd, 0x5d, 0xc9, 0x67, 0xd5, 0x51, 0xc9,
- 0x2a, 0x4e, 0x91, 0xb0, 0x59, 0xb2, 0x0f, 0xa2,
- 0xe6, 0x47, 0x73, 0xc2, 0xa2, 0xae, 0xbb, 0xc8,
- 0x42, 0xa3, 0x2a, 0x27, 0x29, 0x48, 0x8c, 0x54,
- 0x6c, 0xec, 0x00, 0x2a, 0x42, 0xa3, 0x7a, 0x0f,
- 0x12, 0x66, 0x6b, 0x96, 0xf6, 0xd0, 0x56, 0x4f,
- 0x49, 0x5c, 0x47, 0xec, 0x05, 0x62, 0x54, 0xb2,
- 0x64, 0x5a, 0x69, 0x1f, 0x19, 0xb4, 0x84, 0x5c,
- 0xbe, 0x48, 0x8e, 0xfc, 0x58, 0x21, 0xce, 0xfa,
- 0xaa, 0x84, 0xd2, 0xc1, 0x08, 0xb3, 0x87, 0x0f,
- 0x4f, 0xa3, 0x3a, 0xb6, 0x44, 0xbe, 0x2e, 0x9a,
- 0xdd, 0xb5, 0x44, 0x80, 0xca, 0xf4, 0xc3, 0x6e,
- 0xba, 0x93, 0x77, 0xe0, 0x53, 0xfb, 0x37, 0xfb,
- 0x88, 0xc3, 0x1f, 0x25, 0xde, 0x3e, 0x11, 0xf4,
- 0x89, 0xe7, 0xd1, 0x3b, 0xb4, 0x23, 0xcb, 0x70,
- 0xba, 0x35, 0x97, 0x7c, 0xbe, 0x84, 0x13, 0xcf,
- 0xe0, 0x4d, 0x33, 0x91, 0x71, 0x85, 0xbb, 0x4b,
- 0x97, 0x32, 0x5d, 0xa0, 0xb9, 0x8f, 0xdc, 0x27,
- 0x5a, 0xeb, 0x71, 0xf1, 0xd5, 0x0d, 0x65, 0xb4,
- 0x22, 0x81, 0xde, 0xa7, 0x58, 0x20, 0x0b, 0x18,
- 0x11, 0x76, 0x5c, 0xe6, 0x6a, 0x2c, 0x99, 0x69,
- 0xdc, 0xed, 0x67, 0x08, 0x5d, 0x5e, 0xe9, 0x1e,
- 0x55, 0x70, 0xc1, 0x5a, 0x76, 0x1b, 0x8d, 0x2e,
- 0x0d, 0xf9, 0xcc, 0x30, 0x8c, 0x44, 0x0f, 0x63,
- 0x8c, 0x42, 0x8a, 0x9f, 0x4c, 0xd1, 0x48, 0x28,
- 0x8a, 0xf5, 0x56, 0x2e, 0x23, 0x12, 0xfe, 0x67,
- 0x9a, 0x13, 0x65, 0x75, 0x83, 0xf1, 0x3c, 0x98,
- 0x07, 0x6b, 0xb7, 0x27, 0x5b, 0xf0, 0x70, 0xda,
- 0x30, 0xf8, 0x74, 0x4e, 0x7a, 0x32, 0x84, 0xcc,
- 0x0e, 0xcd, 0x80, 0x8b, 0x82, 0x31, 0x9a, 0x48,
- 0xcf, 0x75, 0x00, 0x1f, 0x4f, 0xe0, 0x8e, 0xa3,
- 0x6a, 0x2c, 0xd4, 0x73, 0x4c, 0x63, 0x7c, 0xa6,
- 0x4d, 0x5e, 0xfd, 0x43, 0x3b, 0x27, 0xe1, 0x5e,
- 0xa3, 0xa9, 0x5c, 0x3b, 0x60, 0xdd, 0xc6, 0x8d,
- 0x5a, 0xf1, 0x3e, 0x89, 0x4b, 0x24, 0xcf, 0x01,
- 0x3a, 0x2d, 0x44, 0xe7, 0xda, 0xe7, 0xa1, 0xac,
- 0x11, 0x05, 0x0c, 0xa9, 0x7a, 0x82, 0x8c, 0x5c,
- 0x29, 0x68, 0x9c, 0x73, 0x13, 0xcc, 0x67, 0x32,
- 0x11, 0x5e, 0xe5, 0xcc, 0x8c, 0xf5, 0xa7, 0x52,
- 0x83, 0x9a, 0x70, 0xef, 0xde, 0x55, 0x9c, 0xc7,
- 0x8a, 0xed, 0xad, 0x28, 0x4a, 0xc5, 0x92, 0x6d,
- 0x8e, 0x47, 0xca, 0xe3, 0xf8, 0x77, 0xb5, 0x26,
- 0x64, 0x84, 0xc2, 0xf1, 0xd7, 0xae, 0x0c, 0xb9,
- 0x39, 0x0f, 0x43, 0x6b, 0xe9, 0xe0, 0x09, 0x4b,
- 0xe5, 0xe3, 0x17, 0xa6, 0x68, 0x69, 0x46, 0xf4,
- 0xf0, 0x68, 0x7f, 0x2f, 0x1c, 0x7e, 0x4c, 0xd2,
- 0xb5, 0xc6, 0x16, 0x85, 0xcf, 0x02, 0x4c, 0x89,
- 0x0b, 0x25, 0xb0, 0xeb, 0xf3, 0x77, 0x08, 0x6a,
- 0x46, 0x5c, 0xf6, 0x2f, 0xf1, 0x24, 0xc3, 0x4d,
- 0x80, 0x60, 0x4d, 0x69, 0x98, 0xde, 0xc7, 0xa1,
- 0xf6, 0x4e, 0x18, 0x0c, 0x2a, 0xb0, 0xb2, 0xe0,
- 0x46, 0xe7, 0x49, 0x37, 0xc8, 0x5a, 0x23, 0x24,
- 0xe3, 0x0f, 0xcc, 0x92, 0xb4, 0x8d, 0xdc, 0x9e
-};
-
-static const uint8_t AES_CBC_ciphertext_1280B[] = {
- 0x91, 0x99, 0x5e, 0x9e, 0x84, 0xff, 0x59, 0x45,
- 0xc1, 0xf4, 0xbc, 0x9c, 0xb9, 0x30, 0x6c, 0x51,
- 0x73, 0x52, 0xb4, 0x44, 0x09, 0x79, 0xe2, 0x89,
- 0x75, 0xeb, 0x54, 0x26, 0xce, 0xd8, 0x24, 0x98,
- 0xaa, 0xf8, 0x13, 0x16, 0x68, 0x58, 0xc4, 0x82,
- 0x0e, 0x31, 0xd3, 0x6a, 0x13, 0x58, 0x31, 0xe9,
- 0x3a, 0xc1, 0x8b, 0xc5, 0x3f, 0x50, 0x42, 0xd1,
- 0x93, 0xe4, 0x9b, 0x65, 0x2b, 0xf4, 0x1d, 0x9e,
- 0x2d, 0xdb, 0x48, 0xef, 0x9a, 0x01, 0x68, 0xb6,
- 0xea, 0x7a, 0x2b, 0xad, 0xfe, 0x77, 0x44, 0x7e,
- 0x5a, 0xc5, 0x64, 0xb4, 0xfe, 0x5c, 0x80, 0xf3,
- 0x20, 0x7e, 0xaf, 0x5b, 0xf8, 0xd1, 0x38, 0xa0,
- 0x8d, 0x09, 0x77, 0x06, 0xfe, 0xf5, 0xf4, 0xe4,
- 0xee, 0xb8, 0x95, 0x27, 0xed, 0x07, 0xb8, 0xaa,
- 0x25, 0xb4, 0xe1, 0x4c, 0xeb, 0x3f, 0xdb, 0x39,
- 0x66, 0x28, 0x1b, 0x60, 0x42, 0x8b, 0x99, 0xd9,
- 0x49, 0xd6, 0x8c, 0xa4, 0x9d, 0xd8, 0x93, 0x58,
- 0x8f, 0xfa, 0xd3, 0xf7, 0x37, 0x9c, 0x88, 0xab,
- 0x16, 0x50, 0xfe, 0x01, 0x1f, 0x88, 0x48, 0xbe,
- 0x21, 0xa9, 0x90, 0x9e, 0x73, 0xe9, 0x82, 0xf7,
- 0xbf, 0x4b, 0x43, 0xf4, 0xbf, 0x22, 0x3c, 0x45,
- 0x47, 0x95, 0x5b, 0x49, 0x71, 0x07, 0x1c, 0x8b,
- 0x49, 0xa4, 0xa3, 0x49, 0xc4, 0x5f, 0xb1, 0xf5,
- 0xe3, 0x6b, 0xf1, 0xdc, 0xea, 0x92, 0x7b, 0x29,
- 0x40, 0xc9, 0x39, 0x5f, 0xdb, 0xbd, 0xf3, 0x6a,
- 0x09, 0x9b, 0x2a, 0x5e, 0xc7, 0x0b, 0x25, 0x94,
- 0x55, 0x71, 0x9c, 0x7e, 0x0e, 0xb4, 0x08, 0x12,
- 0x8c, 0x6e, 0x77, 0xb8, 0x29, 0xf1, 0xc6, 0x71,
- 0x04, 0x40, 0x77, 0x18, 0x3f, 0x01, 0x09, 0x9c,
- 0x23, 0x2b, 0x5d, 0x2a, 0x88, 0x20, 0x23, 0x59,
- 0x74, 0x2a, 0x67, 0x8f, 0xb7, 0xba, 0x38, 0x9f,
- 0x0f, 0xcf, 0x94, 0xdf, 0xe1, 0x8f, 0x35, 0x5e,
- 0x34, 0x0c, 0x32, 0x92, 0x2b, 0x23, 0x81, 0xf4,
- 0x73, 0xa0, 0x5a, 0x2a, 0xbd, 0xa6, 0x6b, 0xae,
- 0x43, 0xe2, 0xdc, 0x01, 0xc1, 0xc6, 0xc3, 0x04,
- 0x06, 0xbb, 0xb0, 0x89, 0xb3, 0x4e, 0xbd, 0x81,
- 0x1b, 0x03, 0x63, 0x93, 0xed, 0x4e, 0xf6, 0xe5,
- 0x94, 0x6f, 0xd6, 0xf3, 0x20, 0xf3, 0xbc, 0x30,
- 0xc5, 0xd6, 0xbe, 0x1c, 0x05, 0x34, 0x26, 0x4d,
- 0x46, 0x5e, 0x56, 0x63, 0xfb, 0xdb, 0xcd, 0xed,
- 0xb0, 0x7f, 0x83, 0x94, 0x55, 0x54, 0x2f, 0xab,
- 0xc9, 0xb7, 0x16, 0x4f, 0x9e, 0x93, 0x25, 0xd7,
- 0x9f, 0x39, 0x2b, 0x63, 0xcf, 0x1e, 0xa3, 0x0e,
- 0x28, 0x47, 0x8a, 0x5f, 0x40, 0x02, 0x89, 0x1f,
- 0x83, 0xe7, 0x87, 0xd1, 0x90, 0x17, 0xb8, 0x27,
- 0x64, 0xe1, 0xe1, 0x48, 0x5a, 0x55, 0x74, 0x99,
- 0x27, 0x9d, 0x05, 0x67, 0xda, 0x70, 0x12, 0x8f,
- 0x94, 0x96, 0xfd, 0x36, 0xa4, 0x1d, 0x22, 0xe5,
- 0x0b, 0xe5, 0x2f, 0x38, 0x55, 0xa3, 0x5d, 0x0b,
- 0xcf, 0xd4, 0xa9, 0xb8, 0xd6, 0x9a, 0x16, 0x2e,
- 0x6c, 0x4a, 0x25, 0x51, 0x7a, 0x09, 0x48, 0xdd,
- 0xf0, 0xa3, 0x5b, 0x08, 0x1e, 0x2f, 0x03, 0x91,
- 0x80, 0xe8, 0x0f, 0xe9, 0x5a, 0x2f, 0x90, 0xd3,
- 0x64, 0xed, 0xd7, 0x51, 0x17, 0x66, 0x53, 0x40,
- 0x43, 0x74, 0xef, 0x0a, 0x0d, 0x49, 0x41, 0xf2,
- 0x67, 0x6e, 0xea, 0x14, 0xc8, 0x74, 0xd6, 0xa9,
- 0xb9, 0x6a, 0xe3, 0xec, 0x7d, 0xe8, 0x6a, 0x21,
- 0x3a, 0x52, 0x42, 0xfe, 0x9a, 0x15, 0x6d, 0x60,
- 0x64, 0x88, 0xc5, 0xb2, 0x8b, 0x15, 0x2c, 0xff,
- 0xe2, 0x35, 0xc3, 0xee, 0x9f, 0xcd, 0x82, 0xd9,
- 0x14, 0x35, 0x2a, 0xb7, 0xf5, 0x2f, 0x7b, 0xbc,
- 0x01, 0xfd, 0xa8, 0xe0, 0x21, 0x4e, 0x73, 0xf9,
- 0xf2, 0xb0, 0x79, 0xc9, 0x10, 0x52, 0x8f, 0xa8,
- 0x3e, 0x3b, 0xbe, 0xc5, 0xde, 0xf6, 0x53, 0xe3,
- 0x1c, 0x25, 0x3a, 0x1f, 0x13, 0xbf, 0x13, 0xbb,
- 0x94, 0xc2, 0x97, 0x43, 0x64, 0x47, 0x8f, 0x76,
- 0xd7, 0xaa, 0xeb, 0xa4, 0x03, 0x50, 0x0c, 0x10,
- 0x50, 0xd8, 0xf7, 0x75, 0x52, 0x42, 0xe2, 0x94,
- 0x67, 0xf4, 0x60, 0xfb, 0x21, 0x9b, 0x7a, 0x05,
- 0x50, 0x7c, 0x1b, 0x4a, 0x8b, 0x29, 0xe1, 0xac,
- 0xd7, 0x99, 0xfd, 0x0d, 0x65, 0x92, 0xcd, 0x23,
- 0xa7, 0x35, 0x8e, 0x13, 0xf2, 0xe4, 0x10, 0x74,
- 0xc6, 0x4f, 0x19, 0xf7, 0x01, 0x0b, 0x46, 0xab,
- 0xef, 0x8d, 0x4a, 0x4a, 0xfa, 0xda, 0xf3, 0xfb,
- 0x40, 0x28, 0x88, 0xa2, 0x65, 0x98, 0x4d, 0x88,
- 0xc7, 0xbf, 0x00, 0xc8, 0xd0, 0x91, 0xcb, 0x89,
- 0x2f, 0xb0, 0x85, 0xfc, 0xa1, 0xc1, 0x9e, 0x83,
- 0x88, 0xad, 0x95, 0xc0, 0x31, 0xa0, 0xad, 0xa2,
- 0x42, 0xb5, 0xe7, 0x55, 0xd4, 0x93, 0x5a, 0x74,
- 0x4e, 0x41, 0xc3, 0xcf, 0x96, 0x83, 0x46, 0xa1,
- 0xb7, 0x5b, 0xb1, 0x34, 0x67, 0x4e, 0xb1, 0xd7,
- 0x40, 0x20, 0x72, 0xe9, 0xc8, 0x74, 0xb7, 0xde,
- 0x72, 0x29, 0x77, 0x4c, 0x74, 0x7e, 0xcc, 0x18,
- 0xa5, 0x8d, 0x79, 0x8c, 0xd6, 0x6e, 0xcb, 0xd9,
- 0xe1, 0x61, 0xe7, 0x36, 0xbc, 0x37, 0xea, 0xee,
- 0xd8, 0x3c, 0x5e, 0x7c, 0x47, 0x50, 0xd5, 0xec,
- 0x37, 0xc5, 0x63, 0xc3, 0xc9, 0x99, 0x23, 0x9f,
- 0x64, 0x39, 0xdf, 0x13, 0x96, 0x6d, 0xea, 0x08,
- 0x0c, 0x27, 0x2d, 0xfe, 0x0f, 0xc2, 0xa3, 0x97,
- 0x04, 0x12, 0x66, 0x0d, 0x94, 0xbf, 0xbe, 0x3e,
- 0xb9, 0xcf, 0x8e, 0xc1, 0x9d, 0xb1, 0x64, 0x17,
- 0x54, 0x92, 0x3f, 0x0a, 0x51, 0xc8, 0xf5, 0x82,
- 0x98, 0x73, 0x03, 0xc0, 0x5a, 0x51, 0x01, 0x67,
- 0xb4, 0x01, 0x04, 0x06, 0xbc, 0x37, 0xde, 0x96,
- 0x23, 0x3c, 0xce, 0x98, 0x3f, 0xd6, 0x51, 0x1b,
- 0x01, 0x83, 0x0a, 0x1c, 0xf9, 0xeb, 0x7e, 0x72,
- 0xa9, 0x51, 0x23, 0xc8, 0xd7, 0x2f, 0x12, 0xbc,
- 0x08, 0xac, 0x07, 0xe7, 0xa7, 0xe6, 0x46, 0xae,
- 0x54, 0xa3, 0xc2, 0xf2, 0x05, 0x2d, 0x06, 0x5e,
- 0xfc, 0xe2, 0xa2, 0x23, 0xac, 0x86, 0xf2, 0x54,
- 0x83, 0x4a, 0xb6, 0x48, 0x93, 0xa1, 0x78, 0xc2,
- 0x07, 0xec, 0x82, 0xf0, 0x74, 0xa9, 0x18, 0xe9,
- 0x53, 0x44, 0x49, 0xc2, 0x94, 0xf8, 0x94, 0x92,
- 0x08, 0x3f, 0xbf, 0xa6, 0xe5, 0xc6, 0x03, 0x8a,
- 0xc6, 0x90, 0x48, 0x6c, 0xee, 0xbd, 0x44, 0x92,
- 0x1f, 0x2a, 0xce, 0x1d, 0xb8, 0x31, 0xa2, 0x9d,
- 0x24, 0x93, 0xa8, 0x9f, 0x36, 0x00, 0x04, 0x7b,
- 0xcb, 0x93, 0x59, 0xa1, 0x53, 0xdb, 0x13, 0x7a,
- 0x54, 0xb1, 0x04, 0xdb, 0xce, 0x48, 0x4f, 0xe5,
- 0x2f, 0xcb, 0xdf, 0x8f, 0x50, 0x7c, 0xfc, 0x76,
- 0x80, 0xb4, 0xdc, 0x3b, 0xc8, 0x98, 0x95, 0xf5,
- 0x50, 0xba, 0x70, 0x5a, 0x97, 0xd5, 0xfc, 0x98,
- 0x4d, 0xf3, 0x61, 0x0f, 0xcf, 0xac, 0x49, 0x0a,
- 0xdb, 0xc1, 0x42, 0x8f, 0xb6, 0x29, 0xd5, 0x65,
- 0xef, 0x83, 0xf1, 0x30, 0x4b, 0x84, 0xd0, 0x69,
- 0xde, 0xd2, 0x99, 0xe5, 0xec, 0xd3, 0x90, 0x86,
- 0x39, 0x2a, 0x6e, 0xd5, 0x32, 0xe3, 0x0d, 0x2d,
- 0x01, 0x8b, 0x17, 0x55, 0x1d, 0x65, 0x57, 0xbf,
- 0xd8, 0x75, 0xa4, 0x85, 0xb6, 0x4e, 0x35, 0x14,
- 0x58, 0xe4, 0x89, 0xb8, 0x7a, 0x58, 0x86, 0x0c,
- 0xbd, 0x8b, 0x05, 0x7b, 0x63, 0xc0, 0x86, 0x80,
- 0x33, 0x46, 0xd4, 0x9b, 0xb6, 0x0a, 0xeb, 0x6c,
- 0xae, 0xd6, 0x57, 0x7a, 0xc7, 0x59, 0x33, 0xa0,
- 0xda, 0xa4, 0x12, 0xbf, 0x52, 0x22, 0x05, 0x8d,
- 0xeb, 0xee, 0xd5, 0xec, 0xea, 0x29, 0x9b, 0x76,
- 0x95, 0x50, 0x6d, 0x99, 0xe1, 0x45, 0x63, 0x09,
- 0x16, 0x5f, 0xb0, 0xf2, 0x5b, 0x08, 0x33, 0xdd,
- 0x8f, 0xb7, 0x60, 0x7a, 0x8e, 0xc6, 0xfc, 0xac,
- 0xa9, 0x56, 0x2c, 0xa9, 0x8b, 0x74, 0x33, 0xad,
- 0x2a, 0x7e, 0x96, 0xb6, 0xba, 0x22, 0x28, 0xcf,
- 0x4d, 0x96, 0xb7, 0xd1, 0xfa, 0x99, 0x4a, 0x61,
- 0xe6, 0x84, 0xd1, 0x94, 0xca, 0xf5, 0x86, 0xb0,
- 0xba, 0x34, 0x7a, 0x04, 0xcc, 0xd4, 0x81, 0xcd,
- 0xd9, 0x86, 0xb6, 0xe0, 0x5a, 0x6f, 0x9b, 0x99,
- 0xf0, 0xdf, 0x49, 0xae, 0x6d, 0xc2, 0x54, 0x67,
- 0xe0, 0xb4, 0x34, 0x2d, 0x1c, 0x46, 0xdf, 0x73,
- 0x3b, 0x45, 0x43, 0xe7, 0x1f, 0xa3, 0x36, 0x35,
- 0x25, 0x33, 0xd9, 0xc0, 0x54, 0x38, 0x6e, 0x6b,
- 0x80, 0xcf, 0x50, 0xa4, 0xb6, 0x21, 0x17, 0xfd,
- 0x9b, 0x5c, 0x36, 0xca, 0xcc, 0x73, 0x73, 0xad,
- 0xe0, 0x57, 0x77, 0x90, 0x0e, 0x7f, 0x0f, 0x87,
- 0x7f, 0xdb, 0x73, 0xbf, 0xda, 0xc2, 0xb3, 0x05,
- 0x22, 0x06, 0xf5, 0xa3, 0xfc, 0x1e, 0x8f, 0xda,
- 0xcf, 0x49, 0xd6, 0xb3, 0x66, 0x2c, 0xb5, 0x00,
- 0xaf, 0x85, 0x6e, 0xb8, 0x5b, 0x8c, 0xa1, 0xa4,
- 0x21, 0xce, 0x40, 0xf3, 0x98, 0xac, 0xec, 0x88,
- 0x62, 0x43, 0x2a, 0xac, 0xca, 0xcf, 0xb9, 0x30,
- 0xeb, 0xfc, 0xef, 0xf0, 0x6e, 0x64, 0x6d, 0xe7,
- 0x54, 0x88, 0x6b, 0x22, 0x29, 0xbe, 0xa5, 0x8c,
- 0x31, 0x23, 0x3b, 0x4a, 0x80, 0x37, 0xe6, 0xd0,
- 0x05, 0xfc, 0x10, 0x0e, 0xdd, 0xbb, 0x00, 0xc5,
- 0x07, 0x20, 0x59, 0xd3, 0x41, 0x17, 0x86, 0x46,
- 0xab, 0x68, 0xf6, 0x48, 0x3c, 0xea, 0x5a, 0x06,
- 0x30, 0x21, 0x19, 0xed, 0x74, 0xbe, 0x0b, 0x97,
- 0xee, 0x91, 0x35, 0x94, 0x1f, 0xcb, 0x68, 0x7f,
- 0xe4, 0x48, 0xb0, 0x16, 0xfb, 0xf0, 0x74, 0xdb,
- 0x06, 0x59, 0x2e, 0x5a, 0x9c, 0xce, 0x8f, 0x7d,
- 0xba, 0x48, 0xd5, 0x3f, 0x5c, 0xb0, 0xc2, 0x33,
- 0x48, 0x60, 0x17, 0x08, 0x85, 0xba, 0xff, 0xb9,
- 0x34, 0x0a, 0x3d, 0x8f, 0x21, 0x13, 0x12, 0x1b
-};
-
-static const uint8_t AES_CBC_ciphertext_1536B[] = {
- 0x89, 0x93, 0x05, 0x99, 0xa9, 0xed, 0xea, 0x62,
- 0xc9, 0xda, 0x51, 0x15, 0xce, 0x42, 0x91, 0xc3,
- 0x80, 0xc8, 0x03, 0x88, 0xc2, 0x63, 0xda, 0x53,
- 0x1a, 0xf3, 0xeb, 0xd5, 0xba, 0x6f, 0x23, 0xb2,
- 0xed, 0x8f, 0x89, 0xb1, 0xb3, 0xca, 0x90, 0x7a,
- 0xdd, 0x3f, 0xf6, 0xca, 0x86, 0x58, 0x54, 0xbc,
- 0xab, 0x0f, 0xf4, 0xab, 0x6d, 0x5d, 0x42, 0xd0,
- 0x17, 0x49, 0x17, 0xd1, 0x93, 0xea, 0xe8, 0x22,
- 0xc1, 0x34, 0x9f, 0x3a, 0x3b, 0xaa, 0xe9, 0x1b,
- 0x93, 0xff, 0x6b, 0x68, 0xba, 0xe6, 0xd2, 0x39,
- 0x3d, 0x55, 0x34, 0x8f, 0x98, 0x86, 0xb4, 0xd8,
- 0x7c, 0x0d, 0x3e, 0x01, 0x63, 0x04, 0x01, 0xff,
- 0x16, 0x0f, 0x51, 0x5f, 0x73, 0x53, 0xf0, 0x3a,
- 0x38, 0xb4, 0x4d, 0x8d, 0xaf, 0xa3, 0xca, 0x2f,
- 0x6f, 0xdf, 0xc0, 0x41, 0x6c, 0x48, 0x60, 0x1a,
- 0xe4, 0xe7, 0x8a, 0x65, 0x6f, 0x8d, 0xd7, 0xe1,
- 0x10, 0xab, 0x78, 0x5b, 0xb9, 0x69, 0x1f, 0xe0,
- 0x5c, 0xf1, 0x19, 0x12, 0x21, 0xc7, 0x51, 0xbc,
- 0x61, 0x5f, 0xc0, 0x36, 0x17, 0xc0, 0x28, 0xd9,
- 0x51, 0xcb, 0x43, 0xd9, 0xfa, 0xd1, 0xad, 0x79,
- 0x69, 0x86, 0x49, 0xc5, 0xe5, 0x69, 0x27, 0xce,
- 0x22, 0xd0, 0xe1, 0x6a, 0xf9, 0x02, 0xca, 0x6c,
- 0x34, 0xc7, 0xb8, 0x02, 0xc1, 0x38, 0x7f, 0xd5,
- 0x15, 0xf5, 0xd6, 0xeb, 0xf9, 0x30, 0x40, 0x43,
- 0xea, 0x87, 0xde, 0x35, 0xf6, 0x83, 0x59, 0x09,
- 0x68, 0x62, 0x00, 0x87, 0xb8, 0xe7, 0xca, 0x05,
- 0x0f, 0xac, 0x42, 0x58, 0x45, 0xaa, 0xc9, 0x9b,
- 0xfd, 0x2a, 0xda, 0x65, 0x33, 0x93, 0x9d, 0xc6,
- 0x93, 0x8d, 0xe2, 0xc5, 0x71, 0xc1, 0x5c, 0x13,
- 0xde, 0x7b, 0xd4, 0xb9, 0x4c, 0x35, 0x61, 0x85,
- 0x90, 0x78, 0xf7, 0x81, 0x98, 0x45, 0x99, 0x24,
- 0x58, 0x73, 0x28, 0xf8, 0x31, 0xab, 0x54, 0x2e,
- 0xc0, 0x38, 0x77, 0x25, 0x5c, 0x06, 0x9c, 0xc3,
- 0x69, 0x21, 0x92, 0x76, 0xe1, 0x16, 0xdc, 0xa9,
- 0xee, 0xb6, 0x80, 0x66, 0x43, 0x11, 0x24, 0xb3,
- 0x07, 0x17, 0x89, 0x0f, 0xcb, 0xe0, 0x60, 0xa8,
- 0x9d, 0x06, 0x4b, 0x6e, 0x72, 0xb7, 0xbc, 0x4f,
- 0xb8, 0xc0, 0x80, 0xa2, 0xfb, 0x46, 0x5b, 0x8f,
- 0x11, 0x01, 0x92, 0x9d, 0x37, 0x09, 0x98, 0xc8,
- 0x0a, 0x46, 0xae, 0x12, 0xac, 0x61, 0x3f, 0xe7,
- 0x41, 0x1a, 0xaa, 0x2e, 0xdc, 0xd7, 0x2a, 0x47,
- 0xee, 0xdf, 0x08, 0xd1, 0xff, 0xea, 0x13, 0xc6,
- 0x05, 0xdb, 0x29, 0xcc, 0x03, 0xba, 0x7b, 0x6d,
- 0x40, 0xc1, 0xc9, 0x76, 0x75, 0x03, 0x7a, 0x71,
- 0xc9, 0x5f, 0xd9, 0xe0, 0x61, 0x69, 0x36, 0x8f,
- 0xb2, 0xbc, 0x28, 0xf3, 0x90, 0x71, 0xda, 0x5f,
- 0x08, 0xd5, 0x0d, 0xc1, 0xe6, 0xbd, 0x2b, 0xc6,
- 0x6c, 0x42, 0xfd, 0xbf, 0x10, 0xe8, 0x5f, 0x87,
- 0x3d, 0x21, 0x42, 0x85, 0x01, 0x0a, 0xbf, 0x8e,
- 0x49, 0xd3, 0x9c, 0x89, 0x3b, 0xea, 0xe1, 0xbf,
- 0xe9, 0x9b, 0x5e, 0x0e, 0xb8, 0xeb, 0xcd, 0x3a,
- 0xf6, 0x29, 0x41, 0x35, 0xdd, 0x9b, 0x13, 0x24,
- 0xe0, 0x1d, 0x8a, 0xcb, 0x20, 0xf8, 0x41, 0x51,
- 0x3e, 0x23, 0x8c, 0x67, 0x98, 0x39, 0x53, 0x77,
- 0x2a, 0x68, 0xf4, 0x3c, 0x7e, 0xd6, 0xc4, 0x6e,
- 0xf1, 0x53, 0xe9, 0xd8, 0x5c, 0xc1, 0xa9, 0x38,
- 0x6f, 0x5e, 0xe4, 0xd4, 0x29, 0x1c, 0x6c, 0xee,
- 0x2f, 0xea, 0xde, 0x61, 0x71, 0x5a, 0xea, 0xce,
- 0x23, 0x6e, 0x1b, 0x16, 0x43, 0xb7, 0xc0, 0xe3,
- 0x87, 0xa1, 0x95, 0x1e, 0x97, 0x4d, 0xea, 0xa6,
- 0xf7, 0x25, 0xac, 0x82, 0x2a, 0xd3, 0xa6, 0x99,
- 0x75, 0xdd, 0xc1, 0x55, 0x32, 0x6b, 0xea, 0x33,
- 0x88, 0xce, 0x06, 0xac, 0x15, 0x39, 0x19, 0xa3,
- 0x59, 0xaf, 0x7a, 0x1f, 0xd9, 0x72, 0x5e, 0xf7,
- 0x4c, 0xf3, 0x5d, 0x6b, 0xf2, 0x16, 0x92, 0xa8,
- 0x9e, 0x3d, 0xd4, 0x4c, 0x72, 0x55, 0x4e, 0x4a,
- 0xf7, 0x8b, 0x2f, 0x67, 0x5a, 0x90, 0xb7, 0xcf,
- 0x16, 0xd3, 0x7b, 0x5a, 0x9a, 0xc8, 0x9f, 0xbf,
- 0x01, 0x76, 0x3b, 0x86, 0x2c, 0x2a, 0x78, 0x10,
- 0x70, 0x05, 0x38, 0xf9, 0xdd, 0x2a, 0x1d, 0x00,
- 0x25, 0xb7, 0x10, 0xac, 0x3b, 0x3c, 0x4d, 0x3c,
- 0x01, 0x68, 0x3c, 0x5a, 0x29, 0xc2, 0xa0, 0x1b,
- 0x95, 0x67, 0xf9, 0x0a, 0x60, 0xb7, 0x11, 0x9c,
- 0x40, 0x45, 0xd7, 0xb0, 0xda, 0x49, 0x87, 0xcd,
- 0xb0, 0x9b, 0x61, 0x8c, 0xf4, 0x0d, 0x94, 0x1d,
- 0x79, 0x66, 0x13, 0x0b, 0xc6, 0x6b, 0x19, 0xee,
- 0xa0, 0x6b, 0x64, 0x7d, 0xc4, 0xff, 0x98, 0x72,
- 0x60, 0xab, 0x7f, 0x0f, 0x4d, 0x5d, 0x6b, 0xc3,
- 0xba, 0x5e, 0x0d, 0x04, 0xd9, 0x59, 0x17, 0xd0,
- 0x64, 0xbe, 0xfb, 0x58, 0xfc, 0xed, 0x18, 0xf6,
- 0xac, 0x19, 0xa4, 0xfd, 0x16, 0x59, 0x80, 0x58,
- 0xb8, 0x0f, 0x79, 0x24, 0x60, 0x18, 0x62, 0xa9,
- 0xa3, 0xa0, 0xe8, 0x81, 0xd6, 0xec, 0x5b, 0xfe,
- 0x5b, 0xb8, 0xa4, 0x00, 0xa9, 0xd0, 0x90, 0x17,
- 0xe5, 0x50, 0x3d, 0x2b, 0x12, 0x6e, 0x2a, 0x13,
- 0x65, 0x7c, 0xdf, 0xdf, 0xa7, 0xdd, 0x9f, 0x78,
- 0x5f, 0x8f, 0x4e, 0x90, 0xa6, 0x10, 0xe4, 0x7b,
- 0x68, 0x6b, 0xfd, 0xa9, 0x6d, 0x47, 0xfa, 0xec,
- 0x42, 0x35, 0x07, 0x12, 0x3e, 0x78, 0x23, 0x15,
- 0xff, 0xe2, 0x65, 0xc7, 0x47, 0x89, 0x2f, 0x97,
- 0x7c, 0xd7, 0x6b, 0x69, 0x35, 0x79, 0x6f, 0x85,
- 0xb4, 0xa9, 0x75, 0x04, 0x32, 0x9a, 0xfe, 0xf0,
- 0xce, 0xe3, 0xf1, 0xab, 0x15, 0x47, 0xe4, 0x9c,
- 0xc1, 0x48, 0x32, 0x3c, 0xbe, 0x44, 0x72, 0xc9,
- 0xaa, 0x50, 0x37, 0xa6, 0xbe, 0x41, 0xcf, 0xe8,
- 0x17, 0x4e, 0x37, 0xbe, 0xf1, 0x34, 0x2c, 0xd9,
- 0x60, 0x48, 0x09, 0xa5, 0x26, 0x00, 0x31, 0x77,
- 0x4e, 0xac, 0x7c, 0x89, 0x75, 0xe3, 0xde, 0x26,
- 0x4c, 0x32, 0x54, 0x27, 0x8e, 0x92, 0x26, 0x42,
- 0x85, 0x76, 0x01, 0x76, 0x62, 0x4c, 0x29, 0xe9,
- 0x38, 0x05, 0x51, 0x54, 0x97, 0xa3, 0x03, 0x59,
- 0x5e, 0xec, 0x0c, 0xe4, 0x96, 0xb7, 0x15, 0xa8,
- 0x41, 0x06, 0x2b, 0x78, 0x95, 0x24, 0xf6, 0x32,
- 0xc5, 0xec, 0xd7, 0x89, 0x28, 0x1e, 0xec, 0xb1,
- 0xc7, 0x21, 0x0c, 0xd3, 0x80, 0x7c, 0x5a, 0xe6,
- 0xb1, 0x3a, 0x52, 0x33, 0x84, 0x4e, 0x32, 0x6e,
- 0x7a, 0xf6, 0x43, 0x15, 0x5b, 0xa6, 0xba, 0xeb,
- 0xa8, 0xe4, 0xff, 0x4f, 0xbd, 0xbd, 0xa8, 0x5e,
- 0xbe, 0x27, 0xaf, 0xc5, 0xf7, 0x9e, 0xdf, 0x48,
- 0x22, 0xca, 0x6a, 0x0b, 0x3c, 0xd7, 0xe0, 0xdc,
- 0xf3, 0x71, 0x08, 0xdc, 0x28, 0x13, 0x08, 0xf2,
- 0x08, 0x1d, 0x9d, 0x7b, 0xd9, 0xde, 0x6f, 0xe6,
- 0xe8, 0x88, 0x18, 0xc2, 0xcd, 0x93, 0xc5, 0x38,
- 0x21, 0x68, 0x4c, 0x9a, 0xfb, 0xb6, 0x18, 0x16,
- 0x73, 0x2c, 0x1d, 0x6f, 0x95, 0xfb, 0x65, 0x4f,
- 0x7c, 0xec, 0x8d, 0x6c, 0xa8, 0xc0, 0x55, 0x28,
- 0xc6, 0xc3, 0xea, 0xeb, 0x05, 0xf5, 0x65, 0xeb,
- 0x53, 0xe1, 0x54, 0xef, 0xb8, 0x64, 0x98, 0x2d,
- 0x98, 0x9e, 0xc8, 0xfe, 0xa2, 0x07, 0x30, 0xf7,
- 0xf7, 0xae, 0xdb, 0x32, 0xf8, 0x71, 0x9d, 0x06,
- 0xdf, 0x9b, 0xda, 0x61, 0x7d, 0xdb, 0xae, 0x06,
- 0x24, 0x63, 0x74, 0xb6, 0xf3, 0x1b, 0x66, 0x09,
- 0x60, 0xff, 0x2b, 0x29, 0xf5, 0xa9, 0x9d, 0x61,
- 0x5d, 0x55, 0x10, 0x82, 0x21, 0xbb, 0x64, 0x0d,
- 0xef, 0x5c, 0xe3, 0x30, 0x1b, 0x60, 0x1e, 0x5b,
- 0xfe, 0x6c, 0xf5, 0x15, 0xa3, 0x86, 0x27, 0x58,
- 0x46, 0x00, 0x20, 0xcb, 0x86, 0x9a, 0x52, 0x29,
- 0x20, 0x68, 0x4d, 0x67, 0x88, 0x70, 0xc2, 0x31,
- 0xd8, 0xbb, 0xa5, 0xa7, 0x88, 0x7f, 0x66, 0xbc,
- 0xaa, 0x0f, 0xe1, 0x78, 0x7b, 0x97, 0x3c, 0xb7,
- 0xd7, 0xd8, 0x04, 0xe0, 0x09, 0x60, 0xc8, 0xd0,
- 0x9e, 0xe5, 0x6b, 0x31, 0x7f, 0x88, 0xfe, 0xc3,
- 0xfd, 0x89, 0xec, 0x76, 0x4b, 0xb3, 0xa7, 0x37,
- 0x03, 0xb7, 0xc6, 0x10, 0x7c, 0x9d, 0x0c, 0x75,
- 0xd3, 0x08, 0x14, 0x94, 0x03, 0x42, 0x25, 0x26,
- 0x85, 0xf7, 0xf0, 0x90, 0x06, 0x3e, 0x6f, 0x60,
- 0x52, 0x55, 0xd5, 0x0f, 0x79, 0x64, 0x69, 0x69,
- 0x46, 0xf9, 0x7f, 0x7f, 0x03, 0xf1, 0x1f, 0xdb,
- 0x39, 0x05, 0xba, 0x4a, 0x8f, 0x17, 0xe7, 0xba,
- 0xe2, 0x07, 0x7c, 0x1d, 0x9e, 0xbc, 0x94, 0xc0,
- 0x61, 0x59, 0x8e, 0x72, 0xaf, 0xfc, 0x99, 0xe4,
- 0xd5, 0xa8, 0xee, 0x0a, 0x48, 0x2d, 0x82, 0x8b,
- 0x34, 0x54, 0x8a, 0xce, 0xc7, 0xfa, 0xdd, 0xba,
- 0x54, 0xdf, 0xb3, 0x30, 0x33, 0x73, 0x2e, 0xd5,
- 0x52, 0xab, 0x49, 0x91, 0x4e, 0x0a, 0xd6, 0x2f,
- 0x67, 0xe4, 0xdd, 0x64, 0x48, 0x16, 0xd9, 0x85,
- 0xaa, 0x52, 0xa5, 0x0b, 0xd3, 0xb4, 0x2d, 0x77,
- 0x5e, 0x52, 0x77, 0x17, 0xcf, 0xbe, 0x88, 0x04,
- 0x01, 0x52, 0xe2, 0xf1, 0x46, 0xe2, 0x91, 0x30,
- 0x65, 0xcf, 0xc0, 0x65, 0x45, 0xc3, 0x7e, 0xf4,
- 0x2e, 0xb5, 0xaf, 0x6f, 0xab, 0x1a, 0xfa, 0x70,
- 0x35, 0xb8, 0x4f, 0x2d, 0x78, 0x90, 0x33, 0xb5,
- 0x9a, 0x67, 0xdb, 0x2f, 0x28, 0x32, 0xb6, 0x54,
- 0xab, 0x4c, 0x6b, 0x85, 0xed, 0x6c, 0x3e, 0x05,
- 0x2a, 0xc7, 0x32, 0xe8, 0xf5, 0xa3, 0x7b, 0x4e,
- 0x7b, 0x58, 0x24, 0x73, 0xf7, 0xfd, 0xc7, 0xc8,
- 0x6c, 0x71, 0x68, 0xb1, 0xf6, 0xc5, 0x9e, 0x1e,
- 0xe3, 0x5c, 0x25, 0xc0, 0x5b, 0x3e, 0x59, 0xa1,
- 0x18, 0x5a, 0xe8, 0xb5, 0xd1, 0x44, 0x13, 0xa3,
- 0xe6, 0x05, 0x76, 0xd2, 0x8d, 0x6e, 0x54, 0x68,
- 0x0c, 0xa4, 0x7b, 0x8b, 0xd3, 0x8c, 0x42, 0x13,
- 0x87, 0xda, 0xdf, 0x8f, 0xa5, 0x83, 0x7a, 0x42,
- 0x99, 0xb7, 0xeb, 0xe2, 0x79, 0xe0, 0xdb, 0xda,
- 0x33, 0xa8, 0x50, 0x3a, 0xd7, 0xe7, 0xd3, 0x61,
- 0x18, 0xb8, 0xaa, 0x2d, 0xc8, 0xd8, 0x2c, 0x28,
- 0xe5, 0x97, 0x0a, 0x7c, 0x6c, 0x7f, 0x09, 0xd7,
- 0x88, 0x80, 0xac, 0x12, 0xed, 0xf8, 0xc6, 0xb5,
- 0x2d, 0xd6, 0x63, 0x9b, 0x98, 0x35, 0x26, 0xde,
- 0xf6, 0x31, 0xee, 0x7e, 0xa0, 0xfb, 0x16, 0x98,
- 0xb1, 0x96, 0x1d, 0xee, 0xe3, 0x2f, 0xfb, 0x41,
- 0xdd, 0xea, 0x10, 0x1e, 0x03, 0x89, 0x18, 0xd2,
- 0x47, 0x0c, 0xa0, 0x57, 0xda, 0x76, 0x3a, 0x37,
- 0x2c, 0xe4, 0xf9, 0x77, 0xc8, 0x43, 0x5f, 0xcb,
- 0xd6, 0x85, 0xf7, 0x22, 0xe4, 0x32, 0x25, 0xa8,
- 0xdc, 0x21, 0xc0, 0xf5, 0x95, 0xb2, 0xf8, 0x83,
- 0xf0, 0x65, 0x61, 0x15, 0x48, 0x94, 0xb7, 0x03,
- 0x7f, 0x66, 0xa1, 0x39, 0x1f, 0xdd, 0xce, 0x96,
- 0xfe, 0x58, 0x81, 0x3d, 0x41, 0x11, 0x87, 0x13,
- 0x26, 0x1b, 0x6d, 0xf3, 0xca, 0x2e, 0x2c, 0x76,
- 0xd3, 0x2f, 0x6d, 0x49, 0x70, 0x53, 0x05, 0x96,
- 0xcc, 0x30, 0x2b, 0x83, 0xf2, 0xc6, 0xb2, 0x4b,
- 0x22, 0x13, 0x95, 0x42, 0xeb, 0x56, 0x4d, 0x22,
- 0xe6, 0x43, 0x6f, 0xba, 0xe7, 0x3b, 0xe5, 0x59,
- 0xce, 0x57, 0x88, 0x85, 0xb6, 0xbf, 0x15, 0x37,
- 0xb3, 0x7a, 0x7e, 0xc4, 0xbc, 0x99, 0xfc, 0xe4,
- 0x89, 0x00, 0x68, 0x39, 0xbc, 0x5a, 0xba, 0xab,
- 0x52, 0xab, 0xe6, 0x81, 0xfd, 0x93, 0x62, 0xe9,
- 0xb7, 0x12, 0xd1, 0x18, 0x1a, 0xb9, 0x55, 0x4a,
- 0x0f, 0xae, 0x35, 0x11, 0x04, 0x27, 0xf3, 0x42,
- 0x4e, 0xca, 0xdf, 0x9f, 0x12, 0x62, 0xea, 0x03,
- 0xc0, 0xa9, 0x22, 0x7b, 0x6c, 0x6c, 0xe3, 0xdf,
- 0x16, 0xad, 0x03, 0xc9, 0xfe, 0xa4, 0xdd, 0x4f
-};
-
-static const uint8_t AES_CBC_ciphertext_1792B[] = {
- 0x59, 0xcc, 0xfe, 0x8f, 0xb4, 0x9d, 0x0e, 0xd1,
- 0x85, 0xfc, 0x9b, 0x43, 0xc1, 0xb7, 0x54, 0x67,
- 0x01, 0xef, 0xb8, 0x71, 0x36, 0xdb, 0x50, 0x48,
- 0x7a, 0xea, 0xcf, 0xce, 0xba, 0x30, 0x10, 0x2e,
- 0x96, 0x2b, 0xfd, 0xcf, 0x00, 0xe3, 0x1f, 0xac,
- 0x66, 0x14, 0x30, 0x86, 0x49, 0xdb, 0x01, 0x8b,
- 0x07, 0xdd, 0x00, 0x9d, 0x0d, 0x5c, 0x19, 0x11,
- 0xe8, 0x44, 0x2b, 0x25, 0x70, 0xed, 0x7c, 0x33,
- 0x0d, 0xe3, 0x34, 0x93, 0x63, 0xad, 0x26, 0xb1,
- 0x11, 0x91, 0x34, 0x2e, 0x1d, 0x50, 0xaa, 0xd4,
- 0xef, 0x3a, 0x6d, 0xd7, 0x33, 0x20, 0x0d, 0x3f,
- 0x9b, 0xdd, 0xc3, 0xa5, 0xc5, 0xf1, 0x99, 0xdc,
- 0xea, 0x52, 0xda, 0x55, 0xea, 0xa2, 0x7a, 0xc5,
- 0x78, 0x44, 0x4a, 0x02, 0x33, 0x19, 0x62, 0x37,
- 0xf8, 0x8b, 0xd1, 0x0c, 0x21, 0xdf, 0x40, 0x19,
- 0x81, 0xea, 0xfb, 0x1c, 0xa7, 0xcc, 0x60, 0xfe,
- 0x63, 0x25, 0x8f, 0xf3, 0x73, 0x0f, 0x45, 0xe6,
- 0x6a, 0x18, 0xbf, 0xbe, 0xad, 0x92, 0x2a, 0x1e,
- 0x15, 0x65, 0x6f, 0xef, 0x92, 0xcd, 0x0e, 0x19,
- 0x3d, 0x42, 0xa8, 0xfc, 0x0d, 0x32, 0x58, 0xe0,
- 0x56, 0x9f, 0xd6, 0x9b, 0x8b, 0xec, 0xe0, 0x45,
- 0x4d, 0x7e, 0x73, 0x87, 0xff, 0x74, 0x92, 0x59,
- 0x60, 0x13, 0x93, 0xda, 0xec, 0xbf, 0xfa, 0x20,
- 0xb6, 0xe7, 0xdf, 0xc7, 0x10, 0xf5, 0x79, 0xb4,
- 0xd7, 0xac, 0xaf, 0x2b, 0x37, 0x52, 0x30, 0x1d,
- 0xbe, 0x0f, 0x60, 0x77, 0x3d, 0x03, 0x63, 0xa9,
- 0xae, 0xb1, 0xf3, 0xca, 0xca, 0xb4, 0x21, 0xd7,
- 0x6f, 0x2e, 0x5e, 0x9b, 0x68, 0x53, 0x80, 0xab,
- 0x30, 0x23, 0x0a, 0x72, 0x6b, 0xb1, 0xd8, 0x25,
- 0x5d, 0x3a, 0x62, 0x9b, 0x4f, 0x59, 0x3b, 0x79,
- 0xa8, 0x9e, 0x08, 0x6d, 0x37, 0xb0, 0xfc, 0x42,
- 0x51, 0x25, 0x86, 0xbd, 0x54, 0x5a, 0x95, 0x20,
- 0x6c, 0xac, 0xb9, 0x30, 0x1c, 0x03, 0xc9, 0x49,
- 0x38, 0x55, 0x31, 0x49, 0xed, 0xa9, 0x0e, 0xc3,
- 0x65, 0xb4, 0x68, 0x6b, 0x07, 0x4c, 0x0a, 0xf9,
- 0x21, 0x69, 0x7c, 0x9f, 0x28, 0x80, 0xe9, 0x49,
- 0x22, 0x7c, 0xec, 0x97, 0xf7, 0x70, 0xb4, 0xb8,
- 0x25, 0xe7, 0x80, 0x2c, 0x43, 0x24, 0x8a, 0x2e,
- 0xac, 0xa2, 0x84, 0x20, 0xe7, 0xf4, 0x6b, 0x86,
- 0x37, 0x05, 0xc7, 0x59, 0x04, 0x49, 0x2a, 0x99,
- 0x80, 0x46, 0x32, 0x19, 0xe6, 0x30, 0xce, 0xc0,
- 0xef, 0x6e, 0xec, 0xe5, 0x2f, 0x24, 0xc1, 0x78,
- 0x45, 0x02, 0xd3, 0x64, 0x99, 0xf5, 0xc7, 0xbc,
- 0x8f, 0x8c, 0x75, 0xb1, 0x0a, 0xc8, 0xc3, 0xbd,
- 0x5e, 0x7e, 0xbd, 0x0e, 0xdf, 0x4b, 0x96, 0x6a,
- 0xfd, 0x03, 0xdb, 0xd1, 0x31, 0x1e, 0x27, 0xf9,
- 0xe5, 0x83, 0x9a, 0xfc, 0x13, 0x4c, 0xd3, 0x04,
- 0xdb, 0xdb, 0x3f, 0x35, 0x93, 0x4e, 0x14, 0x6b,
- 0x00, 0x5c, 0xb6, 0x11, 0x50, 0xee, 0x61, 0x5c,
- 0x10, 0x5c, 0xd0, 0x90, 0x02, 0x2e, 0x12, 0xe0,
- 0x50, 0x44, 0xad, 0x75, 0xcd, 0x94, 0xcf, 0x92,
- 0xcb, 0xe3, 0xe8, 0x77, 0x4b, 0xd7, 0x1a, 0x7c,
- 0xdd, 0x6b, 0x49, 0x21, 0x7c, 0xe8, 0x2c, 0x25,
- 0x49, 0x86, 0x1e, 0x54, 0xae, 0xfc, 0x0e, 0x80,
- 0xb1, 0xd5, 0xa5, 0x23, 0xcf, 0xcc, 0x0e, 0x11,
- 0xe2, 0x7c, 0x3c, 0x25, 0x78, 0x64, 0x03, 0xa1,
- 0xdd, 0x9f, 0x74, 0x12, 0x7b, 0x21, 0xb5, 0x73,
- 0x15, 0x3c, 0xed, 0xad, 0x07, 0x62, 0x21, 0x79,
- 0xd4, 0x2f, 0x0d, 0x72, 0xe9, 0x7c, 0x6b, 0x96,
- 0x6e, 0xe5, 0x36, 0x4a, 0xd2, 0x38, 0xe1, 0xff,
- 0x6e, 0x26, 0xa4, 0xac, 0x83, 0x07, 0xe6, 0x67,
- 0x74, 0x6c, 0xec, 0x8b, 0x4b, 0x79, 0x33, 0x50,
- 0x2f, 0x8f, 0xa0, 0x8f, 0xfa, 0x38, 0x6a, 0xa2,
- 0x3a, 0x42, 0x85, 0x15, 0x90, 0xd0, 0xb3, 0x0d,
- 0x8a, 0xe4, 0x60, 0x03, 0xef, 0xf9, 0x65, 0x8a,
- 0x4e, 0x50, 0x8c, 0x65, 0xba, 0x61, 0x16, 0xc3,
- 0x93, 0xb7, 0x75, 0x21, 0x98, 0x25, 0x60, 0x6e,
- 0x3d, 0x68, 0xba, 0x7c, 0xe4, 0xf3, 0xd9, 0x9b,
- 0xfb, 0x7a, 0xed, 0x1f, 0xb3, 0x4b, 0x88, 0x74,
- 0x2c, 0xb8, 0x8c, 0x22, 0x95, 0xce, 0x90, 0xf1,
- 0xdb, 0x80, 0xa6, 0x39, 0xae, 0x82, 0xa1, 0xef,
- 0x75, 0xec, 0xfe, 0xf1, 0xe8, 0x04, 0xfd, 0x99,
- 0x1b, 0x5f, 0x45, 0x87, 0x4f, 0xfa, 0xa2, 0x3e,
- 0x3e, 0xb5, 0x01, 0x4b, 0x46, 0xeb, 0x13, 0x9a,
- 0xe4, 0x7d, 0x03, 0x87, 0xb1, 0x59, 0x91, 0x8e,
- 0x37, 0xd3, 0x16, 0xce, 0xef, 0x4b, 0xe9, 0x46,
- 0x8d, 0x2a, 0x50, 0x2f, 0x41, 0xd3, 0x7b, 0xcf,
- 0xf0, 0xb7, 0x8b, 0x65, 0x0f, 0xa3, 0x27, 0x10,
- 0xe9, 0xa9, 0xe9, 0x2c, 0xbe, 0xbb, 0x82, 0xe3,
- 0x7b, 0x0b, 0x81, 0x3e, 0xa4, 0x6a, 0x4f, 0x3b,
- 0xd5, 0x61, 0xf8, 0x47, 0x04, 0x99, 0x5b, 0xff,
- 0xf3, 0x14, 0x6e, 0x57, 0x5b, 0xbf, 0x1b, 0xb4,
- 0x3f, 0xf9, 0x31, 0xf6, 0x95, 0xd5, 0x10, 0xa9,
- 0x72, 0x28, 0x23, 0xa9, 0x6a, 0xa2, 0xcf, 0x7d,
- 0xe3, 0x18, 0x95, 0xda, 0xbc, 0x6f, 0xe9, 0xd8,
- 0xef, 0x49, 0x3f, 0xd3, 0xef, 0x1f, 0xe1, 0x50,
- 0xe8, 0x8a, 0xc0, 0xce, 0xcc, 0xb7, 0x5e, 0x0e,
- 0x8b, 0x95, 0x80, 0xfd, 0x58, 0x2a, 0x9b, 0xc8,
- 0xb4, 0x17, 0x04, 0x46, 0x74, 0xd4, 0x68, 0x91,
- 0x33, 0xc8, 0x31, 0x15, 0x84, 0x16, 0x35, 0x03,
- 0x64, 0x6d, 0xa9, 0x4e, 0x20, 0xeb, 0xa9, 0x3f,
- 0x21, 0x5e, 0x9b, 0x09, 0xc3, 0x45, 0xf8, 0x7c,
- 0x59, 0x62, 0x29, 0x9a, 0x5c, 0xcf, 0xb4, 0x27,
- 0x5e, 0x13, 0xea, 0xb3, 0xef, 0xd9, 0x01, 0x2a,
- 0x65, 0x5f, 0x14, 0xf4, 0xbf, 0x28, 0x89, 0x3d,
- 0xdd, 0x9d, 0x52, 0xbd, 0x9e, 0x5b, 0x3b, 0xd2,
- 0xc2, 0x81, 0x35, 0xb6, 0xac, 0xdd, 0x27, 0xc3,
- 0x7b, 0x01, 0x5a, 0x6d, 0x4c, 0x5e, 0x2c, 0x30,
- 0xcb, 0x3a, 0xfa, 0xc1, 0xd7, 0x31, 0x67, 0x3e,
- 0x08, 0x6a, 0xe8, 0x8c, 0x75, 0xac, 0x1a, 0x6a,
- 0x52, 0xf7, 0x51, 0xcd, 0x85, 0x3f, 0x3c, 0xa7,
- 0xea, 0xbc, 0xd7, 0x18, 0x9e, 0x27, 0x73, 0xe6,
- 0x2b, 0x58, 0xb6, 0xd2, 0x29, 0x68, 0xd5, 0x8f,
- 0x00, 0x4d, 0x55, 0xf6, 0x61, 0x5a, 0xcc, 0x51,
- 0xa6, 0x5e, 0x85, 0xcb, 0x0b, 0xfd, 0x06, 0xca,
- 0xf5, 0xbf, 0x0d, 0x13, 0x74, 0x78, 0x6d, 0x9e,
- 0x20, 0x11, 0x84, 0x3e, 0x78, 0x17, 0x04, 0x4f,
- 0x64, 0x2c, 0x3b, 0x3e, 0x93, 0x7b, 0x58, 0x33,
- 0x07, 0x52, 0xf7, 0x60, 0x6a, 0xa8, 0x3b, 0x19,
- 0x27, 0x7a, 0x93, 0xc5, 0x53, 0xad, 0xec, 0xf6,
- 0xc8, 0x94, 0xee, 0x92, 0xea, 0xee, 0x7e, 0xea,
- 0xb9, 0x5f, 0xac, 0x59, 0x5d, 0x2e, 0x78, 0x53,
- 0x72, 0x81, 0x92, 0xdd, 0x1c, 0x63, 0xbe, 0x02,
- 0xeb, 0xa8, 0x1b, 0x2a, 0x6e, 0x72, 0xe3, 0x2d,
- 0x84, 0x0d, 0x8a, 0x22, 0xf6, 0xba, 0xab, 0x04,
- 0x8e, 0x04, 0x24, 0xdb, 0xcc, 0xe2, 0x69, 0xeb,
- 0x4e, 0xfa, 0x6b, 0x5b, 0xc8, 0xc0, 0xd9, 0x25,
- 0xcb, 0x40, 0x8d, 0x4b, 0x8e, 0xa0, 0xd4, 0x72,
- 0x98, 0x36, 0x46, 0x3b, 0x4f, 0x5f, 0x96, 0x84,
- 0x03, 0x28, 0x86, 0x4d, 0xa1, 0x8a, 0xd7, 0xb2,
- 0x5b, 0x27, 0x01, 0x80, 0x62, 0x49, 0x56, 0xb9,
- 0xa0, 0xa1, 0xe3, 0x6e, 0x22, 0x2a, 0x5d, 0x03,
- 0x86, 0x40, 0x36, 0x22, 0x5e, 0xd2, 0xe5, 0xc0,
- 0x6b, 0xfa, 0xac, 0x80, 0x4e, 0x09, 0x99, 0xbc,
- 0x2f, 0x9b, 0xcc, 0xf3, 0x4e, 0xf7, 0x99, 0x98,
- 0x11, 0x6e, 0x6f, 0x62, 0x22, 0x6b, 0x92, 0x95,
- 0x3b, 0xc3, 0xd2, 0x8e, 0x0f, 0x07, 0xc2, 0x51,
- 0x5c, 0x4d, 0xb2, 0x6e, 0xc0, 0x27, 0x73, 0xcd,
- 0x57, 0xb7, 0xf0, 0xe9, 0x2e, 0xc8, 0xe2, 0x0c,
- 0xd1, 0xb5, 0x0f, 0xff, 0xf9, 0xec, 0x38, 0xba,
- 0x97, 0xd6, 0x94, 0x9b, 0xd1, 0x79, 0xb6, 0x6a,
- 0x01, 0x17, 0xe4, 0x7e, 0xa6, 0xd5, 0x86, 0x19,
- 0xae, 0xf3, 0xf0, 0x62, 0x73, 0xc0, 0xf0, 0x0a,
- 0x7a, 0x96, 0x93, 0x72, 0x89, 0x7e, 0x25, 0x57,
- 0xf8, 0xf7, 0xd5, 0x1e, 0xe5, 0xac, 0xd6, 0x38,
- 0x4f, 0xe8, 0x81, 0xd1, 0x53, 0x41, 0x07, 0x2d,
- 0x58, 0x34, 0x1c, 0xef, 0x74, 0x2e, 0x61, 0xca,
- 0xd3, 0xeb, 0xd6, 0x93, 0x0a, 0xf2, 0xf2, 0x86,
- 0x9c, 0xe3, 0x7a, 0x52, 0xf5, 0x42, 0xf1, 0x8b,
- 0x10, 0xf2, 0x25, 0x68, 0x7e, 0x61, 0xb1, 0x19,
- 0xcf, 0x8f, 0x5a, 0x53, 0xb7, 0x68, 0x4f, 0x1a,
- 0x71, 0xe9, 0x83, 0x91, 0x3a, 0x78, 0x0f, 0xf7,
- 0xd4, 0x74, 0xf5, 0x06, 0xd2, 0x88, 0xb0, 0x06,
- 0xe5, 0xc0, 0xfb, 0xb3, 0x91, 0xad, 0xc0, 0x84,
- 0x31, 0xf2, 0x3a, 0xcf, 0x63, 0xe6, 0x4a, 0xd3,
- 0x78, 0xbe, 0xde, 0x73, 0x3e, 0x02, 0x8e, 0xb8,
- 0x3a, 0xf6, 0x55, 0xa7, 0xf8, 0x5a, 0xb5, 0x0e,
- 0x0c, 0xc5, 0xe5, 0x66, 0xd5, 0xd2, 0x18, 0xf3,
- 0xef, 0xa5, 0xc9, 0x68, 0x69, 0xe0, 0xcd, 0x00,
- 0x33, 0x99, 0x6e, 0xea, 0xcb, 0x06, 0x7a, 0xe1,
- 0xe1, 0x19, 0x0b, 0xe7, 0x08, 0xcd, 0x09, 0x1b,
- 0x85, 0xec, 0xc4, 0xd4, 0x75, 0xf0, 0xd6, 0xfb,
- 0x84, 0x95, 0x07, 0x44, 0xca, 0xa5, 0x2a, 0x6c,
- 0xc2, 0x00, 0x58, 0x08, 0x87, 0x9e, 0x0a, 0xd4,
- 0x06, 0xe2, 0x91, 0x5f, 0xb7, 0x1b, 0x11, 0xfa,
- 0x85, 0xfc, 0x7c, 0xf2, 0x0f, 0x6e, 0x3c, 0x8a,
- 0xe1, 0x0f, 0xa0, 0x33, 0x84, 0xce, 0x81, 0x4d,
- 0x32, 0x4d, 0xeb, 0x41, 0xcf, 0x5a, 0x05, 0x60,
- 0x47, 0x6c, 0x2a, 0xc4, 0x17, 0xd5, 0x16, 0x3a,
- 0xe4, 0xe7, 0xab, 0x84, 0x94, 0x22, 0xff, 0x56,
- 0xb0, 0x0c, 0x92, 0x6c, 0x19, 0x11, 0x4c, 0xb3,
- 0xed, 0x58, 0x48, 0x84, 0x2a, 0xe2, 0x19, 0x2a,
- 0xe1, 0xc0, 0x56, 0x82, 0x3c, 0x83, 0xb4, 0x58,
- 0x2d, 0xf0, 0xb5, 0x1e, 0x76, 0x85, 0x51, 0xc2,
- 0xe4, 0x95, 0x27, 0x96, 0xd1, 0x90, 0xc3, 0x17,
- 0x75, 0xa1, 0xbb, 0x46, 0x5f, 0xa6, 0xf2, 0xef,
- 0x71, 0x56, 0x92, 0xc5, 0x8a, 0x85, 0x52, 0xe4,
- 0x63, 0x21, 0x6f, 0x55, 0x85, 0x2b, 0x6b, 0x0d,
- 0xc9, 0x92, 0x77, 0x67, 0xe3, 0xff, 0x2a, 0x2b,
- 0x90, 0x01, 0x3d, 0x74, 0x63, 0x04, 0x61, 0x3c,
- 0x8e, 0xf8, 0xfc, 0x04, 0xdd, 0x21, 0x85, 0x92,
- 0x1e, 0x4d, 0x51, 0x8d, 0xb5, 0x6b, 0xf1, 0xda,
- 0x96, 0xf5, 0x8e, 0x3c, 0x38, 0x5a, 0xac, 0x9b,
- 0xba, 0x0c, 0x84, 0x5d, 0x50, 0x12, 0xc7, 0xc5,
- 0x7a, 0xcb, 0xb1, 0xfa, 0x16, 0x93, 0xdf, 0x98,
- 0xda, 0x3f, 0x49, 0xa3, 0x94, 0x78, 0x70, 0xc7,
- 0x0b, 0xb6, 0x91, 0xa6, 0x16, 0x2e, 0xcf, 0xfd,
- 0x51, 0x6a, 0x5b, 0xad, 0x7a, 0xdd, 0xa9, 0x48,
- 0x48, 0xac, 0xd6, 0x45, 0xbc, 0x23, 0x31, 0x1d,
- 0x86, 0x54, 0x8a, 0x7f, 0x04, 0x97, 0x71, 0x9e,
- 0xbc, 0x2e, 0x6b, 0xd9, 0x33, 0xc8, 0x20, 0xc9,
- 0xe0, 0x25, 0x86, 0x59, 0x15, 0xcf, 0x63, 0xe5,
- 0x99, 0xf1, 0x24, 0xf1, 0xba, 0xc4, 0x15, 0x02,
- 0xe2, 0xdb, 0xfe, 0x4a, 0xf8, 0x3b, 0x91, 0x13,
- 0x8d, 0x03, 0x81, 0x9f, 0xb3, 0x3f, 0x04, 0x03,
- 0x58, 0xc0, 0xef, 0x27, 0x82, 0x14, 0xd2, 0x7f,
- 0x93, 0x70, 0xb7, 0xb2, 0x02, 0x21, 0xb3, 0x07,
- 0x7f, 0x1c, 0xef, 0x88, 0xee, 0x29, 0x7a, 0x0b,
- 0x3d, 0x75, 0x5a, 0x93, 0xfe, 0x7f, 0x14, 0xf7,
- 0x4e, 0x4b, 0x7f, 0x21, 0x02, 0xad, 0xf9, 0x43,
- 0x29, 0x1a, 0xe8, 0x1b, 0xf5, 0x32, 0xb2, 0x96,
- 0xe6, 0xe8, 0x96, 0x20, 0x9b, 0x96, 0x8e, 0x7b,
- 0xfe, 0xd8, 0xc9, 0x9c, 0x65, 0x16, 0xd6, 0x68,
- 0x95, 0xf8, 0x22, 0xe2, 0xae, 0x84, 0x03, 0xfd,
- 0x87, 0xa2, 0x72, 0x79, 0x74, 0x95, 0xfa, 0xe1,
- 0xfe, 0xd0, 0x4e, 0x3d, 0x39, 0x2e, 0x67, 0x55,
- 0x71, 0x6c, 0x89, 0x33, 0x49, 0x0c, 0x1b, 0x46,
- 0x92, 0x31, 0x6f, 0xa6, 0xf0, 0x09, 0xbd, 0x2d,
- 0xe2, 0xca, 0xda, 0x18, 0x33, 0xce, 0x67, 0x37,
- 0xfd, 0x6f, 0xcb, 0x9d, 0xbd, 0x42, 0xbc, 0xb2,
- 0x9c, 0x28, 0xcd, 0x65, 0x3c, 0x61, 0xbc, 0xde,
- 0x9d, 0xe1, 0x2a, 0x3e, 0xbf, 0xee, 0x3c, 0xcb,
- 0xb1, 0x50, 0xa9, 0x2c, 0xbe, 0xb5, 0x43, 0xd0,
- 0xec, 0x29, 0xf9, 0x16, 0x6f, 0x31, 0xd9, 0x9b,
- 0x92, 0xb1, 0x32, 0xae, 0x0f, 0xb6, 0x9d, 0x0e,
- 0x25, 0x7f, 0x89, 0x1f, 0x1d, 0x01, 0x68, 0xab,
- 0x3d, 0xd1, 0x74, 0x5b, 0x4c, 0x38, 0x7f, 0x3d,
- 0x33, 0xa5, 0xa2, 0x9f, 0xda, 0x84, 0xa5, 0x82,
- 0x2d, 0x16, 0x66, 0x46, 0x08, 0x30, 0x14, 0x48,
- 0x5e, 0xca, 0xe3, 0xf4, 0x8c, 0xcb, 0x32, 0xc6,
- 0xf1, 0x43, 0x62, 0xc6, 0xef, 0x16, 0xfa, 0x43,
- 0xae, 0x9c, 0x53, 0xe3, 0x49, 0x45, 0x80, 0xfd,
- 0x1d, 0x8c, 0xa9, 0x6d, 0x77, 0x76, 0xaa, 0x40,
- 0xc4, 0x4e, 0x7b, 0x78, 0x6b, 0xe0, 0x1d, 0xce,
- 0x56, 0x3d, 0xf0, 0x11, 0xfe, 0x4f, 0x6a, 0x6d,
- 0x0f, 0x4f, 0x90, 0x38, 0x92, 0x17, 0xfa, 0x56,
- 0x12, 0xa6, 0xa1, 0x0a, 0xea, 0x2f, 0x50, 0xf9,
- 0x60, 0x66, 0x6c, 0x7d, 0x5a, 0x08, 0x8e, 0x3c,
- 0xf3, 0xf0, 0x33, 0x02, 0x11, 0x02, 0xfe, 0x4c,
- 0x56, 0x2b, 0x9f, 0x0c, 0xbd, 0x65, 0x8a, 0x83,
- 0xde, 0x7c, 0x05, 0x26, 0x93, 0x19, 0xcc, 0xf3,
- 0x71, 0x0e, 0xad, 0x2f, 0xb3, 0xc9, 0x38, 0x50,
- 0x64, 0xd5, 0x4c, 0x60, 0x5f, 0x02, 0x13, 0x34,
- 0xc9, 0x75, 0xc4, 0x60, 0xab, 0x2e, 0x17, 0x7d
-};
-
-static const uint8_t AES_CBC_ciphertext_2048B[] = {
- 0x8b, 0x55, 0xbd, 0xfd, 0x2b, 0x35, 0x76, 0x5c,
- 0xd1, 0x90, 0xd7, 0x6a, 0x63, 0x1e, 0x39, 0x71,
- 0x0d, 0x5c, 0xd8, 0x03, 0x00, 0x75, 0xf1, 0x07,
- 0x03, 0x8d, 0x76, 0xeb, 0x3b, 0x00, 0x1e, 0x33,
- 0x88, 0xfc, 0x8f, 0x08, 0x4d, 0x33, 0xf1, 0x3c,
- 0xee, 0xd0, 0x5d, 0x19, 0x8b, 0x3c, 0x50, 0x86,
- 0xfd, 0x8d, 0x58, 0x21, 0xb4, 0xae, 0x0f, 0x81,
- 0xe9, 0x9f, 0xc9, 0xc0, 0x90, 0xf7, 0x04, 0x6f,
- 0x39, 0x1d, 0x8a, 0x3f, 0x8d, 0x32, 0x23, 0xb5,
- 0x1f, 0xcc, 0x8a, 0x12, 0x2d, 0x46, 0x82, 0x5e,
- 0x6a, 0x34, 0x8c, 0xb1, 0x93, 0x70, 0x3b, 0xde,
- 0x55, 0xaf, 0x16, 0x35, 0x99, 0x84, 0xd5, 0x88,
- 0xc9, 0x54, 0xb1, 0xb2, 0xd3, 0xeb, 0x9e, 0x55,
- 0x9a, 0xa9, 0xa7, 0xf5, 0xda, 0x29, 0xcf, 0xe1,
- 0x98, 0x64, 0x45, 0x77, 0xf2, 0x12, 0x69, 0x8f,
- 0x78, 0xd8, 0x82, 0x41, 0xb2, 0x9f, 0xe2, 0x1c,
- 0x63, 0x9b, 0x24, 0x81, 0x67, 0x95, 0xa2, 0xff,
- 0x26, 0x9d, 0x65, 0x48, 0x61, 0x30, 0x66, 0x41,
- 0x68, 0x84, 0xbb, 0x59, 0x14, 0x8e, 0x9a, 0x62,
- 0xb6, 0xca, 0xda, 0xbe, 0x7c, 0x41, 0x52, 0x6e,
- 0x1b, 0x86, 0xbf, 0x08, 0xeb, 0x37, 0x84, 0x60,
- 0xe4, 0xc4, 0x1e, 0xa8, 0x4c, 0x84, 0x60, 0x2f,
- 0x70, 0x90, 0xf2, 0x26, 0xe7, 0x65, 0x0c, 0xc4,
- 0x58, 0x36, 0x8e, 0x4d, 0xdf, 0xff, 0x9a, 0x39,
- 0x93, 0x01, 0xcf, 0x6f, 0x6d, 0xde, 0xef, 0x79,
- 0xb0, 0xce, 0xe2, 0x98, 0xdb, 0x85, 0x8d, 0x62,
- 0x9d, 0xb9, 0x63, 0xfd, 0xf0, 0x35, 0xb5, 0xa9,
- 0x1b, 0xf9, 0xe5, 0xd4, 0x2e, 0x22, 0x2d, 0xcc,
- 0x42, 0xbf, 0x0e, 0x51, 0xf7, 0x15, 0x07, 0x32,
- 0x75, 0x5b, 0x74, 0xbb, 0x00, 0xef, 0xd4, 0x66,
- 0x8b, 0xad, 0x71, 0x53, 0x94, 0xd7, 0x7d, 0x2c,
- 0x40, 0x3e, 0x69, 0xa0, 0x4c, 0x86, 0x5e, 0x06,
- 0xed, 0xdf, 0x22, 0xe2, 0x24, 0x25, 0x4e, 0x9b,
- 0x5f, 0x49, 0x74, 0xba, 0xed, 0xb1, 0xa6, 0xeb,
- 0xae, 0x3f, 0xc6, 0x9e, 0x0b, 0x29, 0x28, 0x9a,
- 0xb6, 0xb2, 0x74, 0x58, 0xec, 0xa6, 0x4a, 0xed,
- 0xe5, 0x10, 0x00, 0x85, 0xe1, 0x63, 0x41, 0x61,
- 0x30, 0x7c, 0x97, 0xcf, 0x75, 0xcf, 0xb6, 0xf3,
- 0xf7, 0xda, 0x35, 0x3f, 0x85, 0x8c, 0x64, 0xca,
- 0xb7, 0xea, 0x7f, 0xe4, 0xa3, 0x4d, 0x30, 0x84,
- 0x8c, 0x9c, 0x80, 0x5a, 0x50, 0xa5, 0x64, 0xae,
- 0x26, 0xd3, 0xb5, 0x01, 0x73, 0x36, 0x8a, 0x92,
- 0x49, 0xc4, 0x1a, 0x94, 0x81, 0x9d, 0xf5, 0x6c,
- 0x50, 0xe1, 0x58, 0x0b, 0x75, 0xdd, 0x6b, 0x6a,
- 0xca, 0x69, 0xea, 0xc3, 0x33, 0x90, 0x9f, 0x3b,
- 0x65, 0x5d, 0x5e, 0xee, 0x31, 0xb7, 0x32, 0xfd,
- 0x56, 0x83, 0xb6, 0xfb, 0xa8, 0x04, 0xfc, 0x1e,
- 0x11, 0xfb, 0x02, 0x23, 0x53, 0x49, 0x45, 0xb1,
- 0x07, 0xfc, 0xba, 0xe7, 0x5f, 0x5d, 0x2d, 0x7f,
- 0x9e, 0x46, 0xba, 0xe9, 0xb0, 0xdb, 0x32, 0x04,
- 0xa4, 0xa7, 0x98, 0xab, 0x91, 0xcd, 0x02, 0x05,
- 0xf5, 0x74, 0x31, 0x98, 0x83, 0x3d, 0x33, 0x11,
- 0x0e, 0xe3, 0x8d, 0xa8, 0xc9, 0x0e, 0xf3, 0xb9,
- 0x47, 0x67, 0xe9, 0x79, 0x2b, 0x34, 0xcd, 0x9b,
- 0x45, 0x75, 0x29, 0xf0, 0xbf, 0xcc, 0xda, 0x3a,
- 0x91, 0xb2, 0x15, 0x27, 0x7a, 0xe5, 0xf5, 0x6a,
- 0x5e, 0xbe, 0x2c, 0x98, 0xe8, 0x40, 0x96, 0x4f,
- 0x8a, 0x09, 0xfd, 0xf6, 0xb2, 0xe7, 0x45, 0xb6,
- 0x08, 0xc1, 0x69, 0xe1, 0xb3, 0xc4, 0x24, 0x34,
- 0x07, 0x85, 0xd5, 0xa9, 0x78, 0xca, 0xfa, 0x4b,
- 0x01, 0x19, 0x4d, 0x95, 0xdc, 0xa5, 0xc1, 0x9c,
- 0xec, 0x27, 0x5b, 0xa6, 0x54, 0x25, 0xbd, 0xc8,
- 0x0a, 0xb7, 0x11, 0xfb, 0x4e, 0xeb, 0x65, 0x2e,
- 0xe1, 0x08, 0x9c, 0x3a, 0x45, 0x44, 0x33, 0xef,
- 0x0d, 0xb9, 0xff, 0x3e, 0x68, 0x9c, 0x61, 0x2b,
- 0x11, 0xb8, 0x5c, 0x47, 0x0f, 0x94, 0xf2, 0xf8,
- 0x0b, 0xbb, 0x99, 0x18, 0x85, 0xa3, 0xba, 0x44,
- 0xf3, 0x79, 0xb3, 0x63, 0x2c, 0x1f, 0x2a, 0x35,
- 0x3b, 0x23, 0x98, 0xab, 0xf4, 0x16, 0x36, 0xf8,
- 0xde, 0x86, 0xa4, 0xd4, 0x75, 0xff, 0x51, 0xf9,
- 0xeb, 0x42, 0x5f, 0x55, 0xe2, 0xbe, 0xd1, 0x5b,
- 0xb5, 0x38, 0xeb, 0xb4, 0x4d, 0xec, 0xec, 0x99,
- 0xe1, 0x39, 0x43, 0xaa, 0x64, 0xf7, 0xc9, 0xd8,
- 0xf2, 0x9a, 0x71, 0x43, 0x39, 0x17, 0xe8, 0xa8,
- 0xa2, 0xe2, 0xa4, 0x2c, 0x18, 0x11, 0x49, 0xdf,
- 0x18, 0xdd, 0x85, 0x6e, 0x65, 0x96, 0xe2, 0xba,
- 0xa1, 0x0a, 0x2c, 0xca, 0xdc, 0x5f, 0xe4, 0xf4,
- 0x35, 0x03, 0xb2, 0xa9, 0xda, 0xcf, 0xb7, 0x6d,
- 0x65, 0x82, 0x82, 0x67, 0x9d, 0x0e, 0xf3, 0xe8,
- 0x85, 0x6c, 0x69, 0xb8, 0x4c, 0xa6, 0xc6, 0x2e,
- 0x40, 0xb5, 0x54, 0x28, 0x95, 0xe4, 0x57, 0xe0,
- 0x5b, 0xf8, 0xde, 0x59, 0xe0, 0xfd, 0x89, 0x48,
- 0xac, 0x56, 0x13, 0x54, 0xb9, 0x1b, 0xf5, 0x59,
- 0x97, 0xb6, 0xb3, 0xe8, 0xac, 0x2d, 0xfc, 0xd2,
- 0xea, 0x57, 0x96, 0x57, 0xa8, 0x26, 0x97, 0x2c,
- 0x01, 0x89, 0x56, 0xea, 0xec, 0x8c, 0x53, 0xd5,
- 0xd7, 0x9e, 0xc9, 0x98, 0x0b, 0xad, 0x03, 0x75,
- 0xa0, 0x6e, 0x98, 0x8b, 0x97, 0x8d, 0x8d, 0x85,
- 0x7d, 0x74, 0xa7, 0x2d, 0xde, 0x67, 0x0c, 0xcd,
- 0x54, 0xb8, 0x15, 0x7b, 0xeb, 0xf5, 0x84, 0xb9,
- 0x78, 0xab, 0xd8, 0x68, 0x91, 0x1f, 0x6a, 0xa6,
- 0x28, 0x22, 0xf7, 0x00, 0x49, 0x00, 0xbe, 0x41,
- 0x71, 0x0a, 0xf5, 0xe7, 0x9f, 0xb4, 0x11, 0x41,
- 0x3f, 0xcd, 0xa9, 0xa9, 0x01, 0x8b, 0x6a, 0xeb,
- 0x54, 0x4c, 0x58, 0x92, 0x68, 0x02, 0x0e, 0xe9,
- 0xed, 0x65, 0x4c, 0xfb, 0x95, 0x48, 0x58, 0xa2,
- 0xaa, 0x57, 0x69, 0x13, 0x82, 0x0c, 0x2c, 0x4b,
- 0x5d, 0x4e, 0x18, 0x30, 0xef, 0x1c, 0xb1, 0x9d,
- 0x05, 0x05, 0x02, 0x1c, 0x97, 0xc9, 0x48, 0xfe,
- 0x5e, 0x7b, 0x77, 0xa3, 0x1f, 0x2a, 0x81, 0x42,
- 0xf0, 0x4b, 0x85, 0x12, 0x9c, 0x1f, 0x44, 0xb1,
- 0x14, 0x91, 0x92, 0x65, 0x77, 0xb1, 0x87, 0xa2,
- 0xfc, 0xa4, 0xe7, 0xd2, 0x9b, 0xf2, 0x17, 0xf0,
- 0x30, 0x1c, 0x8d, 0x33, 0xbc, 0x25, 0x28, 0x48,
- 0xfd, 0x30, 0x79, 0x0a, 0x99, 0x3e, 0xb4, 0x0f,
- 0x1e, 0xa6, 0x68, 0x76, 0x19, 0x76, 0x29, 0xac,
- 0x5d, 0xb8, 0x1e, 0x42, 0xd6, 0x85, 0x04, 0xbf,
- 0x64, 0x1c, 0x2d, 0x53, 0xe9, 0x92, 0x78, 0xf8,
- 0xc3, 0xda, 0x96, 0x92, 0x10, 0x6f, 0x45, 0x85,
- 0xaf, 0x5e, 0xcc, 0xa8, 0xc0, 0xc6, 0x2e, 0x73,
- 0x51, 0x3f, 0x5e, 0xd7, 0x52, 0x33, 0x71, 0x12,
- 0x6d, 0x85, 0xee, 0xea, 0x85, 0xa8, 0x48, 0x2b,
- 0x40, 0x64, 0x6d, 0x28, 0x73, 0x16, 0xd7, 0x82,
- 0xd9, 0x90, 0xed, 0x1f, 0xa7, 0x5c, 0xb1, 0x5c,
- 0x27, 0xb9, 0x67, 0x8b, 0xb4, 0x17, 0x13, 0x83,
- 0x5f, 0x09, 0x72, 0x0a, 0xd7, 0xa0, 0xec, 0x81,
- 0x59, 0x19, 0xb9, 0xa6, 0x5a, 0x37, 0x34, 0x14,
- 0x47, 0xf6, 0xe7, 0x6c, 0xd2, 0x09, 0x10, 0xe7,
- 0xdd, 0xbb, 0x02, 0xd1, 0x28, 0xfa, 0x01, 0x2c,
- 0x93, 0x64, 0x2e, 0x1b, 0x4c, 0x02, 0x52, 0xcb,
- 0x07, 0xa1, 0xb6, 0x46, 0x02, 0x80, 0xd9, 0x8f,
- 0x5c, 0x62, 0xbe, 0x78, 0x9e, 0x75, 0xc4, 0x97,
- 0x91, 0x39, 0x12, 0x65, 0xb9, 0x3b, 0xc2, 0xd1,
- 0xaf, 0xf2, 0x1f, 0x4e, 0x4d, 0xd1, 0xf0, 0x9f,
- 0xb7, 0x12, 0xfd, 0xe8, 0x75, 0x18, 0xc0, 0x9d,
- 0x8c, 0x70, 0xff, 0x77, 0x05, 0xb6, 0x1a, 0x1f,
- 0x96, 0x48, 0xf6, 0xfe, 0xd5, 0x5d, 0x98, 0xa5,
- 0x72, 0x1c, 0x84, 0x76, 0x3e, 0xb8, 0x87, 0x37,
- 0xdd, 0xd4, 0x3a, 0x45, 0xdd, 0x09, 0xd8, 0xe7,
- 0x09, 0x2f, 0x3e, 0x33, 0x9e, 0x7b, 0x8c, 0xe4,
- 0x85, 0x12, 0x4e, 0xf8, 0x06, 0xb7, 0xb1, 0x85,
- 0x24, 0x96, 0xd8, 0xfe, 0x87, 0x92, 0x81, 0xb1,
- 0xa3, 0x38, 0xb9, 0x56, 0xe1, 0xf6, 0x36, 0x41,
- 0xbb, 0xd6, 0x56, 0x69, 0x94, 0x57, 0xb3, 0xa4,
- 0xca, 0xa4, 0xe1, 0x02, 0x3b, 0x96, 0x71, 0xe0,
- 0xb2, 0x2f, 0x85, 0x48, 0x1b, 0x4a, 0x41, 0x80,
- 0x4b, 0x9c, 0xe0, 0xc9, 0x39, 0xb8, 0xb1, 0xca,
- 0x64, 0x77, 0x46, 0x58, 0xe6, 0x84, 0xd5, 0x2b,
- 0x65, 0xce, 0xe9, 0x09, 0xa3, 0xaa, 0xfb, 0x83,
- 0xa9, 0x28, 0x68, 0xfd, 0xcd, 0xfd, 0x76, 0x83,
- 0xe1, 0x20, 0x22, 0x77, 0x3a, 0xa3, 0xb2, 0x93,
- 0x14, 0x91, 0xfc, 0xe2, 0x17, 0x63, 0x2b, 0xa6,
- 0x29, 0x38, 0x7b, 0x9b, 0x8b, 0x15, 0x77, 0xd6,
- 0xaa, 0x92, 0x51, 0x53, 0x50, 0xff, 0xa0, 0x35,
- 0xa0, 0x59, 0x7d, 0xf0, 0x11, 0x23, 0x49, 0xdf,
- 0x5a, 0x21, 0xc2, 0xfe, 0x35, 0xa0, 0x1d, 0xe2,
- 0xae, 0xa2, 0x8a, 0x61, 0x5b, 0xf7, 0xf1, 0x1c,
- 0x1c, 0xec, 0xc4, 0xf6, 0xdc, 0xaa, 0xc8, 0xc2,
- 0xe5, 0xa1, 0x2e, 0x14, 0xe5, 0xc6, 0xc9, 0x73,
- 0x03, 0x78, 0xeb, 0xed, 0xe0, 0x3e, 0xc5, 0xf4,
- 0xf1, 0x50, 0xb2, 0x01, 0x91, 0x96, 0xf5, 0xbb,
- 0xe1, 0x32, 0xcd, 0xa8, 0x66, 0xbf, 0x73, 0x85,
- 0x94, 0xd6, 0x7e, 0x68, 0xc5, 0xe4, 0xed, 0xd5,
- 0xe3, 0x67, 0x4c, 0xa5, 0xb3, 0x1f, 0xdf, 0xf8,
- 0xb3, 0x73, 0x5a, 0xac, 0xeb, 0x46, 0x16, 0x24,
- 0xab, 0xca, 0xa4, 0xdd, 0x87, 0x0e, 0x24, 0x83,
- 0x32, 0x04, 0x4c, 0xd8, 0xda, 0x7d, 0xdc, 0xe3,
- 0x01, 0x93, 0xf3, 0xc1, 0x5b, 0xbd, 0xc3, 0x1d,
- 0x40, 0x62, 0xde, 0x94, 0x03, 0x85, 0x91, 0x2a,
- 0xa0, 0x25, 0x10, 0xd3, 0x32, 0x9f, 0x93, 0x00,
- 0xa7, 0x8a, 0xfa, 0x77, 0x7c, 0xaf, 0x4d, 0xc8,
- 0x7a, 0xf3, 0x16, 0x2b, 0xba, 0xeb, 0x74, 0x51,
- 0xb8, 0xdd, 0x32, 0xad, 0x68, 0x7d, 0xdd, 0xca,
- 0x60, 0x98, 0xc9, 0x9b, 0xb6, 0x5d, 0x4d, 0x3a,
- 0x66, 0x8a, 0xbe, 0x05, 0xf9, 0x0c, 0xc5, 0xba,
- 0x52, 0x82, 0x09, 0x1f, 0x5a, 0x66, 0x89, 0x69,
- 0xa3, 0x5d, 0x93, 0x50, 0x7d, 0x44, 0xc3, 0x2a,
- 0xb8, 0xab, 0xec, 0xa6, 0x5a, 0xae, 0x4a, 0x6a,
- 0xcd, 0xfd, 0xb6, 0xff, 0x3d, 0x98, 0x05, 0xd9,
- 0x5b, 0x29, 0xc4, 0x6f, 0xe0, 0x76, 0xe2, 0x3f,
- 0xec, 0xd7, 0xa4, 0x91, 0x63, 0xf5, 0x4e, 0x4b,
- 0xab, 0x20, 0x8c, 0x3a, 0x41, 0xed, 0x8b, 0x4b,
- 0xb9, 0x01, 0x21, 0xc0, 0x6d, 0xfd, 0x70, 0x5b,
- 0x20, 0x92, 0x41, 0x89, 0x74, 0xb7, 0xe9, 0x8b,
- 0xfc, 0x6d, 0x17, 0x3f, 0x7f, 0x89, 0x3d, 0x6b,
- 0x8f, 0xbc, 0xd2, 0x57, 0xe9, 0xc9, 0x6e, 0xa7,
- 0x19, 0x26, 0x18, 0xad, 0xef, 0xb5, 0x87, 0xbf,
- 0xb8, 0xa8, 0xd6, 0x7d, 0xdd, 0x5f, 0x94, 0x54,
- 0x09, 0x92, 0x2b, 0xf5, 0x04, 0xf7, 0x36, 0x69,
- 0x8e, 0xf4, 0xdc, 0x1d, 0x6e, 0x55, 0xbb, 0xe9,
- 0x13, 0x05, 0x83, 0x35, 0x9c, 0xed, 0xcf, 0x8c,
- 0x26, 0x8c, 0x7b, 0xc7, 0x0b, 0xba, 0xfd, 0xe2,
- 0x84, 0x5c, 0x2a, 0x79, 0x43, 0x99, 0xb2, 0xc3,
- 0x82, 0x87, 0xc8, 0xcd, 0x37, 0x6d, 0xa1, 0x2b,
- 0x39, 0xb2, 0x38, 0x99, 0xd9, 0xfc, 0x02, 0x15,
- 0x55, 0x21, 0x62, 0x59, 0xeb, 0x00, 0x86, 0x08,
- 0x20, 0xbe, 0x1a, 0x62, 0x4d, 0x7e, 0xdf, 0x68,
- 0x73, 0x5b, 0x5f, 0xaf, 0x84, 0x96, 0x2e, 0x1f,
- 0x6b, 0x03, 0xc9, 0xa6, 0x75, 0x18, 0xe9, 0xd4,
- 0xbd, 0xc8, 0xec, 0x9a, 0x5a, 0xb3, 0x99, 0xab,
- 0x5f, 0x7c, 0x08, 0x7f, 0x69, 0x4d, 0x52, 0xa2,
- 0x30, 0x17, 0x3b, 0x16, 0x15, 0x1b, 0x11, 0x62,
- 0x3e, 0x80, 0x4b, 0x85, 0x7c, 0x9c, 0xd1, 0x3a,
- 0x13, 0x01, 0x5e, 0x45, 0xf1, 0xc8, 0x5f, 0xcd,
- 0x0e, 0x21, 0xf5, 0x82, 0xd4, 0x7b, 0x5c, 0x45,
- 0x27, 0x6b, 0xef, 0xfe, 0xb8, 0xc0, 0x6f, 0xdc,
- 0x60, 0x7b, 0xe4, 0xd5, 0x75, 0x71, 0xe6, 0xe8,
- 0x7d, 0x6b, 0x6d, 0x80, 0xaf, 0x76, 0x41, 0x58,
- 0xb7, 0xac, 0xb7, 0x13, 0x2f, 0x81, 0xcc, 0xf9,
- 0x19, 0x97, 0xe8, 0xee, 0x40, 0x91, 0xfc, 0x89,
- 0x13, 0x1e, 0x67, 0x9a, 0xdb, 0x8f, 0x8f, 0xc7,
- 0x4a, 0xc9, 0xaf, 0x2f, 0x67, 0x01, 0x3c, 0xb8,
- 0xa8, 0x3e, 0x78, 0x93, 0x1b, 0xdf, 0xbb, 0x34,
- 0x0b, 0x1a, 0xfa, 0xc2, 0x2d, 0xc5, 0x1c, 0xec,
- 0x97, 0x4f, 0x48, 0x41, 0x15, 0x0e, 0x75, 0xed,
- 0x66, 0x8c, 0x17, 0x7f, 0xb1, 0x48, 0x13, 0xc1,
- 0xfb, 0x60, 0x06, 0xf9, 0x72, 0x41, 0x3e, 0xcf,
- 0x6e, 0xb6, 0xc8, 0xeb, 0x4b, 0x5a, 0xd2, 0x0c,
- 0x28, 0xda, 0x02, 0x7a, 0x46, 0x21, 0x42, 0xb5,
- 0x34, 0xda, 0xcb, 0x5e, 0xbd, 0x66, 0x5c, 0xca,
- 0xff, 0x52, 0x43, 0x89, 0xf9, 0x10, 0x9a, 0x9e,
- 0x9b, 0xe3, 0xb0, 0x51, 0xe9, 0xf3, 0x0a, 0x35,
- 0x77, 0x54, 0xcc, 0xac, 0xa6, 0xf1, 0x2e, 0x36,
- 0x89, 0xac, 0xc5, 0xc6, 0x62, 0x5a, 0xc0, 0x6d,
- 0xc4, 0xe1, 0xf7, 0x64, 0x30, 0xff, 0x11, 0x40,
- 0x13, 0x89, 0xd8, 0xd7, 0x73, 0x3f, 0x93, 0x08,
- 0x68, 0xab, 0x66, 0x09, 0x1a, 0xea, 0x78, 0xc9,
- 0x52, 0xf2, 0xfd, 0x93, 0x1b, 0x94, 0xbe, 0x5c,
- 0xe5, 0x00, 0x6e, 0x00, 0xb9, 0xea, 0x27, 0xaa,
- 0xb3, 0xee, 0xe3, 0xc8, 0x6a, 0xb0, 0xc1, 0x8e,
- 0x9b, 0x54, 0x40, 0x10, 0x96, 0x06, 0xe8, 0xb3,
- 0xf5, 0x55, 0x77, 0xd7, 0x5c, 0x94, 0xc1, 0x74,
- 0xf3, 0x07, 0x64, 0xac, 0x1c, 0xde, 0xc7, 0x22,
- 0xb0, 0xbf, 0x2a, 0x5a, 0xc0, 0x8f, 0x8a, 0x83,
- 0x50, 0xc2, 0x5e, 0x97, 0xa0, 0xbe, 0x49, 0x7e,
- 0x47, 0xaf, 0xa7, 0x20, 0x02, 0x35, 0xa4, 0x57,
- 0xd9, 0x26, 0x63, 0xdb, 0xf1, 0x34, 0x42, 0x89,
- 0x36, 0xd1, 0x77, 0x6f, 0xb1, 0xea, 0x79, 0x7e,
- 0x95, 0x10, 0x5a, 0xee, 0xa3, 0xae, 0x6f, 0xba,
- 0xa9, 0xef, 0x5a, 0x7e, 0x34, 0x03, 0x04, 0x07,
- 0x92, 0xd6, 0x07, 0x79, 0xaa, 0x14, 0x90, 0x97,
- 0x05, 0x4d, 0xa6, 0x27, 0x10, 0x5c, 0x25, 0x24,
- 0xcb, 0xcc, 0xf6, 0x77, 0x9e, 0x43, 0x23, 0xd4,
- 0x98, 0xef, 0x22, 0xa8, 0xad, 0xf2, 0x26, 0x08,
- 0x59, 0x69, 0xa4, 0xc3, 0x97, 0xe0, 0x5c, 0x6f,
- 0xeb, 0x3d, 0xd4, 0x62, 0x6e, 0x80, 0x61, 0x02,
- 0xf4, 0xfc, 0x94, 0x79, 0xbb, 0x4e, 0x6d, 0xd7,
- 0x30, 0x5b, 0x10, 0x11, 0x5a, 0x3d, 0xa7, 0x50,
- 0x1d, 0x9a, 0x13, 0x5f, 0x4f, 0xa8, 0xa7, 0xb6,
- 0x39, 0xc7, 0xea, 0xe6, 0x19, 0x61, 0x69, 0xc7,
- 0x9a, 0x3a, 0xeb, 0x9d, 0xdc, 0xf7, 0x06, 0x37,
- 0xbd, 0xac, 0xe3, 0x18, 0xff, 0xfe, 0x11, 0xdb,
- 0x67, 0x42, 0xb4, 0xea, 0xa8, 0xbd, 0xb0, 0x76,
- 0xd2, 0x74, 0x32, 0xc2, 0xa4, 0x9c, 0xe7, 0x60,
- 0xc5, 0x30, 0x9a, 0x57, 0x66, 0xcd, 0x0f, 0x02,
- 0x4c, 0xea, 0xe9, 0xd3, 0x2a, 0x5c, 0x09, 0xc2,
- 0xff, 0x6a, 0xde, 0x5d, 0xb7, 0xe9, 0x75, 0x6b,
- 0x29, 0x94, 0xd6, 0xf7, 0xc3, 0xdf, 0xfb, 0x70,
- 0xec, 0xb5, 0x8c, 0xb0, 0x78, 0x7a, 0xee, 0x52,
- 0x5f, 0x8c, 0xae, 0x85, 0xe5, 0x98, 0xa2, 0xb7,
- 0x7c, 0x02, 0x2a, 0xcc, 0x9e, 0xde, 0x99, 0x5f,
- 0x84, 0x20, 0xbb, 0xdc, 0xf2, 0xd2, 0x13, 0x46,
- 0x3c, 0xd6, 0x4d, 0xe7, 0x50, 0xef, 0x55, 0xc3,
- 0x96, 0x9f, 0xec, 0x6c, 0xd8, 0xe2, 0xea, 0xed,
- 0xc7, 0x33, 0xc9, 0xb3, 0x1c, 0x4f, 0x1d, 0x83,
- 0x1d, 0xe4, 0xdd, 0xb2, 0x24, 0x8f, 0xf9, 0xf5
-};
-
-
-static const uint8_t HMAC_SHA256_ciphertext_64B_digest[] = {
- 0xc5, 0x6d, 0x4f, 0x29, 0xf4, 0xd2, 0xcc, 0x87,
- 0x3c, 0x81, 0x02, 0x6d, 0x38, 0x7a, 0x67, 0x3e,
- 0x95, 0x9c, 0x5c, 0x8f, 0xda, 0x5c, 0x06, 0xe0,
- 0x65, 0xf1, 0x6c, 0x51, 0x52, 0x49, 0x3e, 0x5f
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_128B_digest[] = {
- 0x76, 0x64, 0x2d, 0x69, 0x71, 0x5d, 0x6a, 0xd8,
- 0x9f, 0x74, 0x11, 0x2f, 0x58, 0xe0, 0x4a, 0x2f,
- 0x6c, 0x88, 0x5e, 0x4d, 0x9c, 0x79, 0x83, 0x1c,
- 0x8a, 0x14, 0xd0, 0x07, 0xfb, 0xbf, 0x6c, 0x8f
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_256B_digest[] = {
- 0x05, 0xa7, 0x44, 0xcd, 0x91, 0x8c, 0x95, 0xcf,
- 0x7b, 0x8f, 0xd3, 0x90, 0x86, 0x7e, 0x7b, 0xb9,
- 0x05, 0xd6, 0x6e, 0x7a, 0xc1, 0x7b, 0x26, 0xff,
- 0xd3, 0x4b, 0xe0, 0x22, 0x8b, 0xa8, 0x47, 0x52
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_512B_digest[] = {
- 0x08, 0xb7, 0x29, 0x54, 0x18, 0x7e, 0x97, 0x49,
- 0xc6, 0x7c, 0x9f, 0x94, 0xa5, 0x4f, 0xa2, 0x25,
- 0xd0, 0xe2, 0x30, 0x7b, 0xad, 0x93, 0xc9, 0x12,
- 0x0f, 0xf0, 0xf0, 0x71, 0xc2, 0xf6, 0x53, 0x8f
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_768B_digest[] = {
- 0xe4, 0x3e, 0x73, 0x93, 0x03, 0xaf, 0x6f, 0x9c,
- 0xca, 0x57, 0x3b, 0x4a, 0x6e, 0x83, 0x58, 0xf5,
- 0x66, 0xc2, 0xb4, 0xa7, 0xe0, 0xee, 0x63, 0x6b,
- 0x48, 0xb7, 0x50, 0x45, 0x69, 0xdf, 0x5c, 0x5b
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_1024B_digest[] = {
- 0x03, 0xb9, 0x96, 0x26, 0xdc, 0x1c, 0xab, 0xe2,
- 0xf5, 0x70, 0x55, 0x15, 0x67, 0x6e, 0x48, 0x11,
- 0xe7, 0x67, 0xea, 0xfa, 0x5c, 0x6b, 0x28, 0x22,
- 0xc9, 0x0e, 0x67, 0x04, 0xb3, 0x71, 0x7f, 0x88
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_1280B_digest[] = {
- 0x01, 0x91, 0xb8, 0x78, 0xd3, 0x21, 0x74, 0xa5,
- 0x1c, 0x8b, 0xd4, 0xd2, 0xc0, 0x49, 0xd7, 0xd2,
- 0x16, 0x46, 0x66, 0x85, 0x50, 0x6d, 0x08, 0xcc,
- 0xc7, 0x0a, 0xa3, 0x71, 0xcc, 0xde, 0xee, 0xdc
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_1536B_digest[] = {
- 0xf2, 0xe5, 0xe9, 0x57, 0x53, 0xd7, 0x69, 0x28,
- 0x7b, 0x69, 0xb5, 0x49, 0xa3, 0x31, 0x56, 0x5f,
- 0xa4, 0xe9, 0x87, 0x26, 0x2f, 0xe0, 0x2d, 0xd6,
- 0x08, 0x44, 0x01, 0x71, 0x0c, 0x93, 0x85, 0x84
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_1792B_digest[] = {
- 0xf6, 0x57, 0x62, 0x01, 0xbf, 0x2d, 0xea, 0x4a,
- 0xef, 0x43, 0x85, 0x60, 0x18, 0xdf, 0x8b, 0xb4,
- 0x60, 0xc0, 0xfd, 0x2f, 0x90, 0x15, 0xe6, 0x91,
- 0x56, 0x61, 0x68, 0x7f, 0x5e, 0x92, 0xa8, 0xdd
-};
-
-static const uint8_t HMAC_SHA256_ciphertext_2048B_digest[] = {
- 0x81, 0x1a, 0x29, 0xbc, 0x6b, 0x9f, 0xbb, 0xb8,
- 0xef, 0x71, 0x7b, 0x1f, 0x6f, 0xd4, 0x7e, 0x68,
- 0x3a, 0x9c, 0xb9, 0x98, 0x22, 0x81, 0xfa, 0x95,
- 0xee, 0xbc, 0x7f, 0x23, 0x29, 0x88, 0x76, 0xb8
-};
-
-struct crypto_data_params {
- const char *name;
- uint16_t length;
- const char *plaintext;
- struct crypto_expected_output {
- const uint8_t *ciphertext;
- const uint8_t *digest;
- } expected;
-};
-
-#define MAX_PACKET_SIZE_INDEX 10
-
-struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
- { "64B", 64, &plaintext_quote[sizeof(plaintext_quote) - 1 - 64],
- { AES_CBC_ciphertext_64B, HMAC_SHA256_ciphertext_64B_digest } },
- { "128B", 128, &plaintext_quote[sizeof(plaintext_quote) - 1 - 128],
- { AES_CBC_ciphertext_128B, HMAC_SHA256_ciphertext_128B_digest } },
- { "256B", 256, &plaintext_quote[sizeof(plaintext_quote) - 1 - 256],
- { AES_CBC_ciphertext_256B, HMAC_SHA256_ciphertext_256B_digest } },
- { "512B", 512, &plaintext_quote[sizeof(plaintext_quote) - 1 - 512],
- { AES_CBC_ciphertext_512B, HMAC_SHA256_ciphertext_512B_digest } },
- { "768B", 768, &plaintext_quote[sizeof(plaintext_quote) - 1 - 768],
- { AES_CBC_ciphertext_768B, HMAC_SHA256_ciphertext_768B_digest } },
- { "1024B", 1024, &plaintext_quote[sizeof(plaintext_quote) - 1 - 1024],
- { AES_CBC_ciphertext_1024B, HMAC_SHA256_ciphertext_1024B_digest } },
- { "1280B", 1280, &plaintext_quote[sizeof(plaintext_quote) - 1 - 1280],
- { AES_CBC_ciphertext_1280B, HMAC_SHA256_ciphertext_1280B_digest } },
- { "1536B", 1536, &plaintext_quote[sizeof(plaintext_quote) - 1 - 1536],
- { AES_CBC_ciphertext_1536B, HMAC_SHA256_ciphertext_1536B_digest } },
- { "1792B", 1792, &plaintext_quote[sizeof(plaintext_quote) - 1 - 1792],
- { AES_CBC_ciphertext_1792B, HMAC_SHA256_ciphertext_1792B_digest } },
- { "2048B", 2048, &plaintext_quote[sizeof(plaintext_quote) - 1 - 2048],
- { AES_CBC_ciphertext_2048B, HMAC_SHA256_ciphertext_2048B_digest } }
-};
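-
-/*
- * Illustrative sketch (not from the original suite): selecting the
- * expected output for a given packet size from the table above.
- */
-static inline const struct crypto_expected_output *
-find_expected_output(uint16_t pkt_len)
-{
- unsigned int i;
-
- for (i = 0; i < MAX_PACKET_SIZE_INDEX; i++)
- if (aes_cbc_hmac_sha256_output[i].length == pkt_len)
- return &aes_cbc_hmac_sha256_output[i].expected;
- return NULL;
-}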
-
-static int
-test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
-{
- uint32_t num_to_submit = 4096;
- struct rte_crypto_op *c_ops[num_to_submit];
- struct rte_crypto_op *proc_ops[num_to_submit];
- uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
- uint32_t burst_sent, burst_received;
- uint32_t i, burst_size, num_sent, num_received;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
- struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices available. Is kernel driver loaded?\n");
- return TEST_FAILED;
- }
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_128_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
- ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
- ut_params->cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
- ut_params->auth_xform.auth.key.data = hmac_sha256_key;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA256;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
-
- /* Create Crypto session */
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
- TEST_ASSERT_NOT_NULL(test_crypto_session, "Session creation failed");
-
- rte_cryptodev_sym_session_init(ts_params->dev_id, test_crypto_session,
- &ut_params->cipher_xform, ts_params->sess_mp);
-
- /* Generate Crypto op data structure(s) */
- for (i = 0; i < num_to_submit ; i++) {
- struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
- data_params[0].expected.ciphertext,
- data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
- DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest,
- "no room to append digest");
-
- rte_memcpy(ut_params->digest, data_params[0].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
-
-
- struct rte_crypto_op *op =
- rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
-
- rte_crypto_op_attach_sym_session(op, test_crypto_session);
-
- op->sym->auth.digest.data = ut_params->digest;
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- data_params[0].length);
-
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = data_params[0].length;
-
- rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
- aes_cbc_128_iv, CIPHER_IV_LENGTH_AES_CBC);
-
- op->sym->cipher.data.offset = 0;
- op->sym->cipher.data.length = data_params[0].length;
-
- op->sym->m_src = m;
-
- c_ops[i] = op;
- }
-
- printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
- "algorithm with a constant request size of %u.",
- data_params[0].length);
- printf("\nThis test will keep retries at 0 and only measure IA cycle "
- "cost for each request.");
- printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
- printf("\tRetries (Device Busy)\tAverage IA cycle cost "
- "(assuming 0 retries)");
- for (i = 2; i <= 128 ; i *= 2) {
- num_sent = 0;
- num_received = 0;
- retries = 0;
- failed_polls = 0;
- burst_size = i;
- total_cycles = 0;
- while (num_sent < num_to_submit) {
- start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num,
- 0, &c_ops[num_sent],
- ((num_to_submit-num_sent) < burst_size) ?
- num_to_submit-num_sent : burst_size);
- if (burst_sent == 0)
- retries++;
- else
- num_sent += burst_sent;
- end_cycles = rte_rdtsc_precise();
- total_cycles += (end_cycles - start_cycles);
- /*
- * Wait until requests have been sent.
- */
- rte_delay_ms(1);
-
- start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(
- dev_num, 0, proc_ops, burst_size);
- if (burst_received == 0)
- failed_polls++;
- else
- num_received += burst_received;
- end_cycles = rte_rdtsc_precise();
- total_cycles += end_cycles - start_cycles;
- }
-
- while (num_received != num_to_submit) {
- if (gbl_driver_id ==
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)))
- rte_cryptodev_enqueue_burst(dev_num, 0,
- NULL, 0);
-
- burst_received = rte_cryptodev_dequeue_burst(
- dev_num, 0, proc_ops, burst_size);
- if (burst_received == 0)
- failed_polls++;
- else
- num_received += burst_received;
- }
-
- printf("\n%u\t%u\t%u\t\t%u\t\t%u", dev_num, 0,
- num_sent, num_received, burst_size);
- printf("\t\t%"PRIu64, retries);
- printf("\t\t\t%"PRIu64, total_cycles/num_received);
- }
- printf("\n");
-
- for (i = 0; i < num_to_submit ; i++) {
- rte_pktmbuf_free(c_ops[i]->sym->m_src);
- rte_crypto_op_free(c_ops[i]);
- }
- return TEST_SUCCESS;
-}
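-
-/*
- * Minimal sketch of the timing pattern used above (names illustrative):
- * only the cycles spent inside the burst call are accumulated; the 1 ms
- * settle delay stays outside the timed window.
- */
-static inline uint16_t
-timed_enqueue(uint8_t dev_id, uint16_t qp, struct rte_crypto_op **ops,
- uint16_t nb_ops, uint64_t *cycles)
-{
- uint64_t start = rte_rdtsc_precise();
- uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp, ops, nb_ops);
-
- *cycles += rte_rdtsc_precise() - start;
- return sent;
-}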
-
-static int
-test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
-{
- uint32_t num_to_submit = pparams->total_operations;
- struct rte_crypto_op *c_ops[num_to_submit];
- struct rte_crypto_op *proc_ops[num_to_submit];
- uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
- uint32_t burst_sent = 0, burst_received = 0;
- uint32_t i, burst_size, num_sent, num_ops_received;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- static struct rte_cryptodev_sym_session *sess;
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices found. Is PMD build configured?\n");
- printf("\nAnd is kernel driver loaded for HW PMDs?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_snow3g_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
- struct rte_mbuf *m = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
- TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
-
- struct rte_crypto_op *op =
- rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
-
- op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size);
- TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
-
- c_ops[i] = op;
- }
-
- if (pparams->chain == AEAD)
- printf("\nOn %s dev%u qp%u, %s, aead algo:%s, "
- "Packet Size %u bytes",
- pmd_name(gbl_driver_id),
- ts_params->dev_id, 0,
- chain_mode_name(pparams->chain),
- rte_crypto_aead_algorithm_strings[pparams->aead_algo],
- pparams->buf_size);
- else
- printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
- "Packet Size %u bytes",
- pmd_name(gbl_driver_id),
- ts_params->dev_id, 0,
- chain_mode_name(pparams->chain),
- rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
- rte_crypto_auth_algorithm_strings[pparams->auth_algo],
- pparams->buf_size);
- printf("\nOps Tx\tOps Rx\tOps/burst ");
- printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
-
- for (i = 2; i <= 128 ; i *= 2) {
- num_sent = 0;
- num_ops_received = 0;
- retries = 0;
- failed_polls = 0;
- burst_size = i;
- total_cycles = 0;
- while (num_sent < num_to_submit) {
- start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(ts_params->dev_id,
- 0, &c_ops[num_sent],
- ((num_to_submit-num_sent) < burst_size) ?
- num_to_submit-num_sent : burst_size);
- end_cycles = rte_rdtsc_precise();
- if (burst_sent == 0)
- retries++;
- num_sent += burst_sent;
- total_cycles += (end_cycles - start_cycles);
-
- /* Wait until requests have been sent. */
-
- rte_delay_ms(1);
-
- start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops, burst_size);
- end_cycles = rte_rdtsc_precise();
- if (burst_received < burst_sent)
- failed_polls++;
- num_ops_received += burst_received;
-
- total_cycles += end_cycles - start_cycles;
- }
-
- while (num_ops_received != num_to_submit) {
- if (gbl_driver_id ==
- rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)))
- rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
- NULL, 0);
- start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops, burst_size);
- end_cycles = rte_rdtsc_precise();
- total_cycles += end_cycles - start_cycles;
- if (burst_received == 0)
- failed_polls++;
- num_ops_received += burst_received;
- }
-
- printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
- printf("\t\t%"PRIu64, retries);
- printf("\t%"PRIu64, failed_polls);
- printf("\t\t%"PRIu64, total_cycles/num_ops_received);
- printf("\t\t%"PRIu64, (total_cycles/num_ops_received)*burst_size);
- printf("\t\t%"PRIu64, total_cycles/(num_ops_received*pparams->buf_size));
- }
- printf("\n");
-
- for (i = 0; i < num_to_submit ; i++) {
- rte_pktmbuf_free(c_ops[i]->sym->m_src);
- rte_crypto_op_free(c_ops[i]);
- }
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id,
- sess);
- rte_cryptodev_sym_session_free(sess);
-
- return TEST_SUCCESS;
-}
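-
-/*
- * Sketch of the drain pattern used above: software PMDs such as aesni_mb
- * can hold operations internally, so a zero-length enqueue is issued to
- * flush them before polling again (names as in the function above).
- *
- * while (num_ops_received != num_to_submit) {
- * rte_cryptodev_enqueue_burst(dev_id, 0, NULL, 0);
- * num_ops_received += rte_cryptodev_dequeue_burst(dev_id, 0,
- * proc_ops, burst_size);
- * }
- */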
-
-static int
-test_perf_snow3G_vary_burst_size(void)
-{
- unsigned total_operations = 4096;
- /* No need to vary pkt size for QAT; it should have no effect on IA cycles. */
- uint16_t buf_lengths[] = {40};
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_ONLY,
- .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_NULL,
- },
- {
- .chain = HASH_ONLY,
- .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
- .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- .key_length = 16
- },
- };
-
- printf("\n\nStart %s.", __func__);
- printf("\nThis Test measures the average IA cycle cost using a "
- "constant request(packet) size. ");
- printf("Cycle cost is only valid when indicators show device is not busy,"
- " i.e. Retries and EmptyPolls = 0");
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- printf("\n");
- params_set[i].total_operations = total_operations;
-
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
-
- test_perf_snow3G_optimise_cyclecount(&params_set[i]);
- }
-
- }
-
- return 0;
-}
-
-static int
-test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
-{
- uint32_t num_to_submit = pparams->total_operations;
- struct rte_crypto_op *c_ops[num_to_submit];
- struct rte_crypto_op *proc_ops[num_to_submit];
- uint64_t failed_polls, retries, start_cycles,
- end_cycles, total_cycles = 0;
- uint32_t burst_sent = 0, burst_received = 0;
- uint32_t i, burst_size, num_sent, num_ops_received;
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- static struct rte_crypto_op *(*test_perf_set_crypto_op)
- (struct rte_crypto_op *, struct rte_mbuf *,
- struct rte_cryptodev_sym_session *,
- unsigned int,
- enum chain_mode);
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices found. Is PMD build configured?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_openssl_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo,
- pparams->aead_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
- struct rte_mbuf *m = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
- TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
-
- struct rte_crypto_op *op =
- rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
-
- if (pparams->chain == AEAD)
- test_perf_set_crypto_op =
- test_perf_set_crypto_op_aes_gcm;
- else {
- switch (pparams->cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
- break;
- default:
- return TEST_FAILED;
- }
- }
-
- op = test_perf_set_crypto_op(op, m, sess, pparams->buf_size,
- pparams->chain);
- TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
-
- c_ops[i] = op;
- }
-
- if (pparams->chain == AEAD)
- printf("\nOn %s dev%u qp%u, %s, aead_algo:%s, "
- "key length:%u, Packet Size %u bytes",
- pmd_name(gbl_driver_id),
- ts_params->dev_id, 0,
- chain_mode_name(pparams->chain),
- rte_crypto_aead_algorithm_strings[pparams->aead_algo],
- pparams->key_length,
- pparams->buf_size);
- else
- printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
- "key length:%u, Packet Size %u bytes",
- pmd_name(gbl_driver_id),
- ts_params->dev_id, 0,
- chain_mode_name(pparams->chain),
- rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
- rte_crypto_auth_algorithm_strings[pparams->auth_algo],
- pparams->key_length,
- pparams->buf_size);
- printf("\nOps Tx\tOps Rx\tOps/burst ");
- printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\t"
- "IACycles/Byte");
-
- for (i = 2; i <= 128 ; i *= 2) {
- num_sent = 0;
- num_ops_received = 0;
- retries = 0;
- failed_polls = 0;
- burst_size = i;
- total_cycles = 0;
- while (num_sent < num_to_submit) {
- start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(
- ts_params->dev_id,
- 0, &c_ops[num_sent],
- ((num_to_submit - num_sent) <
- burst_size) ?
- num_to_submit - num_sent : burst_size);
- end_cycles = rte_rdtsc_precise();
- if (burst_sent == 0)
- retries++;
- num_sent += burst_sent;
- total_cycles += (end_cycles - start_cycles);
-
- /* Wait until requests have been sent. */
- rte_delay_ms(1);
-
- start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops,
- burst_size);
- end_cycles = rte_rdtsc_precise();
- if (burst_received < burst_sent)
- failed_polls++;
- num_ops_received += burst_received;
-
- total_cycles += end_cycles - start_cycles;
- }
-
- while (num_ops_received != num_to_submit) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
- NULL, 0);
-
- start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops,
- burst_size);
- end_cycles = rte_rdtsc_precise();
-
- total_cycles += end_cycles - start_cycles;
- if (burst_received == 0)
- failed_polls++;
- num_ops_received += burst_received;
- }
-
- printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
- printf("\t\t%"PRIu64, retries);
- printf("\t%"PRIu64, failed_polls);
- printf("\t\t%"PRIu64, total_cycles/num_ops_received);
- printf("\t\t%"PRIu64, (total_cycles/num_ops_received) *
- burst_size);
- printf("\t\t%"PRIu64,
- total_cycles /
- (num_ops_received * pparams->buf_size));
- }
- printf("\n");
-
- for (i = 0; i < num_to_submit ; i++) {
- rte_pktmbuf_free(c_ops[i]->sym->m_src);
- rte_crypto_op_free(c_ops[i]);
- }
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
- rte_cryptodev_sym_session_free(sess);
-
- return TEST_SUCCESS;
-}
-
-static int
-test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
-{
- uint32_t num_to_submit = pparams->total_operations;
- struct rte_crypto_op *c_ops[num_to_submit];
- struct rte_crypto_op *proc_ops[num_to_submit];
- uint64_t failed_polls, retries, start_cycles, end_cycles,
- total_cycles = 0;
- uint32_t burst_sent = 0, burst_received = 0;
- uint32_t i, burst_size, num_sent, num_ops_received;
- uint32_t nb_ops;
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices found. Is PMD build configured?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_armv8_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate Crypto op data structure(s)*/
- for (i = 0; i < num_to_submit ; i++) {
- struct rte_mbuf *m = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
- TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
-
- struct rte_crypto_op *op =
- rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
-
- op = test_perf_set_crypto_op_aes(op, m, sess, pparams->buf_size,
- pparams->chain);
- TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
-
- c_ops[i] = op;
- }
-
- printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
- "auth_algo:%s, Packet Size %u bytes",
- pmd_name(gbl_driver_id),
- ts_params->dev_id, 0,
- chain_mode_name(pparams->chain),
- rte_crypto_cipher_algorithm_strings[pparams->cipher_algo],
- pparams->key_length,
- rte_crypto_auth_algorithm_strings[pparams->auth_algo],
- pparams->buf_size);
- printf("\nOps Tx\tOps Rx\tOps/burst ");
- printf("Retries "
- "EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
-
- for (i = 2; i <= 128 ; i *= 2) {
- num_sent = 0;
- num_ops_received = 0;
- retries = 0;
- failed_polls = 0;
- burst_size = i;
- total_cycles = 0;
- while (num_sent < num_to_submit) {
- if ((num_to_submit - num_sent) < burst_size)
- nb_ops = num_to_submit - num_sent;
- else
- nb_ops = burst_size;
-
- start_cycles = rte_rdtsc();
- burst_sent = rte_cryptodev_enqueue_burst(
- ts_params->dev_id,
- 0, &c_ops[num_sent],
- nb_ops);
- end_cycles = rte_rdtsc();
-
- if (burst_sent == 0)
- retries++;
- num_sent += burst_sent;
- total_cycles += (end_cycles - start_cycles);
-
- start_cycles = rte_rdtsc();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops,
- burst_size);
- end_cycles = rte_rdtsc();
- if (burst_received < burst_sent)
- failed_polls++;
- num_ops_received += burst_received;
-
- total_cycles += end_cycles - start_cycles;
- }
-
- while (num_ops_received != num_to_submit) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(
- ts_params->dev_id, 0, NULL, 0);
-
- start_cycles = rte_rdtsc();
- burst_received = rte_cryptodev_dequeue_burst(
- ts_params->dev_id, 0, proc_ops, burst_size);
- end_cycles = rte_rdtsc();
-
- total_cycles += end_cycles - start_cycles;
- if (burst_received == 0)
- failed_polls++;
- num_ops_received += burst_received;
- }
-
- printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
- printf("\t\t%"PRIu64, retries);
- printf("\t%"PRIu64, failed_polls);
- printf("\t\t%"PRIu64, total_cycles/num_ops_received);
- printf("\t\t%"PRIu64,
- (total_cycles/num_ops_received)*burst_size);
- printf("\t\t%"PRIu64,
- total_cycles/(num_ops_received*pparams->buf_size));
- }
- printf("\n");
-
- for (i = 0; i < num_to_submit ; i++) {
- rte_pktmbuf_free(c_ops[i]->sym->m_src);
- rte_crypto_op_free(c_ops[i]);
- }
-
- return TEST_SUCCESS;
-}
-
-static uint32_t get_auth_key_max_length(enum rte_crypto_auth_algorithm algo)
-{
- switch (algo) {
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- return 16;
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- return 64;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- return 128;
- default:
- return 0;
- }
-}
-
-static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo)
-{
- switch (algo) {
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- return 4;
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- return TRUNCATED_DIGEST_BYTE_LENGTH_SHA1;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- return TRUNCATED_DIGEST_BYTE_LENGTH_SHA224;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- return TRUNCATED_DIGEST_BYTE_LENGTH_SHA256;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- return TRUNCATED_DIGEST_BYTE_LENGTH_SHA384;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- return TRUNCATED_DIGEST_BYTE_LENGTH_SHA512;
- default:
- return 0;
- }
-}
-
-static uint32_t get_aead_digest_length(enum rte_crypto_aead_algorithm algo)
-{
- switch (algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- return DIGEST_BYTE_LENGTH_AES_GCM;
- default:
- return 0;
- }
-}
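-
-/*
- * Sketch (not in the original suite) of how the helpers above combine
- * when filling an auth transform; both return 0 for unsupported
- * algorithms, which the caller should treat as an error.
- */
-static inline int
-fill_auth_lengths(struct rte_crypto_sym_xform *xform,
- enum rte_crypto_auth_algorithm algo)
-{
- xform->auth.key.length = get_auth_key_max_length(algo);
- xform->auth.digest_length = get_auth_digest_length(algo);
-
- return (xform->auth.key.length && xform->auth.digest_length) ? 0 : -1;
-}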
-
-static uint8_t aes_key[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static uint8_t aes_iv[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static uint8_t aes_gcm_aad[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static uint8_t triple_des_key[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static uint8_t triple_des_iv[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static uint8_t hmac_sha_key[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static uint8_t snow3g_cipher_key[] = {
- 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
- 0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
-};
-
-static uint8_t snow3g_iv[] = {
- 0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00,
- 0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
-};
-
-static uint8_t snow3g_hash_key[] = {
- 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9,
- 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E
-};
-
-static int
-test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_crypto_sym_xform cipher_xform = { 0 };
- struct rte_crypto_sym_xform auth_xform = { 0 };
-
-
- /* Setup Cipher Parameters */
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.algo = cipher_algo;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- cipher_xform.cipher.key.data = aes_key;
- cipher_xform.cipher.key.length = cipher_key_len;
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
- if (chain != CIPHER_ONLY) {
- /* Setup HMAC Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
- auth_xform.auth.key.data = hmac_sha_key;
- auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
- auth_xform.auth.digest_length =
- get_auth_digest_length(auth_algo);
- }
-
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
- switch (chain) {
- case CIPHER_HASH:
- cipher_xform.next = &auth_xform;
- auth_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- case HASH_CIPHER:
- auth_xform.next = &cipher_xform;
- cipher_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &auth_xform,
- ts_params->sess_mp);
- case CIPHER_ONLY:
- cipher_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- default:
- return -1;
- }
-}
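-
-/*
- * The chaining built by the switch above, spelled out: the session walks
- * the xform list head first, so the head transform runs first.
- *
- * CIPHER_HASH: cipher_xform -> auth_xform (encrypt, then authenticate)
- * HASH_CIPHER: auth_xform -> cipher_xform (authenticate, then encrypt)
- * CIPHER_ONLY: cipher_xform alone
- */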
-
-#define SNOW3G_CIPHER_IV_LENGTH 16
-
-static int
-test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo, unsigned cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_crypto_sym_xform cipher_xform = {0};
- struct rte_crypto_sym_xform auth_xform = {0};
-
-
- /* Setup Cipher Parameters */
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.algo = cipher_algo;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- cipher_xform.cipher.key.data = snow3g_cipher_key;
- cipher_xform.cipher.key.length = cipher_key_len;
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
-
-
- /* Setup HMAC Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
-
- auth_xform.auth.key.data = snow3g_hash_key;
- auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
- auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
- /* Auth IV will be after cipher IV */
- auth_xform.auth.iv.offset = IV_OFFSET + SNOW3G_CIPHER_IV_LENGTH;
- auth_xform.auth.iv.length = SNOW3G_CIPHER_IV_LENGTH;
-
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
- switch (chain) {
- case CIPHER_HASH:
- cipher_xform.next = &auth_xform;
- auth_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- case HASH_CIPHER:
- auth_xform.next = &cipher_xform;
- cipher_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &auth_xform,
- ts_params->sess_mp);
- case CIPHER_ONLY:
- cipher_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- case HASH_ONLY:
- auth_xform.next = NULL;
- /* Create Crypto session */
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &auth_xform,
- ts_params->sess_mp);
- default:
- return -1;
- }
-}
-
-static int
-test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int key_len,
- enum rte_crypto_auth_algorithm auth_algo,
- enum rte_crypto_aead_algorithm aead_algo)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_crypto_sym_xform cipher_xform = { 0 };
- struct rte_crypto_sym_xform auth_xform = { 0 };
- struct rte_crypto_sym_xform aead_xform = { 0 };
-
- if (chain == CIPHER_HASH || chain == HASH_CIPHER) {
- /* Setup Cipher Parameters */
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.algo = cipher_algo;
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- switch (cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- cipher_xform.cipher.key.data = triple_des_key;
- cipher_xform.cipher.iv.length = TRIPLE_DES_CIPHER_IV_LENGTH;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- cipher_xform.cipher.key.data = aes_key;
- cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
- break;
- default:
- return -1;
- }
-
- cipher_xform.cipher.key.length = key_len;
-
- /* Setup Auth Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
-
- switch (auth_algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- auth_xform.auth.key.data = hmac_sha_key;
- break;
- default:
- return -1;
- }
-
- auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
- auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
- } else if (chain == AEAD) {
- /* Setup AEAD Parameters */
- aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
- aead_xform.aead.algo = aead_algo;
- aead_xform.aead.iv.offset = IV_OFFSET;
-
- switch (aead_algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- aead_xform.aead.key.data = aes_key;
- aead_xform.aead.iv.length = AES_CIPHER_IV_LENGTH;
- aead_xform.aead.aad_length = AES_GCM_AAD_LENGTH;
- aead_xform.aead.digest_length = get_aead_digest_length(aead_algo);
- break;
- default:
- return -1;
- }
-
- aead_xform.aead.key.length = key_len;
- }
-
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
- switch (chain) {
- case CIPHER_HASH:
- cipher_xform.next = &auth_xform;
- auth_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- case HASH_CIPHER:
- auth_xform.next = &cipher_xform;
- cipher_xform.next = NULL;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &auth_xform,
- ts_params->sess_mp);
- case AEAD:
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &aead_xform,
- ts_params->sess_mp);
- default:
- return -1;
- }
-}
-
-static int
-test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
- enum rte_crypto_cipher_algorithm cipher_algo,
- unsigned int cipher_key_len,
- enum rte_crypto_auth_algorithm auth_algo)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_crypto_sym_xform cipher_xform = { 0 };
- struct rte_crypto_sym_xform auth_xform = { 0 };
-
- /* Setup Cipher Parameters */
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.cipher.algo = cipher_algo;
-
- switch (cipher_algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- cipher_xform.cipher.key.data = aes_cbc_128_key;
- break;
- default:
- return -1;
- }
-
- cipher_xform.cipher.key.length = cipher_key_len;
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.iv.length = AES_CIPHER_IV_LENGTH;
-
- /* Setup Auth Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
-
- auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
-
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
-
- switch (chain) {
- case CIPHER_HASH:
- cipher_xform.next = &auth_xform;
- auth_xform.next = NULL;
- /* Encrypt and hash the result */
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &cipher_xform,
- ts_params->sess_mp);
- case HASH_CIPHER:
- auth_xform.next = &cipher_xform;
- cipher_xform.next = NULL;
- /* Hash encrypted message and decrypt */
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- /* Create Crypto session*/
- return rte_cryptodev_sym_session_init(dev_id,
- test_crypto_session, &auth_xform,
- ts_params->sess_mp);
- default:
- return -1;
- }
-}
-
-static struct rte_mbuf *
-test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
-{
- struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
-
- if (m == NULL)
- return NULL;
-
- if (rte_pktmbuf_append(m, buf_sz) == NULL) {
- rte_pktmbuf_free(m);
- return NULL;
- }
-
- memset(rte_pktmbuf_mtod(m, uint8_t *), 0, buf_sz);
-
- return m;
-}
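-
-/*
- * Usage sketch for the helper above: allocate a zeroed test buffer and
- * bail out if the pool is exhausted (mempool name as used elsewhere in
- * this file).
- *
- * struct rte_mbuf *m = test_perf_create_pktmbuf(ts_params->mbuf_mp, 512);
- * if (m == NULL)
- * return TEST_FAILED;
- * ...
- * rte_pktmbuf_free(m);
- */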
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain)
-{
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- /* Authentication Parameters */
- if (chain == CIPHER_ONLY) {
- op->sym->auth.digest.data = NULL;
- op->sym->auth.digest.phys_addr = 0;
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = 0;
- } else {
- op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m,
- uint8_t *, data_len);
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- data_len);
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = data_len;
- }
-
-
- /* Copy the IV at the end of the crypto operation */
- rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
- aes_iv, AES_CIPHER_IV_LENGTH);
-
- /* Cipher Parameters */
- op->sym->cipher.data.offset = 0;
- op->sym->cipher.data.length = data_len;
-
- op->sym->m_src = m;
-
- return op;
-}
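-
-/*
- * Buffer layout assumed by the op setup above: the digest sits directly
- * after the payload in the same mbuf.
- *
- * offset: 0 data_len data_len + digest_len
- * | payload (cipher/auth range) | digest |
- */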
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain __rte_unused)
-{
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- /* Authentication Parameters */
- op->sym->aead.digest.data = (uint8_t *)m->buf_addr +
- (m->data_off + data_len);
- op->sym->aead.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(m, data_len);
- op->sym->aead.aad.data = aes_gcm_aad;
-
- /* Copy IV at the end of the crypto operation */
- rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
- aes_iv, AES_CIPHER_IV_LENGTH);
-
- /* Data lengths/offsets Parameters */
- op->sym->aead.data.offset = 0;
- op->sym->aead.data.length = data_len;
-
- op->sym->m_src = m;
-
- return op;
-}
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len)
-{
- uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *, IV_OFFSET);
-
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- rte_memcpy(iv_ptr, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
-
- /* Authentication Parameters */
- op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
- (m->data_off + data_len);
- op->sym->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(m, data_len);
-
- /* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = data_len << 3;
-
- op->sym->cipher.data.offset = 0;
- op->sym->cipher.data.length = data_len << 3;
-
- op->sym->m_src = m;
-
- return op;
-}
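-
-/*
- * Note on "data_len << 3" above: SNOW 3G (UEA2/UIA2) expresses cipher
- * and auth data lengths in bits, so byte counts are multiplied by 8. A
- * one-line sketch of the conversion:
- */
-static inline uint32_t
-bytes_to_bits(unsigned int len)
-{
- return (uint32_t)len << 3;
-}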
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_snow3g_cipher(struct rte_crypto_op *op,
- struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess,
- unsigned data_len)
-{
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- /* Copy IV at the end of the crypto operation */
- rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
- snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
-
- /* Cipher Parameters */
- op->sym->cipher.data.offset = 0;
- op->sym->cipher.data.length = data_len << 3;
-
- op->sym->m_src = m;
-
- return op;
-}
-
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
- struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess,
- unsigned int data_len)
-{
- uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *, IV_OFFSET);
-
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- rte_memcpy(iv_ptr, snow3g_iv, SNOW3G_CIPHER_IV_LENGTH);
-
- /* Authentication Parameters */
-
- op->sym->auth.digest.data =
- rte_pktmbuf_mtod_offset(m, uint8_t *, data_len);
- /* physical address must point at the same offset as digest.data */
- op->sym->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(m, data_len);
-
- /* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = data_len << 3;
-
- op->sym->m_src = m;
-
- return op;
-}
-
-
-static inline struct rte_crypto_op *
-test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- enum chain_mode chain __rte_unused)
-{
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- /* Authentication Parameters */
- op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
- (m->data_off + data_len);
- op->sym->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(m, data_len);
-
- /* Copy IV at the end of the crypto operation */
- rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
- triple_des_iv, TRIPLE_DES_CIPHER_IV_LENGTH);
-
- /* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = 0;
- op->sym->auth.data.length = data_len;
-
- op->sym->cipher.data.offset = 0;
- op->sym->cipher.data.length = data_len;
-
- op->sym->m_src = m;
-
- return op;
-}
-
-/* An mbuf set is used in each burst. An mbuf can be used by multiple bursts at
- * the same time; since the payloads are never dereferenced, there is no need
- * to wait until one burst has finished before reusing an mbuf in the next. */
-#define NUM_MBUF_SETS 8
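-
-/*
- * Sketch of the round-robin indexing the burst loops below rely on,
- * where j counts bursts and i indexes within a burst:
- *
- * m = mbufs[i + pparams->burst_size * (j % NUM_MBUF_SETS)];
- *
- * so each of the NUM_MBUF_SETS sets is reused once every NUM_MBUF_SETS
- * bursts.
- */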
-
-static int
-test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
- struct perf_test_params *pparams)
-{
- uint16_t i, k, l, m;
- uint16_t j = 0;
- uint16_t ops_unused = 0;
-
- uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
- uint64_t processed = 0, failed_polls = 0, retries = 0;
- uint64_t tsc_start = 0, tsc_end = 0;
-
- uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
-
- struct rte_crypto_op *ops[pparams->burst_size];
- struct rte_crypto_op *proc_ops[pparams->burst_size];
-
- struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices available. Is kernel driver loaded?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_aes_sha_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate a burst of crypto operations */
- for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
- mbufs[i] = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
-
- if (mbufs[i] == NULL) {
- printf("\nFailed to get mbuf - freeing the rest.\n");
- for (k = 0; k < i; k++)
- rte_pktmbuf_free(mbufs[k]);
- return -1;
- }
-
- /* Make room for Digest in mbuf */
- if (pparams->chain != CIPHER_ONLY)
- rte_pktmbuf_append(mbufs[i], digest_length);
- }
-
-
- tsc_start = rte_rdtsc_precise();
-
- while (total_enqueued < pparams->total_operations) {
- uint16_t burst_size =
- total_enqueued+pparams->burst_size <= pparams->total_operations ?
- pparams->burst_size : pparams->total_operations-total_enqueued;
- uint16_t ops_needed = burst_size-ops_unused;
-
- if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
- printf("\nFailed to alloc enough ops, finish dequeuing "
- "and free ops below.");
- } else {
- for (i = 0; i < ops_needed; i++)
- ops[i] = test_perf_set_crypto_op_aes(ops[i],
- mbufs[i + (pparams->burst_size *
- (j % NUM_MBUF_SETS))],
- sess, pparams->buf_size,
- pparams->chain);
-
- /* enqueue burst */
- burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
- queue_id, ops, burst_size);
-
- if (burst_enqueued < burst_size)
- retries++;
-
- ops_unused = burst_size-burst_enqueued;
- total_enqueued += burst_enqueued;
- }
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (l = 0; l < burst_dequeued; l++)
- rte_crypto_op_free(proc_ops[l]);
- }
- j++;
- }
-
- /* Dequeue any operations still in the crypto device */
- while (processed < pparams->total_operations) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (m = 0; m < burst_dequeued; m++)
- rte_crypto_op_free(proc_ops[m]);
- }
- }
-
- tsc_end = rte_rdtsc_precise();
-
- double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
- double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
-
- printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size, ops_s/1000000,
- throughput, retries, failed_polls);
-
- for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
- rte_pktmbuf_free(mbufs[i]);
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
- rte_cryptodev_sym_session_free(sess);
-
- printf("\n");
- return TEST_SUCCESS;
-}
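-
-/*
- * The throughput math above, spelled out (tsc_hz = rte_get_tsc_hz()):
- *
- * ops_s = processed * tsc_hz / (tsc_end - tsc_start)
- * Gbps = ops_s * buf_size * 8 / 1e9
- *
- * i.e. operations per second scaled by the number of bits per buffer.
- */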
-
-
-static int
-test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
- struct perf_test_params *pparams)
-{
- uint16_t i, k, l, m;
- uint16_t j = 0;
- uint16_t ops_unused = 0;
- uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
- uint64_t processed = 0, failed_polls = 0, retries = 0;
- uint64_t tsc_start = 0, tsc_end = 0;
-
- uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
-
- struct rte_crypto_op *ops[pparams->burst_size];
- struct rte_crypto_op *proc_ops[pparams->burst_size];
-
- struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices found. Is PMD build configured?\n");
- printf("\nAnd is kernel driver loaded for HW PMDs?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_snow3g_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate a burst of crypto operations */
- for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
- /*
- * Allocate buf_size plus digest length so the digest can be
- * written after the payload in the same mbuf.
- */
- mbufs[i] = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size +
- digest_length);
-
- if (mbufs[i] == NULL) {
- printf("\nFailed to get mbuf - freeing the rest.\n");
- for (k = 0; k < i; k++)
- rte_pktmbuf_free(mbufs[k]);
- return -1;
- }
-
- }
-
- tsc_start = rte_rdtsc_precise();
-
- while (total_enqueued < pparams->total_operations) {
- uint16_t burst_size =
- (total_enqueued+pparams->burst_size)
- <= pparams->total_operations ?
- pparams->burst_size : pparams->total_operations-total_enqueued;
- uint16_t ops_needed = burst_size-ops_unused;
- /* Handle the last burst correctly */
- uint16_t op_offset = pparams->burst_size - burst_size;
-
- if (ops_needed !=
- rte_crypto_op_bulk_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- ops+op_offset, ops_needed)) {
- printf("\nFailed to alloc enough ops.");
- /*Don't exit, dequeue, more ops should become available*/
- } else {
- for (i = 0; i < ops_needed; i++) {
- if (pparams->chain == HASH_ONLY)
- ops[i+op_offset] =
- test_perf_set_crypto_op_snow3g_hash(ops[i+op_offset],
- mbufs[i +
- (pparams->burst_size * (j % NUM_MBUF_SETS))],
- sess,
- pparams->buf_size);
- else if (pparams->chain == CIPHER_ONLY)
- ops[i+op_offset] =
- test_perf_set_crypto_op_snow3g_cipher(ops[i+op_offset],
- mbufs[i +
- (pparams->burst_size * (j % NUM_MBUF_SETS))],
- sess,
- pparams->buf_size);
- else
- return 1;
- }
-
- /* enqueue burst */
- burst_enqueued =
- rte_cryptodev_enqueue_burst(dev_id, queue_id,
- ops+op_offset, burst_size);
-
- if (burst_enqueued < burst_size)
- retries++;
-
- ops_unused = burst_size-burst_enqueued;
- total_enqueued += burst_enqueued;
- }
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0) {
- failed_polls++;
- } else {
- processed += burst_dequeued;
- for (l = 0; l < burst_dequeued; l++)
- rte_crypto_op_free(proc_ops[l]);
- }
- j++;
- }
-
- /* Dequeue any operations still in the crypto device */
- while (processed < pparams->total_operations) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
- for (m = 0; m < burst_dequeued; m++)
- rte_crypto_op_free(proc_ops[m]);
- }
- }
-
- tsc_end = rte_rdtsc_precise();
-
- double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
- double cycles_burst = (double) (tsc_end - tsc_start) /
- (double) processed * pparams->burst_size;
- double cycles_buff = (double) (tsc_end - tsc_start) / (double) processed;
- double cycles_B = cycles_buff / pparams->buf_size;
- double throughput = (ops_s * pparams->buf_size * 8) / 1000000;
-
- if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) {
- /* Cycle counts are misleading for HW devices in this test, so don't print them */
- printf("%4u\t%6.2f\t%10.2f\t n/a \t\t n/a "
- "\t\t n/a \t\t%8"PRIu64"\t%8"PRIu64,
- pparams->buf_size, ops_s/1000000,
- throughput, retries, failed_polls);
- } else {
- printf("%4u\t%6.2f\t%10.2f\t%10.2f\t%8.2f"
- "\t%8.2f\t%8"PRIu64"\t%8"PRIu64,
- pparams->buf_size, ops_s/1000000, throughput, cycles_burst,
- cycles_buff, cycles_B, retries, failed_polls);
- }
-
- for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
- rte_pktmbuf_free(mbufs[i]);
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
- rte_cryptodev_sym_session_free(sess);
-
- printf("\n");
- return TEST_SUCCESS;
-}
-
-static int
-test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
- struct perf_test_params *pparams)
-{
- uint16_t i, k, l, m;
- uint16_t j = 0;
- uint16_t ops_unused = 0;
-
- uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
- uint64_t processed = 0, failed_polls = 0, retries = 0;
- uint64_t tsc_start = 0, tsc_end = 0;
-
- struct rte_crypto_op *ops[pparams->burst_size];
- struct rte_crypto_op *proc_ops[pparams->burst_size];
-
- struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- static struct rte_crypto_op *(*test_perf_set_crypto_op)
- (struct rte_crypto_op *, struct rte_mbuf *,
- struct rte_cryptodev_sym_session *,
- unsigned int,
- enum chain_mode);
-
- if (pparams->chain == AEAD)
- test_perf_set_crypto_op =
- test_perf_set_crypto_op_aes_gcm;
- else {
- switch (pparams->cipher_algo) {
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
- break;
- default:
- return TEST_FAILED;
- }
- }
-
- if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices found. Is PMD build configured?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_openssl_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo,
- pparams->aead_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate a burst of crypto operations */
- for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
- mbufs[i] = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
-
- if (mbufs[i] == NULL) {
- printf("\nFailed to get mbuf - freeing the rest.\n");
- for (k = 0; k < i; k++)
- rte_pktmbuf_free(mbufs[k]);
- return -1;
- }
- }
-
- tsc_start = rte_rdtsc_precise();
-
- while (total_enqueued < pparams->total_operations) {
- uint16_t burst_size =
- total_enqueued + pparams->burst_size <=
- pparams->total_operations ? pparams->burst_size :
- pparams->total_operations - total_enqueued;
- uint16_t ops_needed = burst_size - ops_unused;
-
- if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
- printf("\nFailed to alloc enough ops, finish dequeuing "
- "and free ops below.");
- } else {
- for (i = 0; i < ops_needed; i++)
- ops[i] = test_perf_set_crypto_op(ops[i],
- mbufs[i + (pparams->burst_size *
- (j % NUM_MBUF_SETS))],
- sess, pparams->buf_size,
- pparams->chain);
-
- /* enqueue burst */
- burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
- queue_id, ops, burst_size);
-
- if (burst_enqueued < burst_size)
- retries++;
-
- ops_unused = burst_size - burst_enqueued;
- total_enqueued += burst_enqueued;
- }
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (l = 0; l < burst_dequeued; l++)
- rte_crypto_op_free(proc_ops[l]);
- }
- j++;
- }
-
- /* Dequeue any operations still in the crypto device */
- while (processed < pparams->total_operations) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (m = 0; m < burst_dequeued; m++)
- rte_crypto_op_free(proc_ops[m]);
- }
- }
-
- tsc_end = rte_rdtsc_precise();
-
- double ops_s = ((double)processed / (tsc_end - tsc_start))
- * rte_get_tsc_hz();
- double throughput = (ops_s * pparams->buf_size * NUM_MBUF_SETS)
- / 1000000000;
-
- printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
- ops_s / 1000000, throughput, retries, failed_polls);
-
- for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
- rte_pktmbuf_free(mbufs[i]);
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
- rte_cryptodev_sym_session_free(sess);
-
- printf("\n");
- return TEST_SUCCESS;
-}
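
For reference, the reporting math above reduces to a small standalone sketch. Two assumptions are worth flagging: tsc_hz stands in for the value returned by rte_get_tsc_hz(), and NUM_MBUF_SETS is taken to be 8, which makes the buf_size * NUM_MBUF_SETS product in the Gbps formula double as the bytes-to-bits conversion:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the per-run report: operations/s from the TSC delta,
 * then gigabits/s from the buffer size (8 bits per byte). */
static void
report_throughput(uint64_t processed, uint64_t tsc_start, uint64_t tsc_end,
		uint64_t tsc_hz, unsigned int buf_size)
{
	double ops_s = ((double)processed / (tsc_end - tsc_start)) * tsc_hz;
	double gbps = (ops_s * buf_size * 8) / 1000000000.0;

	printf("%u B: %.2f Mops/s, %.2f Gbps\n",
			buf_size, ops_s / 1000000.0, gbps);
}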
-
-static int
-test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
- struct perf_test_params *pparams)
-{
- uint16_t i, k, l, m;
- uint16_t j = 0;
- uint16_t ops_unused = 0;
- uint16_t burst_size;
- uint16_t ops_needed;
-
- uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
- uint64_t processed = 0, failed_polls = 0, retries = 0;
- uint64_t tsc_start = 0, tsc_end = 0;
-
- struct rte_crypto_op *ops[pparams->burst_size];
- struct rte_crypto_op *proc_ops[pparams->burst_size];
-
- struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
-
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- static struct rte_cryptodev_sym_session *sess;
-
- if (rte_cryptodev_count() == 0) {
-		printf("\nNo crypto devices found. "
-			"Is the crypto PMD enabled in the build config?\n");
- return TEST_FAILED;
- }
-
- /* Create Crypto session*/
- if (test_perf_create_armv8_session(ts_params->dev_id,
- pparams->chain, pparams->cipher_algo,
- pparams->key_length, pparams->auth_algo) == 0)
- sess = test_crypto_session;
- else
- sess = NULL;
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- /* Generate a burst of crypto operations */
- for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
- mbufs[i] = test_perf_create_pktmbuf(
- ts_params->mbuf_mp,
- pparams->buf_size);
-
- if (mbufs[i] == NULL) {
- printf("\nFailed to get mbuf - freeing the rest.\n");
- for (k = 0; k < i; k++)
- rte_pktmbuf_free(mbufs[k]);
- return -1;
- }
- }
-
- tsc_start = rte_rdtsc();
-
- while (total_enqueued < pparams->total_operations) {
- if ((total_enqueued + pparams->burst_size) <=
- pparams->total_operations)
- burst_size = pparams->burst_size;
- else
- burst_size = pparams->total_operations - total_enqueued;
-
- ops_needed = burst_size - ops_unused;
-
- if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
-				RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)) {
-			printf("\nFailed to allocate enough ops; "
-				"finishing dequeue and freeing ops below.");
- } else {
- for (i = 0; i < ops_needed; i++)
- ops[i] = test_perf_set_crypto_op_aes(ops[i],
- mbufs[i + (pparams->burst_size *
- (j % NUM_MBUF_SETS))], sess,
- pparams->buf_size,
- pparams->chain);
-
- /* enqueue burst */
- burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
- queue_id, ops, burst_size);
-
- if (burst_enqueued < burst_size)
- retries++;
-
- ops_unused = burst_size - burst_enqueued;
- total_enqueued += burst_enqueued;
- }
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (l = 0; l < burst_dequeued; l++)
- rte_crypto_op_free(proc_ops[l]);
- }
- j++;
- }
-
- /* Dequeue any operations still in the crypto device */
- while (processed < pparams->total_operations) {
-		/* Send a zero-length burst to flush the software crypto device */
- rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (m = 0; m < burst_dequeued; m++)
- rte_crypto_op_free(proc_ops[m]);
- }
- }
-
- tsc_end = rte_rdtsc();
-
- double ops_s = ((double)processed / (tsc_end - tsc_start))
- * rte_get_tsc_hz();
- double throughput = (ops_s * pparams->buf_size * NUM_MBUF_SETS)
- / 1000000000;
-
- printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
- ops_s / 1000000, throughput, retries, failed_polls);
-
- for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
- rte_pktmbuf_free(mbufs[i]);
-
- printf("\n");
- return TEST_SUCCESS;
-}
-
-/*
-
- perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA1);
- perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_256);
- perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_512);
-
- perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA1);
- perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_256);
- perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_512);
-
- perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA1);
- perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_256);
- perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_512);
- */
-static int
-test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
-{
- unsigned total_operations = 1000000;
- unsigned burst_size = 32;
- unsigned buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048 };
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_ONLY,
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_NULL
- },
- {
- .chain = CIPHER_HASH,
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 32,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 32,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 32,
- .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
- },
- };
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
-
- params_set[i].total_operations = total_operations;
- params_set[i].burst_size = burst_size;
- printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- rte_crypto_cipher_algorithm_strings[params_set[i].cipher_algo],
- rte_crypto_auth_algorithm_strings[params_set[i].auth_algo],
- params_set[i].key_length,
- burst_size);
- printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
- "Retries\tEmptyPolls\n");
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
- test_perf_aes_sha(testsuite_params.dev_id, 0,
- &params_set[i]);
- }
- }
- return 0;
-}
-
-static int
-test_perf_snow3G_vary_pkt_size(void)
-{
- unsigned total_operations = 1000000;
- uint8_t i, j;
- unsigned k;
- uint16_t burst_sizes[] = { 64 };
- uint16_t buf_lengths[] = { 40, 64, 80, 120, 240, 256, 400, 512, 600, 1024, 2048 };
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_ONLY,
- .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_NULL,
- },
- {
- .chain = HASH_ONLY,
- .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
- .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- .key_length = 16
- },
- };
-
- printf("\n\nStart %s.", __func__);
- printf("\nTest to measure max throughput at various pkt sizes.");
-	printf("\nOn HW devices throughput is maximised when Retries and "
-		"EmptyPolls are high, so cycle cost is not relevant (n/a displayed).");
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- printf("\n\n");
- params_set[i].total_operations = total_operations;
- for (k = 0; k < RTE_DIM(burst_sizes); k++) {
- enum rte_crypto_cipher_algorithm cipher_algo =
- params_set[i].cipher_algo;
- enum rte_crypto_auth_algorithm auth_algo =
- params_set[i].auth_algo;
- printf("\nOn %s dev%u qp%u, %s, "
- "cipher algo:%s, auth algo:%s, burst_size: %d ops",
- pmd_name(gbl_driver_id),
- testsuite_params.dev_id, 0,
- chain_mode_name(params_set[i].chain),
- rte_crypto_cipher_algorithm_strings[cipher_algo],
- rte_crypto_auth_algorithm_strings[auth_algo],
- burst_sizes[k]);
-
- params_set[i].burst_size = burst_sizes[k];
- printf("\nPktSzB\tOp/s(M)\tThruput(Mbps)\tCycles/Burst\t"
- "Cycles/buf\tCycles/B\tRetries\t\tEmptyPolls\n");
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
-
- params_set[i].buf_size = buf_lengths[j];
-
- test_perf_snow3g(testsuite_params.dev_id, 0, &params_set[i]);
- }
- }
- }
-
- return 0;
-}
-
-static int
-test_perf_openssl_vary_pkt_size(void)
-{
- unsigned int total_operations = 10000;
-	unsigned int burst_size = 64;
- unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
- 1792, 2048 };
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .key_length = 24,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .key_length = 32,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .key_length = 24,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = AEAD,
-
- .aead_algo = RTE_CRYPTO_AEAD_AES_GCM,
- .key_length = 16,
- },
- };
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- params_set[i].total_operations = total_operations;
- params_set[i].burst_size = burst_size;
- if (params_set[i].chain == AEAD) {
- enum rte_crypto_aead_algorithm aead_algo =
- params_set[i].aead_algo;
- printf("\n%s. aead algo: %s key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- rte_crypto_aead_algorithm_strings[aead_algo],
- params_set[i].key_length,
- burst_size);
- } else {
- enum rte_crypto_cipher_algorithm cipher_algo =
- params_set[i].cipher_algo;
- enum rte_crypto_auth_algorithm auth_algo =
- params_set[i].auth_algo;
- printf("\n%s. cipher algo: %s auth algo: %s key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- rte_crypto_cipher_algorithm_strings[cipher_algo],
- rte_crypto_auth_algorithm_strings[auth_algo],
- params_set[i].key_length,
- burst_size);
- }
- printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
- "EmptyPolls\n");
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
- test_perf_openssl(testsuite_params.dev_id, 0,
- &params_set[i]);
- }
- }
-
- return 0;
-}
-
-static int
-test_perf_openssl_vary_burst_size(void)
-{
- unsigned int total_operations = 4096;
- uint16_t buf_lengths[] = { 40 };
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .key_length = 24,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .key_length = 32,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .key_length = 24,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = AEAD,
-
- .aead_algo = RTE_CRYPTO_AEAD_AES_GCM,
- .key_length = 16,
- },
- };
-
- printf("\n\nStart %s.", __func__);
-	printf("\nThis test measures the average IA cycle cost using a "
-		"constant request (packet) size. ");
-	printf("Cycle cost is only valid when the indicators show the device "
-		"is not busy, i.e. Retries and EmptyPolls = 0.");
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- printf("\n");
- params_set[i].total_operations = total_operations;
-
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
- test_perf_openssl_optimise_cyclecount(&params_set[i]);
- }
- }
-
- return 0;
-}
-
-static int
-test_perf_armv8_vary_pkt_size(void)
-{
- unsigned int total_operations = 100000;
-	unsigned int burst_size = 64;
- unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
- 1792, 2048 };
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = HASH_CIPHER,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- {
- .chain = HASH_CIPHER,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- };
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- params_set[i].total_operations = total_operations;
- params_set[i].burst_size = burst_size;
- printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set[i].chain),
- rte_crypto_cipher_algorithm_strings[params_set[i].cipher_algo],
- rte_crypto_auth_algorithm_strings[params_set[i].auth_algo],
- params_set[i].key_length,
- burst_size);
- printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
- "EmptyPolls\n");
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
- test_perf_armv8(testsuite_params.dev_id, 0,
- &params_set[i]);
- }
- }
-
- return 0;
-}
-
-static int
-test_perf_armv8_vary_burst_size(void)
-{
- unsigned int total_operations = 4096;
- uint16_t buf_lengths[] = { 64 };
- uint8_t i, j;
-
- struct perf_test_params params_set[] = {
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = HASH_CIPHER,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- },
- {
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- {
- .chain = HASH_CIPHER,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
- },
- };
-
- printf("\n\nStart %s.", __func__);
-	printf("\nThis test measures the average IA cycle cost using a "
-		"constant request (packet) size. ");
-	printf("Cycle cost is only valid when the indicators show the device "
-		"is not busy, i.e. Retries and EmptyPolls = 0.");
-
- for (i = 0; i < RTE_DIM(params_set); i++) {
- printf("\n");
- params_set[i].total_operations = total_operations;
-
- for (j = 0; j < RTE_DIM(buf_lengths); j++) {
- params_set[i].buf_size = buf_lengths[j];
- test_perf_armv8_optimise_cyclecount(&params_set[i]);
- }
- }
-
- return 0;
-}
-
-static int
-test_perf_aes_cbc_vary_burst_size(void)
-{
- return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
-}
-
-
-static struct rte_cryptodev_sym_session *
-test_perf_create_session(uint8_t dev_id, struct perf_test_params *pparams)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_crypto_sym_xform aead_xform = { 0 };
-
- uint8_t aead_key[pparams->session_attrs->key_aead_len];
-
- memcpy(aead_key, pparams->session_attrs->key_aead_data,
- pparams->session_attrs->key_aead_len);
-
- aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- aead_xform.next = NULL;
-
- aead_xform.aead.algo = pparams->session_attrs->aead_algorithm;
- aead_xform.aead.op = pparams->session_attrs->aead;
- aead_xform.aead.key.data = aead_key;
- aead_xform.aead.key.length = pparams->session_attrs->key_aead_len;
- aead_xform.aead.iv.length = pparams->session_attrs->iv_len;
- aead_xform.aead.iv.offset = IV_OFFSET;
- aead_xform.aead.aad_length = pparams->session_attrs->aad_len;
- aead_xform.aead.digest_length = pparams->session_attrs->digest_len;
-
- test_crypto_session = rte_cryptodev_sym_session_create(ts_params->sess_mp);
-
- rte_cryptodev_sym_session_init(dev_id, test_crypto_session,
- &aead_xform, ts_params->sess_mp);
-
- return test_crypto_session;
-}
-
-static inline struct rte_crypto_op *
-perf_gcm_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess,
- struct crypto_params *m_hlp,
- struct perf_test_params *params)
-{
- uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *, IV_OFFSET);
-
- if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
- rte_crypto_op_free(op);
- return NULL;
- }
-
- op->sym->aead.digest.data = m_hlp->digest;
- op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- m,
- params->session_attrs->aad_len +
- params->symmetric_op->p_len);
-
-
- op->sym->aead.aad.data = m_hlp->aad;
- op->sym->aead.aad.phys_addr = rte_pktmbuf_mtophys(m);
-
- rte_memcpy(op->sym->aead.aad.data, params->symmetric_op->aad_data,
- params->session_attrs->aad_len);
-
- rte_memcpy(iv_ptr, params->session_attrs->iv_data,
- params->session_attrs->iv_len);
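-	/* For a 96-bit (12-byte) GCM IV the initial counter block J0
-	 * (NIST SP 800-38D) is IV || 0^31 || 1, so the trailing byte of
-	 * the 16-byte IV area is forced to 1 below.
-	 */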
- if (params->session_attrs->iv_len == 12)
- iv_ptr[15] = 1;
-
- op->sym->aead.data.offset =
- params->session_attrs->aad_len;
- op->sym->aead.data.length = params->symmetric_op->p_len;
-
- op->sym->m_src = m;
-
- return op;
-}
-
-static struct rte_mbuf *
-test_perf_create_pktmbuf_fill(struct rte_mempool *mpool,
- struct perf_test_params *params,
- unsigned buf_sz, struct crypto_params *m_hlp)
-{
- struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
- uint16_t aad_len = params->session_attrs->aad_len;
- uint16_t digest_size = params->symmetric_op->t_len;
- char *p;
-
- p = rte_pktmbuf_append(m, aad_len);
- if (p == NULL) {
- rte_pktmbuf_free(m);
- return NULL;
- }
- m_hlp->aad = (uint8_t *)p;
-
- p = rte_pktmbuf_append(m, buf_sz);
- if (p == NULL) {
- rte_pktmbuf_free(m);
- return NULL;
- }
- rte_memcpy(p, params->symmetric_op->p_data, buf_sz);
-
- p = rte_pktmbuf_append(m, digest_size);
- if (p == NULL) {
- rte_pktmbuf_free(m);
- return NULL;
- }
- m_hlp->digest = (uint8_t *)p;
-
- return m;
-}
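
test_perf_create_pktmbuf_fill() appends three regions to a single mbuf, and the AEAD pointers wired up in perf_gcm_set_crypto_op() above index into exactly that layout. A sketch of the layout as read from the code (not a documented ABI):

  offset 0          aad_len               aad_len + buf_sz
  | AAD ........... | plaintext ......... | digest (t_len) |

  aead.aad.data    -> offset 0
  aead.data        -> [aad_len, aad_len + p_len)
  aead.digest.data -> offset aad_len + p_len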
-
-static int
-perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
- struct perf_test_params *pparams, uint32_t test_ops)
-{
- int j = 0;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *ops[pparams->burst_size];
- struct rte_crypto_op *proc_ops[pparams->burst_size];
- uint32_t total_operations = pparams->total_operations;
-
- uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
- uint64_t processed = 0, failed_polls = 0, retries = 0;
- uint64_t tsc_start = 0, tsc_end = 0;
-
- uint16_t i = 0, l = 0, m = 0;
- uint16_t burst = pparams->burst_size * NUM_MBUF_SETS;
- uint16_t ops_unused = 0;
-
- struct rte_mbuf *mbufs[burst];
- struct crypto_params m_hlp[burst];
-
- if (rte_cryptodev_count() == 0) {
-		printf("\nNo crypto devices available. "
-			"Is the kernel driver loaded?\n");
- return TEST_FAILED;
- }
-
- sess = test_perf_create_session(dev_id, pparams);
- TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
-
- for (i = 0; i < burst; i++) {
- mbufs[i] = test_perf_create_pktmbuf_fill(
- ts_params->mbuf_mp,
- pparams, pparams->symmetric_op->p_len,
- &m_hlp[i]);
- }
-
- if (test_ops)
- total_operations = test_ops;
-
- tsc_start = rte_rdtsc_precise();
- while (total_enqueued < total_operations) {
- uint16_t burst_size =
- total_enqueued+pparams->burst_size <= total_operations ?
- pparams->burst_size : total_operations-total_enqueued;
- uint16_t ops_needed = burst_size-ops_unused;
-
- if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
-			RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)) {
-			printf("\nFailed to allocate enough ops; "
-				"finishing dequeue");
- } else {
- for (i = 0; i < ops_needed; i++)
- ops[i] = perf_gcm_set_crypto_op(ops[i],
- mbufs[i + (pparams->burst_size *
- (j % NUM_MBUF_SETS))],
- sess, &m_hlp[i + (pparams->burst_size *
- (j % NUM_MBUF_SETS))], pparams);
-
- /* enqueue burst */
- burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
- queue_id, ops, burst_size);
-
- if (burst_enqueued < burst_size)
- retries++;
-
- ops_unused = burst_size-burst_enqueued;
- total_enqueued += burst_enqueued;
- }
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (l = 0; l < burst_dequeued; l++)
- rte_crypto_op_free(proc_ops[l]);
- }
-
- j++;
- }
-
- /* Dequeue any operations still in the crypto device */
- while (processed < total_operations) {
-		/* Send a zero-length burst to flush the software crypto device */
- rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
-
- /* dequeue burst */
- burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
- proc_ops, pparams->burst_size);
- if (burst_dequeued == 0)
- failed_polls++;
- else {
- processed += burst_dequeued;
-
- for (m = 0; m < burst_dequeued; m++) {
- if (test_ops) {
- uint8_t *pkt = rte_pktmbuf_mtod(
- proc_ops[m]->sym->m_src,
- uint8_t *);
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- pparams->symmetric_op->c_data,
- pkt +
- pparams->session_attrs->aad_len,
- pparams->symmetric_op->c_len,
- "GCM Ciphertext data not as expected");
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- pparams->symmetric_op->t_data,
- pkt +
- pparams->session_attrs->aad_len +
- pparams->symmetric_op->c_len,
- pparams->symmetric_op->t_len,
- "GCM MAC data not as expected");
-
- }
- rte_crypto_op_free(proc_ops[m]);
- }
- }
- }
-
- tsc_end = rte_rdtsc_precise();
-
- double ops_s = ((double)processed / (tsc_end - tsc_start))
- * rte_get_tsc_hz();
- double throughput = (ops_s * pparams->symmetric_op->p_len * 8)
- / 1000000000;
-
- if (!test_ops) {
- printf("\n%u\t\t%6.2f\t%16.2f\t%8"PRIu64"\t%10"PRIu64,
- pparams->symmetric_op->p_len,
- ops_s/1000000, throughput, retries, failed_polls);
- }
-
- for (i = 0; i < burst; i++)
- rte_pktmbuf_free(mbufs[i]);
-
- rte_cryptodev_sym_session_clear(ts_params->dev_id, sess);
- rte_cryptodev_sym_session_free(sess);
-
- return 0;
-}
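
Every perf loop in this file follows the same enqueue/poll/drain shape. The condensed sketch below captures it; session setup, op population, and op freeing are elided, and all names are placeholders:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_cryptodev.h>

/* Enqueue until total_ops are submitted, polling the completion side
 * on every iteration, then drain what is still in flight. */
static void
perf_loop_sketch(uint8_t dev_id, uint16_t queue_id,
		struct rte_crypto_op **ops, struct rte_crypto_op **done,
		uint16_t burst, uint64_t total_ops)
{
	uint64_t enq = 0, deq = 0, retries = 0, empty_polls = 0;

	while (enq < total_ops) {
		uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, queue_id,
				ops, burst);
		if (sent < burst)
			retries++;	/* device queue was full this round */
		enq += sent;

		uint16_t got = rte_cryptodev_dequeue_burst(dev_id, queue_id,
				done, burst);
		if (got == 0)
			empty_polls++;	/* nothing completed on this poll */
		deq += got;
	}
	/* Zero-length enqueues let software PMDs flush in-flight ops. */
	while (deq < total_ops) {
		rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
		deq += rte_cryptodev_dequeue_burst(dev_id, queue_id,
				done, burst);
	}
	printf("retries=%" PRIu64 " empty_polls=%" PRIu64 "\n",
			retries, empty_polls);
}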
-
-static int
-test_perf_AES_GCM(int continual_buf_len, int continual_size)
-{
- uint16_t i, j, k, loops = 1;
-
- uint16_t buf_lengths[] = { 64, 128, 256, 512, 1024, 1536, 2048 };
-
- static const struct cryptodev_perf_test_data *gcm_tests[] = {
- &AES_GCM_128_12IV_0AAD
- };
-
- if (continual_buf_len)
- loops = continual_size;
-
- int TEST_CASES_GCM = RTE_DIM(gcm_tests);
-
- const unsigned burst_size = 32;
-
- struct symmetric_op ops_set[TEST_CASES_GCM];
- struct perf_test_params params_set[TEST_CASES_GCM];
- struct symmetric_session_attrs session_attrs[TEST_CASES_GCM];
- static const struct cryptodev_perf_test_data *gcm_test;
-
- for (i = 0; i < TEST_CASES_GCM; ++i) {
-
- gcm_test = gcm_tests[i];
-
- session_attrs[i].aead =
- RTE_CRYPTO_AEAD_OP_ENCRYPT;
- session_attrs[i].aead_algorithm =
- RTE_CRYPTO_AEAD_AES_GCM;
- session_attrs[i].key_aead_data =
- gcm_test->key.data;
- session_attrs[i].key_aead_len =
- gcm_test->key.len;
- session_attrs[i].aad_len = gcm_test->aad.len;
- session_attrs[i].digest_len =
- gcm_test->auth_tag.len;
- session_attrs[i].iv_len = gcm_test->iv.len;
- session_attrs[i].iv_data = gcm_test->iv.data;
-
- ops_set[i].aad_data = gcm_test->aad.data;
- ops_set[i].p_data = gcm_test->plaintext.data;
- ops_set[i].p_len = buf_lengths[i];
- ops_set[i].c_data = gcm_test->ciphertext.data;
- ops_set[i].c_len = buf_lengths[i];
- ops_set[i].t_data = gcm_test->auth_tags[i].data;
- ops_set[i].t_len = gcm_test->auth_tags[i].len;
-
- params_set[i].chain = AEAD;
- params_set[i].session_attrs = &session_attrs[i];
- params_set[i].symmetric_op = &ops_set[i];
- if (continual_buf_len)
- params_set[i].total_operations = 0xFFFFFF;
- else
- params_set[i].total_operations = 1000000;
-
- params_set[i].burst_size = burst_size;
-
- }
-
- if (continual_buf_len)
- printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
- " burst size: %u", "AES_GCM", "AES_GCM",
- gcm_test->key.len << 3, burst_size);
-
- for (i = 0; i < RTE_DIM(gcm_tests); i++) {
-
- if (!continual_buf_len) {
- printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
- " burst size: %u", "AES_GCM", "AES_GCM",
- gcm_test->key.len << 3, burst_size);
- printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
- " Retries\tEmptyPolls");
- }
-
- uint16_t len = RTE_DIM(buf_lengths);
- uint16_t p = 0;
-
- if (continual_buf_len) {
- for (k = 0; k < RTE_DIM(buf_lengths); k++)
- if (buf_lengths[k] == continual_buf_len) {
- len = k + 1;
- p = k;
- break;
- }
- }
- for (j = p; j < len; ++j) {
-
- params_set[i].symmetric_op->c_len = buf_lengths[j];
- params_set[i].symmetric_op->p_len = buf_lengths[j];
-
- ops_set[i].t_data = gcm_tests[i]->auth_tags[j].data;
- ops_set[i].t_len = gcm_tests[i]->auth_tags[j].len;
-
-			/* Run it twice: once for encryption/hash checks,
-			 * once for perf
- */
- if (perf_AES_GCM(testsuite_params.dev_id, 0,
- &params_set[i], 1))
- return TEST_FAILED;
-
- for (k = 0; k < loops; k++) {
- if (continual_buf_len)
- printf("\n\nBuffer Size(B)\tOPS(M)\t"
- "Throughput(Gbps)\t"
- "Retries\tEmptyPolls");
- if (perf_AES_GCM(testsuite_params.dev_id, 0,
- &params_set[i], 0))
- return TEST_FAILED;
- if (continual_buf_len)
- printf("\n\nCompleted loop %i of %i ...",
- k+1, loops);
- }
- }
-
- }
- printf("\n");
- return 0;
-}
-
-static int test_cryptodev_perf_AES_GCM(void)
-{
- return test_perf_AES_GCM(0, 0);
-}
-/*
- * This function runs the AES GCM performance tests, taking the
- * packet size as an argument. If the given size is not in the
- * buf_lengths array, all sizes will be used.
- */
-static int test_continual_perf_AES_GCM(void)
-{
- return test_perf_AES_GCM(1024, 10);
-}
-
-static int
-test_perf_continual_performance_test(void)
-{
- unsigned int total_operations = 0xFFFFFF;
- unsigned int total_loops = 10;
- unsigned int burst_size = 32;
- uint8_t i;
-
- struct perf_test_params params_set = {
- .total_operations = total_operations,
- .burst_size = burst_size,
- .buf_size = 1024,
-
- .chain = CIPHER_HASH,
-
- .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .key_length = 16,
- .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
- };
-
- for (i = 1; i <= total_loops; ++i) {
- printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
- " burst_size: %d ops\n",
- chain_mode_name(params_set.chain),
- rte_crypto_cipher_algorithm_strings[params_set.cipher_algo],
- rte_crypto_auth_algorithm_strings[params_set.auth_algo],
- params_set.key_length,
- burst_size);
- printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
- "Retries\tEmptyPolls\n");
- test_perf_aes_sha(testsuite_params.dev_id, 0,
- &params_set);
- printf("\nCompleted loop %i of %i ...", i, total_loops);
- }
- return 0;
-}
-
-static struct unit_test_suite cryptodev_qat_continual_testsuite = {
- .suite_name = "Crypto Device Continual Performance Test",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_continual_performance_test),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_continual_perf_AES_GCM),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_testsuite = {
- .suite_name = "Crypto Device Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_cryptodev_perf_AES_GCM),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_aes_cbc_vary_burst_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
- .suite_name = "Crypto Device DPAA2_SEC Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_gcm_testsuite = {
- .suite_name = "Crypto Device AESNI GCM Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_cryptodev_perf_AES_GCM),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_aes_testsuite = {
- .suite_name = "Crypto Device AESNI MB Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_snow3g_testsuite = {
- .suite_name = "Crypto Device SNOW3G Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_snow3G_vary_pkt_size),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_snow3G_vary_burst_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_openssl_testsuite = {
- .suite_name = "Crypto Device OPENSSL Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_openssl_vary_pkt_size),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_openssl_vary_burst_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static struct unit_test_suite cryptodev_armv8_testsuite = {
- .suite_name = "Crypto Device ARMv8 Unit Test Suite",
- .setup = testsuite_setup,
- .teardown = testsuite_teardown,
- .unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_armv8_vary_pkt_size),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_armv8_vary_burst_size),
- TEST_CASES_END() /**< NULL terminate unit test array */
- }
-};
-
-static int
-perftest_aesni_gcm_cryptodev(void)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "AESNI GCM PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_gcm_testsuite);
-}
-
-static int
-perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "AESNI MB PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_AESNI_MB is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_aes_testsuite);
-}
-
-static int
-perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_testsuite);
-}
-
-static int
-perftest_sw_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "SNOW3G PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_SNOW3G is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
-}
-
-static int
-perftest_qat_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
-}
-
-static int
-perftest_openssl_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "OpenSSL PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_OPENSSL is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_openssl_testsuite);
-}
-
-static int
-perftest_qat_continual_cryptodev(void)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_qat_continual_testsuite);
-}
-
-static int
-perftest_sw_armv8_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "ARMV8 PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_ARMV8 is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_armv8_testsuite);
-}
-
-static int
-perftest_dpaa2_sec_cryptodev(void)
-{
- gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
-
- if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "DPAA2 SEC PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC is enabled "
- "in config file to run this testsuite.\n");
- return TEST_FAILED;
- }
-
- return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
-}
-
-REGISTER_TEST_COMMAND(cryptodev_aesni_mb_perftest, perftest_aesni_mb_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_qat_perftest, perftest_qat_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_perftest, perftest_sw_snow3g_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_qat_snow3g_perftest, perftest_qat_snow3g_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_perftest, perftest_aesni_gcm_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_openssl_perftest,
- perftest_openssl_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_qat_continual_perftest,
- perftest_qat_continual_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_sw_armv8_perftest,
- perftest_sw_armv8_cryptodev);
-REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_perftest,
- perftest_dpaa2_sec_cryptodev);
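
All of these suites hang off REGISTER_TEST_COMMAND() and are driven from the DPDK test binary's interactive prompt. A sketch of an invocation (the binary path and EAL flags vary by target and setup; crypto_openssl is the OpenSSL PMD's vdev name):

    $ ./build/app/test --vdev="crypto_openssl"
    RTE>> cryptodev_openssl_perftest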
diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 594d79d7..310109e6 100644
--- a/test/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
@@ -51,11 +51,7 @@
#include "process.h"
-#ifdef RTE_LIBRTE_XEN_DOM0
-#define DEFAULT_MEM_SIZE "30"
-#else
#define DEFAULT_MEM_SIZE "18"
-#endif
#define mp_flag "--proc-type=secondary"
#define no_hpet "--no-hpet"
#define no_huge "--no-huge"
@@ -809,72 +805,6 @@ test_no_huge_flag(void)
return 0;
}
-#ifdef RTE_LIBRTE_XEN_DOM0
-static int
-test_dom0_misc_flags(void)
-{
- char prefix[PATH_MAX], tmp[PATH_MAX];
-
- if (get_current_prefix(tmp, sizeof(tmp)) == NULL) {
- printf("Error - unable to get current prefix!\n");
- return -1;
- }
- snprintf(prefix, sizeof(prefix), "--file-prefix=%s", tmp);
-
-	/* Check that some general flags don't prevent things from working.
-	 * In all cases apart from the first, the app should run.
-	 * No further testing of the output is done.
- */
- /* sanity check - failure with invalid option */
- const char *argv0[] = {prgname, prefix, mp_flag, "-c", "1", "--invalid-opt"};
-
- /* With --no-pci */
- const char *argv1[] = {prgname, prefix, mp_flag, "-c", "1", "--no-pci"};
- /* With -v */
- const char *argv2[] = {prgname, prefix, mp_flag, "-c", "1", "-v"};
- /* With valid --syslog */
- const char *argv3[] = {prgname, prefix, mp_flag, "-c", "1",
- "--syslog", "syslog"};
- /* With empty --syslog (should fail) */
- const char *argv4[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog"};
- /* With invalid --syslog */
- const char *argv5[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog", "error"};
- /* With no-sh-conf */
- const char *argv6[] = {prgname, "-c", "1", "-n", "2", "-m", "20",
- "--no-shconf", "--file-prefix=noshconf" };
-
- if (launch_proc(argv0) == 0) {
- printf("Error - process ran ok with invalid flag\n");
- return -1;
- }
- if (launch_proc(argv1) != 0) {
- printf("Error - process did not run ok with --no-pci flag\n");
- return -1;
- }
- if (launch_proc(argv2) != 0) {
- printf("Error - process did not run ok with -v flag\n");
- return -1;
- }
- if (launch_proc(argv3) != 0) {
- printf("Error - process did not run ok with --syslog flag\n");
- return -1;
- }
- if (launch_proc(argv4) == 0) {
-		printf("Error - process ran ok with empty --syslog flag\n");
- return -1;
- }
- if (launch_proc(argv5) == 0) {
-		printf("Error - process ran ok with invalid --syslog flag\n");
- return -1;
- }
- if (launch_proc(argv6) != 0) {
- printf("Error - process did not run ok with --no-shconf flag\n");
- return -1;
- }
-
- return 0;
-}
-#else
static int
test_misc_flags(void)
{
@@ -1061,7 +991,6 @@ test_misc_flags(void)
}
return 0;
}
-#endif
static int
test_file_prefix(void)
@@ -1098,9 +1027,6 @@ test_file_prefix(void)
printf("Error - unable to get current prefix!\n");
return -1;
}
-#ifdef RTE_LIBRTE_XEN_DOM0
- return 0;
-#endif
/* check if files for current prefix are present */
if (process_hugefiles(prefix, HUGEPAGE_CHECK_EXISTS) != 1) {
@@ -1299,9 +1225,6 @@ test_memory_flags(void)
printf("Error - process failed with valid -m flag!\n");
return -1;
}
-#ifdef RTE_LIBRTE_XEN_DOM0
- return 0;
-#endif
if (launch_proc(argv2) == 0) {
printf("Error - process run ok with invalid (zero) --socket-mem!\n");
return -1;
@@ -1427,11 +1350,7 @@ test_eal_flags(void)
return ret;
}
-#ifdef RTE_LIBRTE_XEN_DOM0
- ret = test_dom0_misc_flags();
-#else
ret = test_misc_flags();
-#endif
if (ret < 0) {
printf("Error in test_misc_flags()");
return ret;
diff --git a/test/test/test_event_eth_rx_adapter.c b/test/test/test_event_eth_rx_adapter.c
new file mode 100644
index 00000000..90a5c646
--- /dev/null
+++ b/test/test/test_event_eth_rx_adapter.c
@@ -0,0 +1,454 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+
+#include <rte_event_eth_rx_adapter.h>
+
+#include "test.h"
+
+#define MAX_NUM_RX_QUEUE 64
+#define NB_MBUFS (8192 * num_ports * MAX_NUM_RX_QUEUE)
+#define MBUF_CACHE_SIZE 512
+#define MBUF_PRIV_SIZE 0
+#define TEST_INST_ID 0
+#define TEST_DEV_ID 0
+#define TEST_ETHDEV_ID 0
+
+struct event_eth_rx_adapter_test_params {
+ struct rte_mempool *mp;
+ uint16_t rx_rings, tx_rings;
+ uint32_t caps;
+};
+
+static struct event_eth_rx_adapter_test_params default_params;
+
+static inline int
+port_init(uint8_t port, struct rte_mempool *mp)
+{
+ static const struct rte_eth_conf port_conf_default = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = ETHER_MAX_LEN
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_hf = ETH_RSS_IP |
+ ETH_RSS_TCP |
+ ETH_RSS_UDP,
+ }
+ }
+ };
+ const uint16_t rx_ring_size = 512, tx_ring_size = 512;
+ struct rte_eth_conf port_conf = port_conf_default;
+ int retval;
+ uint16_t q;
+ struct rte_eth_dev_info dev_info;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
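+	/* Initial configure with zero queues; the device is reconfigured
+	 * below with the real queue counts once dev_info has been read.
+	 */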
+ retval = rte_eth_dev_configure(port, 0, 0, &port_conf);
+
+ rte_eth_dev_info_get(port, &dev_info);
+
+ default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
+ MAX_NUM_RX_QUEUE);
+ default_params.tx_rings = 1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, default_params.rx_rings,
+ default_params.tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ for (q = 0; q < default_params.rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
+ rte_eth_dev_socket_id(port), NULL, mp);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < default_params.tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ struct ether_addr addr;
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ (unsigned int)port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+ return 0;
+}
+
+static int
+init_ports(int num_ports)
+{
+ uint8_t portid;
+ int retval;
+
+ default_params.mp = rte_pktmbuf_pool_create("packet_pool",
+ NB_MBUFS,
+ MBUF_CACHE_SIZE,
+ MBUF_PRIV_SIZE,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (!default_params.mp)
+ return -ENOMEM;
+
+ for (portid = 0; portid < num_ports; portid++) {
+ retval = port_init(portid, default_params.mp);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int
+testsuite_setup(void)
+{
+ int err;
+ uint8_t count;
+ struct rte_event_dev_info dev_info;
+
+ count = rte_event_dev_count();
+ if (!count) {
+ printf("Failed to find a valid event device,"
+ " testing with event_skeleton device\n");
+ rte_vdev_init("event_skeleton", NULL);
+ }
+
+ struct rte_event_dev_config config = {
+ .nb_event_queues = 1,
+ .nb_event_ports = 1,
+ };
+
+ err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+ config.nb_event_queue_flows = dev_info.max_event_queue_flows;
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+ config.nb_events_limit =
+ dev_info.max_num_events;
+ err = rte_event_dev_configure(TEST_DEV_ID, &config);
+ TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
+ err);
+
+ /*
+	 * Eth devices such as octeontx use the event device to receive
+	 * packets, so rte_eth_dev_start() invokes rte_event_dev_start()
+	 * internally; hence init_ports() must be called after
+	 * rte_event_dev_configure().
+ */
+ err = init_ports(rte_eth_dev_count());
+ TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+ &default_params.caps);
+ TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n",
+ err);
+
+ return err;
+}
+
+static void
+testsuite_teardown(void)
+{
+ uint32_t i;
+ for (i = 0; i < rte_eth_dev_count(); i++)
+ rte_eth_dev_stop(i);
+
+ rte_mempool_free(default_params.mp);
+}
+
+static int
+adapter_create(void)
+{
+ int err;
+ struct rte_event_dev_info dev_info;
+ struct rte_event_port_conf rx_p_conf;
+
+ err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ rx_p_conf.new_event_threshold = dev_info.max_num_events;
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+ err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+ &rx_p_conf);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ return err;
+}
+
+static void
+adapter_free(void)
+{
+ rte_event_eth_rx_adapter_free(TEST_INST_ID);
+}
+
+static int
+adapter_create_free(void)
+{
+ int err;
+
+ struct rte_event_port_conf rx_p_conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+ NULL);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
+ &rx_p_conf);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_create(TEST_INST_ID,
+ TEST_DEV_ID, &rx_p_conf);
+ TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
+
+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+ err = rte_event_eth_rx_adapter_free(1);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+ return TEST_SUCCESS;
+}
+
+static int
+adapter_queue_add_del(void)
+{
+ int err;
+ struct rte_event ev;
+ uint32_t cap;
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+ &cap);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ queue_config.rx_queue_flags = 0;
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+ ev.flow_id = 1;
+ queue_config.rx_queue_flags =
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ }
+ queue_config.ev = ev;
+ queue_config.servicing_weight = 1;
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ rte_eth_dev_count(),
+ -1, &queue_config);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ } else {
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ 0,
+ &queue_config);
+		TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID, -1,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ }
+
+ err = rte_event_eth_rx_adapter_queue_add(1, TEST_ETHDEV_ID, -1,
+ &queue_config);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ return TEST_SUCCESS;
+}
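
An rx_queue_id of -1 in the add/del calls above targets every Rx queue of the port at once. Outside the test harness, bring-up reduces to the following sketch (all identifiers are placeholders; the eventdev and ethdev are assumed already configured, and error handling is elided):

#include <rte_event_eth_rx_adapter.h>
#include <rte_eventdev.h>

/* Create the adapter, subscribe all Rx queues of one port, start it. */
static void
rx_adapter_bringup_sketch(uint8_t adapter_id, uint8_t event_dev_id,
		uint8_t eth_port_id, struct rte_event_port_conf *port_conf)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
		},
		.servicing_weight = 1,
	};

	rte_event_eth_rx_adapter_create(adapter_id, event_dev_id, port_conf);
	/* -1 subscribes every Rx queue of the port. */
	rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1,
			&qconf);
	rte_event_eth_rx_adapter_start(adapter_id);
}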
+
+static int
+adapter_start_stop(void)
+{
+ int err;
+ struct rte_event ev;
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+ queue_config.rx_queue_flags = 0;
+ if (default_params.caps &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+ ev.flow_id = 1;
+ queue_config.rx_queue_flags =
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ }
+
+ queue_config.ev = ev;
+ queue_config.servicing_weight = 1;
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
+ -1, &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
+ -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_start(TEST_INST_ID);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_stop(TEST_INST_ID);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_start(1);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_stop(1);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ return TEST_SUCCESS;
+}
+
+static int
+adapter_stats(void)
+{
+ int err;
+ struct rte_event_eth_rx_adapter_stats stats;
+
+ err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, NULL);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_stats_get(TEST_INST_ID, &stats);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_stats_get(1, &stats);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ return TEST_SUCCESS;
+}
+
+static struct unit_test_suite service_tests = {
+ .suite_name = "rx event eth adapter test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(NULL, NULL, adapter_create_free),
+ TEST_CASE_ST(adapter_create, adapter_free,
+ adapter_queue_add_del),
+ TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
+ TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_event_eth_rx_adapter_common(void)
+{
+ return unit_test_suite_runner(&service_tests);
+}
+
+REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
+ test_event_eth_rx_adapter_common);
diff --git a/test/test/test_eventdev.c b/test/test/test_eventdev.c
index f766191e..ba39cbac 100644
--- a/test/test/test_eventdev.c
+++ b/test/test/test_eventdev.c
@@ -37,6 +37,7 @@
#include <rte_memcpy.h>
#include <rte_eventdev.h>
#include <rte_dev.h>
+#include <rte_bus_vdev.h>
#include "test.h"
@@ -273,7 +274,12 @@ test_eventdev_queue_default_conf_get(void)
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
&qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
@@ -295,15 +301,13 @@ test_eventdev_queue_setup(void)
/* Negative cases */
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
qconf.nb_atomic_flows = info.max_event_queue_flows;
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
@@ -318,8 +322,12 @@ test_eventdev_queue_setup(void)
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
@@ -336,14 +344,18 @@ test_eventdev_queue_count(void)
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- TEST_ASSERT_EQUAL(rte_event_queue_count(TEST_DEV_ID),
- info.max_event_queues, "Wrong queue count");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
+ "Wrong queue count");
return TEST_SUCCESS;
}
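
The hunks in this file all apply the same mechanical migration: the removed per-resource getters (rte_event_queue_count(), rte_event_queue_priority(), rte_event_port_count()) are replaced by the generic attribute API. A minimal sketch of the new pattern (dev_id assumed valid):

	uint32_t queue_count;

	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			&queue_count) != 0)
		return -1;	/* attribute unsupported or bad device */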
static int
-test_eventdev_queue_priority(void)
+test_eventdev_queue_attr_priority(void)
{
int i, ret;
struct rte_event_dev_info info;
@@ -353,7 +365,12 @@ test_eventdev_queue_priority(void)
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
&qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
@@ -362,8 +379,13 @@ test_eventdev_queue_priority(void)
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
- priority = rte_event_queue_priority(TEST_DEV_ID, i);
+ for (i = 0; i < (int)queue_count; i++) {
+ uint32_t tmp;
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
+ "Queue priority get failed");
+ priority = tmp;
+
if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
TEST_ASSERT_EQUAL(priority,
i % RTE_EVENT_DEV_PRIORITY_LOWEST,
@@ -378,6 +400,134 @@ test_eventdev_queue_priority(void)
}
static int
+test_eventdev_queue_attr_nb_atomic_flows(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t nb_atomic_flows;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+ if (qconf.nb_atomic_flows == 0)
+ /* Assume PMD doesn't support atomic flows, return early */
+ return -ENOTSUP;
+
+ qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
+ &nb_atomic_flows),
+ "Queue nb_atomic_flows get failed");
+
+ TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
+ "Wrong atomic flows value for queue%d", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_nb_atomic_order_sequences(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t nb_atomic_order_sequences;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
+
+ if (qconf.nb_atomic_order_sequences == 0)
+ /* Assume PMD doesn't support reordering */
+ return -ENOTSUP;
+
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
+ &nb_atomic_order_sequences),
+ "Queue nb_atomic_order_sequencess get failed");
+
+ TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
+ qconf.nb_atomic_order_sequences,
+ "Wrong atomic order sequences value for queue%d",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_event_queue_cfg(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint32_t event_queue_cfg;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
+
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < (int)queue_count; i++) {
+ TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
+ RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
+ &event_queue_cfg),
+ "Queue event_queue_cfg get failed");
+
+ TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
+ "Wrong event_queue_cfg value for queue%d",
+ i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
test_eventdev_port_default_conf_get(void)
{
int i, ret;
@@ -386,11 +536,16 @@ test_eventdev_port_default_conf_get(void)
ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
ret = rte_event_port_default_conf_get(TEST_DEV_ID,
- rte_event_port_count(TEST_DEV_ID) + 1, NULL);
+ port_count + 1, NULL);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
&pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
@@ -436,8 +591,12 @@ test_eventdev_port_setup(void)
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
@@ -446,7 +605,32 @@ test_eventdev_port_setup(void)
}
static int
-test_eventdev_dequeue_depth(void)
+test_eventdev_port_attr_dequeue_depth(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+ struct rte_event_port_conf pconf;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+ uint32_t value;
+ TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+ RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
+ 0, "Call to get port dequeue depth failed");
+ TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
+ "Wrong port dequeue depth");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_port_attr_enqueue_depth(void)
{
int ret;
struct rte_event_dev_info info;
@@ -460,14 +644,18 @@ test_eventdev_dequeue_depth(void)
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
- TEST_ASSERT_EQUAL(rte_event_port_dequeue_depth(TEST_DEV_ID, 0),
- pconf.dequeue_depth, "Wrong port dequeue depth");
+ uint32_t value;
+ TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+ RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
+ 0, "Call to get port enqueue depth failed");
+ TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
+ "Wrong port enqueue depth");
return TEST_SUCCESS;
}
static int
-test_eventdev_enqueue_depth(void)
+test_eventdev_port_attr_new_event_threshold(void)
{
int ret;
struct rte_event_dev_info info;
@@ -481,8 +669,12 @@ test_eventdev_enqueue_depth(void)
ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
- TEST_ASSERT_EQUAL(rte_event_port_enqueue_depth(TEST_DEV_ID, 0),
- pconf.enqueue_depth, "Wrong port enqueue depth");
+ uint32_t value;
+ TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
+ RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
+ 0, "Call to get port new event threshold failed");
+ TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
+ "Wrong port new event threshold");
return TEST_SUCCESS;
}
@@ -496,8 +688,11 @@ test_eventdev_port_count(void)
ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
- TEST_ASSERT_EQUAL(rte_event_port_count(TEST_DEV_ID),
- info.max_event_ports, "Wrong port count");
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
return TEST_SUCCESS;
}
@@ -524,19 +719,28 @@ test_eventdev_start_stop(void)
ret = eventdev_configure_setup();
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
- TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
- "Failed to link port, device %d", TEST_DEV_ID);
+ TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
+ TEST_DEV_ID);
ret = rte_event_dev_start(TEST_DEV_ID);
TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
@@ -554,19 +758,28 @@ eventdev_setup_device(void)
ret = eventdev_configure_setup();
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
- for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
}
- for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
}
ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
- TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
- "Failed to link port, device %d", TEST_DEV_ID);
+ TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
+ TEST_DEV_ID);
ret = rte_event_dev_start(TEST_DEV_ID);
TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
@@ -591,7 +804,11 @@ test_eventdev_link(void)
TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ nb_queues = queue_count;
for (i = 0; i < nb_queues; i++) {
queues[i] = i;
priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
@@ -614,7 +831,11 @@ test_eventdev_unlink(void)
TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ nb_queues = queue_count;
for (i = 0; i < nb_queues; i++)
queues[i] = i;
@@ -628,7 +849,7 @@ test_eventdev_unlink(void)
static int
test_eventdev_link_get(void)
{
- int ret, nb_queues, i;
+ int ret, i;
uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
@@ -637,7 +858,11 @@ test_eventdev_link_get(void)
TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
TEST_DEV_ID);
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
+ "Queue count get failed");
+ const int nb_queues = queue_count;
for (i = 0; i < nb_queues; i++)
queues[i] = i;
@@ -649,7 +874,6 @@ test_eventdev_link_get(void)
TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
/* link all queues and get the links */
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
for (i = 0; i < nb_queues; i++) {
queues[i] = i;
priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
@@ -679,8 +903,8 @@ test_eventdev_link_get(void)
ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
TEST_DEV_ID, ret);
+
/* 4links and 2 unlinks */
- nb_queues = rte_event_queue_count(TEST_DEV_ID);
if (nb_queues >= 4) {
for (i = 0; i < 4; i++) {
queues[i] = i;
@@ -741,15 +965,23 @@ static struct unit_test_suite eventdev_common_testsuite = {
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_queue_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_queue_priority),
+ test_eventdev_queue_attr_priority),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_flows),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_nb_atomic_order_sequences),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_attr_event_queue_cfg),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_default_conf_get),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_setup),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_dequeue_depth),
+ test_eventdev_port_attr_dequeue_depth),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_attr_enqueue_depth),
TEST_CASE_ST(eventdev_configure_setup, NULL,
- test_eventdev_enqueue_depth),
+ test_eventdev_port_attr_new_event_threshold),
TEST_CASE_ST(eventdev_configure_setup, NULL,
test_eventdev_port_count),
TEST_CASE_ST(eventdev_configure_setup, NULL,
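
The eventdev common-test hunks above all make the same substitution: the removed rte_event_queue_count()/rte_event_port_count()/rte_event_queue_priority()/rte_event_port_dequeue_depth() style getters give way to the generic attribute API. A minimal sketch of the new pattern, assuming an already-configured device identified by dev_id (the helper name query_dev_sizes is illustrative, not part of the patch):

#include <rte_eventdev.h>

static int
query_dev_sizes(uint8_t dev_id, uint32_t *nb_queues, uint32_t *nb_ports)
{
	/* device-level attributes replace the per-resource getters */
	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			nb_queues) < 0)
		return -1;
	/* per-queue and per-port values use rte_event_queue_attr_get()
	 * and rte_event_port_attr_get() with the matching attr ids */
	return rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
			nb_ports);
}
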
diff --git a/test/test/test_eventdev_octeontx.c b/test/test/test_eventdev_octeontx.c
index 774d0303..dbc36d94 100644
--- a/test/test/test_eventdev_octeontx.c
+++ b/test/test/test_eventdev_octeontx.c
@@ -45,6 +45,7 @@
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
+#include <rte_bus_vdev.h>
#include "test.h"
@@ -193,8 +194,13 @@ _eventdev_setup(int mode)
ret = rte_event_dev_configure(evdev, &dev_conf);
TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
- if (rte_event_queue_count(evdev) > 8) {
+ if (queue_count > 8) {
printf("test expects the unique priority per queue\n");
return -ENOTSUP;
}
@@ -204,8 +210,8 @@ _eventdev_setup(int mode)
* RTE_EVENT_DEV_PRIORITY_LOWEST
*/
uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
- rte_event_queue_count(evdev);
- for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
struct rte_event_queue_conf queue_conf;
ret = rte_event_queue_default_conf_get(evdev, i,
@@ -218,13 +224,17 @@ _eventdev_setup(int mode)
} else {
/* Configure event queues with default priority */
- for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(evdev, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
}
}
/* Configure event ports */
- for (i = 0; i < rte_event_port_count(evdev); i++) {
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_setup(evdev, i, NULL);
TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
@@ -335,6 +345,11 @@ generate_random_events(const unsigned int total_events)
unsigned int i;
int ret;
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
ret = rte_event_dev_info_get(evdev, &info);
TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
for (i = 0; i < total_events; i++) {
@@ -343,7 +358,7 @@ generate_random_events(const unsigned int total_events)
rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
rte_rand() % 256 /* sub_event_type */,
rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
- rte_rand() % rte_event_queue_count(evdev) /* queue */,
+ rte_rand() % queue_count /* queue */,
0 /* port */,
1 /* events */);
if (ret)
@@ -482,7 +497,7 @@ test_multi_queue_enq_single_port_deq(void)
}
/*
- * Inject 0..MAX_EVENTS events over 0..rte_event_queue_count() with modulus
+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
* operation
*
* For example, Inject 32 events over 0..7 queues
@@ -498,15 +513,19 @@ test_multi_queue_enq_single_port_deq(void)
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
- uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
- uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint32_t range = MAX_EVENTS / queue_count;
+ uint32_t expected_val = (index % range) * queue_count;
expected_val += ev->queue_id;
RTE_SET_USED(port);
TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
ev->mbuf->seqn, index, expected_val, range,
- rte_event_queue_count(evdev), MAX_EVENTS);
+ queue_count, MAX_EVENTS);
return 0;
}
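/*
 * Worked example for the check above, assuming queue_count = 8 and
 * MAX_EVENTS = 32 as in the earlier comment: range = 32 / 8 = 4 and
 * the producer enqueues seqn i to queue i % 8, so queue 3 holds seqn
 * {3, 11, 19, 27}. Queues drain in priority order, hence the event at
 * global dequeue index 14 (the third event out of queue 3) must carry
 * seqn (14 % 4) * 8 + 3 = 19.
 */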
@@ -518,8 +537,12 @@ test_multi_queue_priority(void)
int i, max_evts_roundoff;
/* See validate_queue_priority() comments for priority validate logic */
- max_evts_roundoff = MAX_EVENTS / rte_event_queue_count(evdev);
- max_evts_roundoff *= rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ max_evts_roundoff = MAX_EVENTS / queue_count;
+ max_evts_roundoff *= queue_count;
for (i = 0; i < max_evts_roundoff; i++) {
struct rte_event ev = {.event = 0, .u64 = 0};
@@ -528,7 +551,7 @@ test_multi_queue_priority(void)
TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
m->seqn = i;
- queue = i % rte_event_queue_count(evdev);
+ queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
rte_event_enqueue_burst(evdev, 0, &ev, 1);
@@ -651,18 +674,21 @@ static int
test_multi_queue_enq_multi_port_deq(void)
{
const unsigned int total_events = MAX_EVENTS;
- uint8_t nr_ports;
+ uint32_t nr_ports;
int ret;
ret = generate_random_events(total_events);
if (ret)
return TEST_FAILED;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
if (!nr_ports) {
printf("%s: Not enough ports=%d or workers=%d\n", __func__,
- rte_event_port_count(evdev), rte_lcore_count() - 1);
+ nr_ports, rte_lcore_count() - 1);
return TEST_SUCCESS;
}
@@ -691,14 +717,23 @@ test_queue_to_port_single_link(void)
{
int i, nr_links, ret;
+ uint32_t port_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
/* Unlink all connections created in eventdev_setup */
- for (i = 0; i < rte_event_port_count(evdev); i++) {
+ for (i = 0; i < (int)port_count; i++) {
ret = rte_event_port_unlink(evdev, i, NULL, 0);
TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
}
- nr_links = RTE_MIN(rte_event_port_count(evdev),
- rte_event_queue_count(evdev));
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
const unsigned int total_events = MAX_EVENTS / nr_links;
/* Link queue x to port x and inject events to queue x through port x */
@@ -750,10 +785,20 @@ static int
test_queue_to_port_multi_link(void)
{
int ret, port0_events = 0, port1_events = 0;
- uint8_t nr_queues, nr_ports, queue, port;
+ uint8_t queue, port;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
- nr_queues = rte_event_queue_count(evdev);
- nr_ports = rte_event_port_count(evdev);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
if (nr_ports < 2) {
printf("%s: Not enough ports to test ports=%d\n",
@@ -854,14 +899,17 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
uint8_t out_sched_type)
{
const unsigned int total_events = MAX_EVENTS;
- uint8_t nr_ports;
+ uint32_t nr_ports;
int ret;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
if (!nr_ports) {
printf("%s: Not enough ports=%d or workers=%d\n", __func__,
- rte_event_port_count(evdev), rte_lcore_count() - 1);
+ nr_ports, rte_lcore_count() - 1);
return TEST_SUCCESS;
}
@@ -1007,15 +1055,23 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
uint8_t out_sched_type)
{
const unsigned int total_events = MAX_EVENTS;
- uint8_t nr_ports;
+ uint32_t nr_ports;
int ret;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
- if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ if (queue_count < 2 || !nr_ports) {
printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
- __func__, rte_event_queue_count(evdev),
- rte_event_port_count(evdev), rte_lcore_count() - 1);
+ __func__, queue_count, nr_ports,
+ rte_lcore_count() - 1);
return TEST_SUCCESS;
}
@@ -1142,14 +1198,17 @@ worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
- uint8_t nr_ports;
+ uint32_t nr_ports;
int ret;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
if (!nr_ports) {
printf("%s: Not enough ports=%d or workers=%d\n", __func__,
- rte_event_port_count(evdev), rte_lcore_count() - 1);
+ nr_ports, rte_lcore_count() - 1);
return TEST_SUCCESS;
}
@@ -1184,7 +1243,11 @@ worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
struct rte_event ev;
uint16_t valid_event;
uint8_t port = param->port;
- uint8_t nr_queues = rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
rte_atomic32_t *total_events = param->total_events;
while (rte_atomic32_read(total_events) > 0) {
@@ -1222,7 +1285,11 @@ worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
struct rte_event ev;
uint16_t valid_event;
uint8_t port = param->port;
- uint8_t nr_queues = rte_event_queue_count(evdev);
+ uint32_t queue_count;
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
rte_atomic32_t *total_events = param->total_events;
while (rte_atomic32_read(total_events) > 0) {
@@ -1288,9 +1355,12 @@ worker_ordered_flow_producer(void *arg)
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
- uint8_t nr_ports;
+ uint32_t nr_ports;
- nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+ TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
if (rte_lcore_count() < 3 || nr_ports < 2) {
printf("### Not enough cores for %s test.\n", __func__);
diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c
index ba8c053e..f524b6f8 100644
--- a/test/test/test_eventdev_sw.c
+++ b/test/test/test_eventdev_sw.c
@@ -39,7 +39,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -49,6 +48,9 @@
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_pause.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+#include <rte_bus_vdev.h>
#include "test.h"
@@ -63,6 +65,7 @@ struct test {
uint8_t port[MAX_PORTS];
uint8_t qid[MAX_QIDS];
int nb_qids;
+ uint32_t service_id;
};
static struct rte_event release_ev;
@@ -219,7 +222,7 @@ create_lb_qids(struct test *t, int num_qids, uint32_t flags)
/* Q creation */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = flags,
+ .schedule_type = flags,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -242,20 +245,20 @@ create_lb_qids(struct test *t, int num_qids, uint32_t flags)
static inline int
create_atomic_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
}
static inline int
create_ordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
}
static inline int
create_unordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
}
static inline int
@@ -267,8 +270,6 @@ create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
static const struct rte_event_queue_conf conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
};
for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
@@ -417,7 +418,7 @@ run_prio_packet_test(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
struct test_event_dev_stats stats;
err = test_event_dev_stats_get(evdev, &stats);
@@ -509,7 +510,7 @@ test_single_directed_packet(struct test *t)
}
/* Run schedule() as dir packets may need to be re-ordered */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
struct test_event_dev_stats stats;
err = test_event_dev_stats_get(evdev, &stats);
@@ -576,7 +577,7 @@ test_directed_forward_credits(struct test *t)
printf("%d: error failed to enqueue\n", __LINE__);
return -1;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
uint32_t deq_pkts;
deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
@@ -738,7 +739,7 @@ burst_packets(struct test *t)
return -1;
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Check stats for all NUM_PKTS arrived to sched core */
struct test_event_dev_stats stats;
@@ -827,7 +828,7 @@ abuse_inflights(struct test *t)
}
/* schedule */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
struct test_event_dev_stats stats;
@@ -965,7 +966,7 @@ xstats_tests(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Device names / values */
int num_stats = rte_event_dev_xstats_names_get(evdev,
@@ -1240,7 +1241,7 @@ port_reconfig_credits(struct test *t)
const uint32_t NUM_ITERS = 32;
for (i = 0; i < NUM_ITERS; i++) {
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -1292,7 +1293,7 @@ port_reconfig_credits(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
struct rte_event ev[NPKTS];
int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
@@ -1322,7 +1323,7 @@ port_single_lb_reconfig(struct test *t)
static const struct rte_event_queue_conf conf_lb_atomic = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
};
@@ -1334,8 +1335,6 @@ port_single_lb_reconfig(struct test *t)
static const struct rte_event_queue_conf conf_single_link = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
};
if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
printf("%d: error creating qid\n", __LINE__);
@@ -1520,7 +1519,7 @@ xstats_id_reset_tests(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
static const char * const dev_names[] = {
"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
@@ -1822,7 +1821,7 @@ ordered_reconfigure(struct test *t)
}
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -1869,7 +1868,7 @@ qid_priorities(struct test *t)
for (i = 0; i < 3; i++) {
/* Create QID */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
/* increase priority (0 == highest), as we go */
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
.nb_atomic_flows = 1024,
@@ -1911,7 +1910,7 @@ qid_priorities(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* dequeue packets, verify priority was upheld */
struct rte_event ev[32];
@@ -1992,7 +1991,7 @@ load_balancing(struct test *t)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
struct test_event_dev_stats stats;
err = test_event_dev_stats_get(evdev, &stats);
@@ -2092,7 +2091,7 @@ load_balancing_history(struct test *t)
}
/* call the scheduler */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Dequeue the flow 0 packet from port 1, so that we can then drop */
struct rte_event ev;
@@ -2109,7 +2108,7 @@ load_balancing_history(struct test *t)
rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
/* call the scheduler */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/*
* Set up the next set of flows, first a new flow to fill up
@@ -2142,7 +2141,7 @@ load_balancing_history(struct test *t)
}
/* schedule */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (err) {
@@ -2186,7 +2185,7 @@ load_balancing_history(struct test *t)
while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
rte_event_enqueue_burst(evdev, i, &release_ev, 1);
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
cleanup(t);
return 0;
@@ -2252,7 +2251,7 @@ invalid_qid(struct test *t)
}
/* call the scheduler */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (err) {
@@ -2337,7 +2336,7 @@ single_packet(struct test *t)
return -1;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (err) {
@@ -2380,7 +2379,7 @@ single_packet(struct test *t)
printf("%d: Failed to enqueue\n", __LINE__);
return -1;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (stats.port_inflight[wrk_enq] != 0) {
@@ -2468,7 +2467,7 @@ inflight_counts(struct test *t)
}
/* schedule */
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (err) {
@@ -2524,7 +2523,7 @@ inflight_counts(struct test *t)
* As the scheduler core decrements inflights, it needs to run to
* process packets to act on the drop messages
*/
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (stats.port_inflight[p1] != 0) {
@@ -2559,7 +2558,7 @@ inflight_counts(struct test *t)
* As the scheduler core decrements inflights, it needs to run to
* process packets to act on the drop messages
*/
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
err = test_event_dev_stats_get(evdev, &stats);
if (stats.port_inflight[p2] != 0) {
@@ -2653,7 +2652,7 @@ parallel_basic(struct test *t, int check_order)
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* use extra slot to make logic in loops easier */
struct rte_event deq_ev[w3_port + 1];
@@ -2680,7 +2679,7 @@ parallel_basic(struct test *t, int check_order)
return -1;
}
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* dequeue from the tx ports, we should get 3 packets */
deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
@@ -2758,7 +2757,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
printf("%d: Error doing first enqueue\n", __LINE__);
goto err;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
!= 1)
@@ -2783,7 +2782,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
printf("%d: Error with enqueue\n", __LINE__);
goto err;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
} while (rte_event_dev_xstats_by_name_get(evdev,
rx_port_free_stat, NULL) != 0);
@@ -2793,7 +2792,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
printf("%d: Error with enqueue\n", __LINE__);
goto err;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* check that the other port still has an empty CQ */
if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
@@ -2816,7 +2815,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
printf("%d: Error with enqueue\n", __LINE__);
goto err;
}
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
!= 1) {
@@ -3006,7 +3005,7 @@ worker_loopback(struct test *t)
while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
rte_eal_get_lcore_state(w_lcore) != FINISHED) {
- rte_event_schedule(evdev);
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
uint64_t new_cycles = rte_get_timer_cycles();
@@ -3033,7 +3032,8 @@ worker_loopback(struct test *t)
cycles = new_cycles;
}
}
- rte_event_schedule(evdev); /* ensure all completions are flushed */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+ /* ensure all completions are flushed */
rte_eal_mp_wait_lcore();
@@ -3070,6 +3070,14 @@ test_sw_eventdev(void)
}
}
+ if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+ printf("Failed to get service ID for software event dev\n");
+ return -1;
+ }
+
+ rte_service_runstate_set(t->service_id, 1);
+ rte_service_set_runstate_mapped_check(t->service_id, 0);
+
/* Only create mbuf pool once, reuse for each test run */
if (!eventdev_func_mempool) {
eventdev_func_mempool = rte_pktmbuf_pool_create(
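
The test_eventdev_sw.c hunks above track the sw PMD's move from the removed rte_event_schedule() to a service-core model: the test looks up the device's scheduling service once, enables it, then runs one iteration inline wherever schedule() used to be called. A minimal sketch of that sequence, with the helper name setup_sw_scheduler_service being illustrative only:

#include <rte_eventdev.h>
#include <rte_service.h>
#include <rte_service_component.h>

static int
setup_sw_scheduler_service(uint8_t dev_id, uint32_t *service_id)
{
	/* the sw PMD exposes its scheduler as a service */
	if (rte_event_dev_service_id_get(dev_id, service_id) < 0)
		return -1;
	rte_service_runstate_set(*service_id, 1);
	/* allow running it from the application lcore in tests */
	rte_service_set_runstate_mapped_check(*service_id, 0);
	/* one scheduling iteration, replacing rte_event_schedule() */
	return rte_service_run_iter_on_app_lcore(*service_id, 1);
}
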
diff --git a/test/test/test_flow_classify.c b/test/test/test_flow_classify.c
new file mode 100644
index 00000000..9f331cd8
--- /dev/null
+++ b/test/test/test_flow_classify.c
@@ -0,0 +1,672 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <errno.h>
+
+#include "test.h"
+
+#include <rte_string_fns.h>
+#include <rte_mbuf.h>
+#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_acl.h>
+#include <rte_common.h>
+#include <rte_table_acl.h>
+#include <rte_flow.h>
+#include <rte_flow_classify.h>
+
+#include "packet_burst_generator.h"
+#include "test_flow_classify.h"
+
+
+#define FLOW_CLASSIFY_MAX_RULE_NUM 100
+struct flow_classifier *cls;
+
+struct flow_classifier {
+ struct rte_flow_classifier *cls;
+ uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
+ uint32_t n_tables;
+};
+
+struct flow_classifier_acl {
+ struct flow_classifier cls;
+} __rte_cache_aligned;
+
+/*
+ * test functions by passing invalid or
+ * non-workable parameters.
+ */
+static int
+test_invalid_parameters(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+
+ rule = rte_flow_classify_table_entry_add(NULL, 1, NULL, NULL, NULL,
+ NULL, NULL);
+ if (rule) {
+ printf("Line %i: flow_classifier_table_entry_add", __LINE__);
+ printf(" with NULL param should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(NULL, 1, NULL);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" with NULL param should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classifier_query(NULL, 1, NULL, 0, NULL, NULL);
+ if (!ret) {
+ printf("Line %i: flow_classifier_query", __LINE__);
+ printf(" with NULL param should have failed!\n");
+ return -1;
+ }
+
+ rule = rte_flow_classify_table_entry_add(NULL, 1, NULL, NULL, NULL,
+ NULL, &error);
+ if (rule) {
+ printf("Line %i: flow_classify_table_entry_add ", __LINE__);
+ printf("with NULL param should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(NULL, 1, NULL);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf("with NULL param should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classifier_query(NULL, 1, NULL, 0, NULL, NULL);
+ if (!ret) {
+ printf("Line %i: flow_classifier_query", __LINE__);
+ printf(" with NULL param should have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_valid_parameters(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int key_found;
+
+ /*
+ * set up parameters for rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_udp_item_1;
+ pattern[2] = udp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (!rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_invalid_patterns(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int key_found;
+
+ /*
+ * set up parameters for rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item_bad;
+ pattern[1] = ipv4_udp_item_1;
+ pattern[2] = udp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_udp_item_bad;
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ pattern[1] = ipv4_udp_item_1;
+ pattern[2] = udp_item_bad;
+ pattern[3] = end_item_bad;
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_invalid_actions(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int key_found;
+
+ /*
+ * set up parameters for rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_udp_item_1;
+ pattern[2] = udp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action_bad;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ actions[0] = count_action;
+ actions[1] = end_action_bad;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (!ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf("should have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+init_ipv4_udp_traffic(struct rte_mempool *mp,
+ struct rte_mbuf **pkts_burst, uint32_t burst_size)
+{
+ struct ether_hdr pkt_eth_hdr;
+ struct ipv4_hdr pkt_ipv4_hdr;
+ struct udp_hdr pkt_udp_hdr;
+ uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
+ uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
+ uint16_t src_port = 32;
+ uint16_t dst_port = 33;
+ uint16_t pktlen;
+
+ static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
+ static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
+
+ printf("Set up IPv4 UDP traffic\n");
+ initialize_eth_header(&pkt_eth_hdr,
+ (struct ether_addr *)src_mac,
+ (struct ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ pktlen = (uint16_t)(sizeof(struct ether_hdr));
+ printf("ETH pktlen %u\n", pktlen);
+
+ pktlen = initialize_ipv4_header(&pkt_ipv4_hdr, src_addr, dst_addr,
+ pktlen);
+ printf("ETH + IPv4 pktlen %u\n", pktlen);
+
+ pktlen = initialize_udp_header(&pkt_udp_hdr, src_port, dst_port,
+ pktlen);
+ printf("ETH + IPv4 + UDP pktlen %u\n\n", pktlen);
+
+ return generate_packet_burst(mp, pkts_burst, &pkt_eth_hdr,
+ 0, &pkt_ipv4_hdr, 1,
+ &pkt_udp_hdr, burst_size,
+ PACKET_BURST_GEN_PKT_LEN, 1);
+}
+
+static int
+init_ipv4_tcp_traffic(struct rte_mempool *mp,
+ struct rte_mbuf **pkts_burst, uint32_t burst_size)
+{
+ struct ether_hdr pkt_eth_hdr;
+ struct ipv4_hdr pkt_ipv4_hdr;
+ struct tcp_hdr pkt_tcp_hdr;
+ uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
+ uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
+ uint16_t src_port = 16;
+ uint16_t dst_port = 17;
+ uint16_t pktlen;
+
+ static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
+ static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
+
+ printf("Set up IPv4 TCP traffic\n");
+ initialize_eth_header(&pkt_eth_hdr,
+ (struct ether_addr *)src_mac,
+ (struct ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ pktlen = (uint16_t)(sizeof(struct ether_hdr));
+ printf("ETH pktlen %u\n", pktlen);
+
+ pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
+ dst_addr, pktlen, IPPROTO_TCP);
+ printf("ETH + IPv4 pktlen %u\n", pktlen);
+
+ pktlen = initialize_tcp_header(&pkt_tcp_hdr, src_port, dst_port,
+ pktlen);
+ printf("ETH + IPv4 + TCP pktlen %u\n\n", pktlen);
+
+ return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
+ 0, &pkt_ipv4_hdr, 1, IPPROTO_TCP,
+ &pkt_tcp_hdr, burst_size,
+ PACKET_BURST_GEN_PKT_LEN, 1);
+}
+
+static int
+init_ipv4_sctp_traffic(struct rte_mempool *mp,
+ struct rte_mbuf **pkts_burst, uint32_t burst_size)
+{
+ struct ether_hdr pkt_eth_hdr;
+ struct ipv4_hdr pkt_ipv4_hdr;
+ struct sctp_hdr pkt_sctp_hdr;
+ uint32_t src_addr = IPV4_ADDR(11, 12, 13, 14);
+ uint32_t dst_addr = IPV4_ADDR(15, 16, 17, 18);
+ uint16_t src_port = 10;
+ uint16_t dst_port = 11;
+ uint16_t pktlen;
+
+ static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
+ static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
+
+ printf("Set up IPv4 SCTP traffic\n");
+ initialize_eth_header(&pkt_eth_hdr,
+ (struct ether_addr *)src_mac,
+ (struct ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+ pktlen = (uint16_t)(sizeof(struct ether_hdr));
+ printf("ETH pktlen %u\n", pktlen);
+
+ pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
+ dst_addr, pktlen, IPPROTO_SCTP);
+ printf("ETH + IPv4 pktlen %u\n", pktlen);
+
+ pktlen = initialize_sctp_header(&pkt_sctp_hdr, src_port, dst_port,
+ pktlen);
+ printf("ETH + IPv4 + SCTP pktlen %u\n\n", pktlen);
+
+ return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
+ 0, &pkt_ipv4_hdr, 1, IPPROTO_SCTP,
+ &pkt_sctp_hdr, burst_size,
+ PACKET_BURST_GEN_PKT_LEN, 1);
+}
+
+static int
+init_mbufpool(void)
+{
+ int socketid;
+ int ret = 0;
+ unsigned int lcore_id;
+ char s[64];
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ socketid = rte_lcore_to_socket_id(lcore_id);
+ if (socketid >= NB_SOCKETS) {
+ printf(
+ "Socket %d of lcore %u is out of range %d\n",
+ socketid, lcore_id, NB_SOCKETS);
+ ret = -1;
+ break;
+ }
+ if (mbufpool[socketid] == NULL) {
+ snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
+ mbufpool[socketid] =
+ rte_pktmbuf_pool_create(s, NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0, MBUF_SIZE,
+ socketid);
+ if (mbufpool[socketid]) {
+ printf("Allocated mbuf pool on socket %d\n",
+ socketid);
+ } else {
+ printf("Cannot init mbuf pool on socket %d\n",
+ socketid);
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+test_query_udp(void)
+{
+ struct rte_flow_error error;
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int i;
+ int key_found;
+
+ ret = init_ipv4_udp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
+ if (ret != MAX_PKT_BURST) {
+ printf("Line %i: init_udp_ipv4_traffic has failed!\n",
+ __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < MAX_PKT_BURST; i++)
+ bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
+
+ /*
+ * set up parameters for rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_udp_item_1;
+ pattern[2] = udp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (!rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classifier_query(cls->cls, 0, bufs, MAX_PKT_BURST,
+ rule, &udp_classify_stats);
+ if (ret) {
+ printf("Line %i: flow_classifier_query", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_query_tcp(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int i;
+ int key_found;
+
+ ret = init_ipv4_tcp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
+ if (ret != MAX_PKT_BURST) {
+ printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
+ __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < MAX_PKT_BURST; i++)
+ bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
+
+ /*
+ * set up parameters for rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_tcp_item_1;
+ pattern[2] = tcp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (!rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classifier_query(cls->cls, 0, bufs, MAX_PKT_BURST,
+ rule, &tcp_classify_stats);
+ if (ret) {
+ printf("Line %i: flow_classifier_query", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_query_sctp(void)
+{
+ struct rte_flow_classify_rule *rule;
+ int ret;
+ int i;
+ int key_found;
+
+ ret = init_ipv4_sctp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
+ if (ret != MAX_PKT_BURST) {
+ printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
+ __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < MAX_PKT_BURST; i++)
+ bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
+
+ /*
+ * set up parameters rte_flow_classify_table_entry_add and
+ * rte_flow_classify_table_entry_delete
+ */
+
+ attr.ingress = 1;
+ attr.priority = 1;
+ pattern[0] = eth_item;
+ pattern[1] = ipv4_sctp_item_1;
+ pattern[2] = sctp_item_1;
+ pattern[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
+ &attr, pattern, actions, &error);
+ if (!rule) {
+ printf("Line %i: flow_classify_table_entry_add", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classifier_query(cls->cls, 0, bufs, MAX_PKT_BURST,
+ rule, &sctp_classify_stats);
+ if (ret) {
+ printf("Line %i: flow_classifier_query", __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+
+ ret = rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
+ if (ret) {
+ printf("Line %i: rte_flow_classify_table_entry_delete",
+ __LINE__);
+ printf(" should not have failed!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_flow_classify(void)
+{
+ struct rte_table_acl_params table_acl_params;
+ struct rte_flow_classify_table_params cls_table_params;
+ struct rte_flow_classifier_params cls_params;
+ int socket_id;
+ int ret;
+ uint32_t size;
+
+ socket_id = rte_eth_dev_socket_id(0);
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
+ cls = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ cls_params.name = "flow_classifier";
+ cls_params.socket_id = socket_id;
+ cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;
+ cls->cls = rte_flow_classifier_create(&cls_params);
+
+ /* initialise ACL table params */
+ table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
+ table_acl_params.name = "table_acl_ipv4_5tuple";
+ table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
+ memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
+
+ /* initialise table create params */
+ cls_table_params.ops = &rte_table_acl_ops;
+ cls_table_params.arg_create = &table_acl_params;
+
+ ret = rte_flow_classify_table_create(cls->cls, &cls_table_params,
+ &cls->table_id[0]);
+ if (ret) {
+ printf("Line %i: f_create has failed!\n", __LINE__);
+ rte_flow_classifier_free(cls->cls);
+ rte_free(cls);
+ return -1;
+ }
+ printf("Created table_acl for for IPv4 five tuple packets\n");
+
+ ret = init_mbufpool();
+ if (ret) {
+ printf("Line %i: init_mbufpool has failed!\n", __LINE__);
+ return -1;
+ }
+
+ if (test_invalid_parameters() < 0)
+ return -1;
+ if (test_valid_parameters() < 0)
+ return -1;
+ if (test_invalid_patterns() < 0)
+ return -1;
+ if (test_invalid_actions() < 0)
+ return -1;
+ if (test_query_udp() < 0)
+ return -1;
+ if (test_query_tcp() < 0)
+ return -1;
+ if (test_query_sctp() < 0)
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(flow_classify_autotest, test_flow_classify);
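
Each query test in the new file follows the same rule lifecycle. A condensed sketch, reusing the file-scope cls, attr, pattern, actions, bufs and udp_classify_stats objects defined above and in test_flow_classify.h:

	struct rte_flow_error err;
	struct rte_flow_classify_rule *rule;
	int key_found;

	/* install the rule into ACL table 0 of the classifier */
	rule = rte_flow_classify_table_entry_add(cls->cls, 0, &key_found,
			&attr, pattern, actions, &err);
	if (rule == NULL)
		return -1;
	/* run the burst through the classifier and collect counters */
	if (rte_flow_classifier_query(cls->cls, 0, bufs, MAX_PKT_BURST,
			rule, &udp_classify_stats) != 0)
		return -1;
	/* tear the rule down again */
	return rte_flow_classify_table_entry_delete(cls->cls, 0, rule);
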
diff --git a/test/test/test_flow_classify.h b/test/test/test_flow_classify.h
new file mode 100644
index 00000000..39535cfc
--- /dev/null
+++ b/test/test/test_flow_classify.h
@@ -0,0 +1,234 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_FLOW_CLASSIFY_H_
+#define TEST_FLOW_CLASSIFY_H_
+
+#define MAX_PKT_BURST (32)
+#define NB_SOCKETS (1)
+#define MEMPOOL_CACHE_SIZE (256)
+#define MBUF_SIZE (512)
+#define NB_MBUF (512)
+
+/* test UDP, TCP and SCTP packets */
+static struct rte_mempool *mbufpool[NB_SOCKETS];
+static struct rte_mbuf *bufs[MAX_PKT_BURST];
+
+/* ACL field definitions for IPv4 5 tuple rule */
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+enum {
+ PROTO_INPUT_IPV4,
+ SRC_INPUT_IPV4,
+ DST_INPUT_IPV4,
+ SRCP_DESTP_INPUT_IPV4
+};
+
+static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+ /* first input field - always one byte long. */
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = PROTO_FIELD_IPV4,
+ .input_index = PROTO_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+ /* next input field (IPv4 source address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = SRC_FIELD_IPV4,
+ .input_index = SRC_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+ /* next input field (IPv4 destination address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = DST_FIELD_IPV4,
+ .input_index = DST_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+ /*
+ * Next 2 fields (src & dst ports) form 4 consecutive bytes.
+ * They share the same input index.
+ */
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = SRCP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = DSTP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+/* parameters for rte_flow_classify_validate and rte_flow_classify_create */
+
+/* test UDP pattern:
+ * "eth / ipv4 src spec 2.2.2.3 src mask 255.255.255.00 dst spec 2.2.2.7
+ * dst mask 255.255.255.00 / udp src is 32 dst is 33 / end"
+ */
+static struct rte_flow_item_ipv4 ipv4_udp_spec_1 = {
+ { 0, 0, 0, 0, 0, 0, IPPROTO_UDP, 0, IPv4(2, 2, 2, 3), IPv4(2, 2, 2, 7)}
+};
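+/* 0xffffff00 encodes the 255.255.255.00 (/24) source/destination masks
+ * quoted in the pattern strings in this file.
+ */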
+static const struct rte_flow_item_ipv4 ipv4_mask_24 = {
+ .hdr = {
+ .next_proto_id = 0xff,
+ .src_addr = 0xffffff00,
+ .dst_addr = 0xffffff00,
+ },
+};
+static struct rte_flow_item_udp udp_spec_1 = {
+ { 32, 33, 0, 0 }
+};
+
+static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
+ 0, 0, 0 };
+static struct rte_flow_item eth_item_bad = { -1, 0, 0, 0 };
+
+static struct rte_flow_item ipv4_udp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
+ &ipv4_udp_spec_1, 0, &ipv4_mask_24};
+static struct rte_flow_item ipv4_udp_item_bad = { RTE_FLOW_ITEM_TYPE_IPV4,
+ NULL, 0, NULL};
+
+static struct rte_flow_item udp_item_1 = { RTE_FLOW_ITEM_TYPE_UDP,
+ &udp_spec_1, 0, &rte_flow_item_udp_mask};
+static struct rte_flow_item udp_item_bad = { RTE_FLOW_ITEM_TYPE_UDP,
+ NULL, 0, NULL};
+
+static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
+ 0, 0, 0 };
+static struct rte_flow_item end_item_bad = { -1, 0, 0, 0 };
+
+/* test TCP pattern:
+ * "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8
+ * dst mask 255.255.255.00 / tcp src is 16 dst is 17 / end"
+ */
+static struct rte_flow_item_ipv4 ipv4_tcp_spec_1 = {
+ { 0, 0, 0, 0, 0, 0, IPPROTO_TCP, 0, IPv4(1, 2, 3, 4), IPv4(5, 6, 7, 8)}
+};
+
+static struct rte_flow_item_tcp tcp_spec_1 = {
+ { 16, 17, 0, 0, 0, 0, 0, 0, 0}
+};
+
+static struct rte_flow_item ipv4_tcp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
+ &ipv4_tcp_spec_1, 0, &ipv4_mask_24};
+
+static struct rte_flow_item tcp_item_1 = { RTE_FLOW_ITEM_TYPE_TCP,
+ &tcp_spec_1, 0, &rte_flow_item_tcp_mask};
+
+/* test SCTP pattern:
+ * "eth / ipv4 src spec 11.12.13.14 src mask 255.255.255.00 dst spec
+ * 15.16.17.18 dst mask 255.255.255.00 / sctp src is 10 dst is 11 / end"
+ */
+static struct rte_flow_item_ipv4 ipv4_sctp_spec_1 = {
+ { 0, 0, 0, 0, 0, 0, IPPROTO_SCTP, 0, IPv4(11, 12, 13, 14),
+ IPv4(15, 16, 17, 18)}
+};
+
+static struct rte_flow_item_sctp sctp_spec_1 = {
+ { 10, 11, 0, 0}
+};
+
+static struct rte_flow_item ipv4_sctp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
+ &ipv4_sctp_spec_1, 0, &ipv4_mask_24};
+
+static struct rte_flow_item sctp_item_1 = { RTE_FLOW_ITEM_TYPE_SCTP,
+ &sctp_spec_1, 0, &rte_flow_item_sctp_mask};
+
+
+/* test actions:
+ * "actions count / end"
+ */
+static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
+static struct rte_flow_action count_action_bad = { -1, 0};
+
+static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
+static struct rte_flow_action end_action_bad = { -1, 0};
+
+static struct rte_flow_action actions[2];
+
+/* test attributes */
+static struct rte_flow_attr attr;
+
+/* test error */
+static struct rte_flow_error error;
+
+/* test pattern */
+static struct rte_flow_item pattern[4];
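+/* filled in by each test case, e.g. eth / ipv4 / udp|tcp|sctp / end */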
+
+/* flow classify data for UDP burst */
+static struct rte_flow_classify_ipv4_5tuple_stats udp_ntuple_stats;
+static struct rte_flow_classify_stats udp_classify_stats = {
+ .stats = (void *)&udp_ntuple_stats
+};
+
+/* flow classify data for TCP burst */
+static struct rte_flow_classify_ipv4_5tuple_stats tcp_ntuple_stats;
+static struct rte_flow_classify_stats tcp_classify_stats = {
+ .stats = (void *)&tcp_ntuple_stats
+};
+
+/* flow classify data for SCTP burst */
+static struct rte_flow_classify_ipv4_5tuple_stats sctp_ntuple_stats;
+static struct rte_flow_classify_stats sctp_classify_stats = {
+ .stats = (void *)&sctp_ntuple_stats
+};
+#endif /* TEST_FLOW_CLASSIFY_H_ */
diff --git a/test/test/test_func_reentrancy.c b/test/test/test_func_reentrancy.c
index 95440dd1..514c8761 100644
--- a/test/test/test_func_reentrancy.c
+++ b/test/test/test_func_reentrancy.c
@@ -44,7 +44,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index 2fafc44b..4668cf1a 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -44,7 +44,6 @@
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index db17fdf3..b9567275 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -42,6 +42,7 @@
#include <rte_string_fns.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>
+#include <rte_bus_pci.h>
#include <rte_cycles.h>
#include <rte_kni.h>
@@ -132,7 +133,7 @@ test_kni_lookup_mempool(void)
}
/* Callback for request of changing MTU */
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
printf("Change MTU of port %d to %u\n", port_id, new_mtu);
kni_pkt_mtu = new_mtu;
@@ -362,7 +363,7 @@ test_kni_register_handler_mp(void)
}
static int
-test_kni_processing(uint8_t port_id, struct rte_mempool *mp)
+test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
{
int ret = 0;
unsigned i;
@@ -387,7 +388,7 @@ test_kni_processing(uint8_t port_id, struct rte_mempool *mp)
conf.core_id = 1;
conf.force_bind = 1;
conf.mbuf_size = MAX_PACKET_SZ;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
ops = kni_ops;
ops.port_id = port_id;
@@ -472,7 +473,7 @@ static int
test_kni(void)
{
int ret = -1;
- uint8_t nb_ports, port_id;
+ uint16_t nb_ports, port_id;
struct rte_kni *kni;
struct rte_mempool *mp;
struct rte_kni_conf conf;
@@ -538,7 +539,7 @@ test_kni(void)
rte_eth_dev_info_get(port_id, &info);
conf.addr = info.pci_dev->addr;
conf.id = info.pci_dev->id;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
@@ -567,7 +568,7 @@ test_kni(void)
rte_eth_dev_info_get(port_id, &info);
conf.addr = info.pci_dev->addr;
conf.id = info.pci_dev->id;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index dc28cea5..c6e3a725 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -94,9 +94,9 @@ uint8_t slave_mac[] = {0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 };
uint8_t bonded_mac[] = {0xAA, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
struct link_bonding_unittest_params {
- int8_t bonded_port_id;
- int8_t slave_port_ids[TEST_MAX_NUMBER_OF_PORTS];
- uint8_t bonded_slave_count;
+ int16_t bonded_port_id;
+ int16_t slave_port_ids[TEST_MAX_NUMBER_OF_PORTS];
+ uint16_t bonded_slave_count;
uint8_t bonding_mode;
uint16_t nb_rx_q;
@@ -226,7 +226,7 @@ static void free_virtualpmd_tx_queue(void);
static int
-configure_ethdev(uint8_t port_id, uint8_t start, uint8_t en_isr)
+configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
{
int q_id;
@@ -317,7 +317,7 @@ test_create_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* Don't try to recreate bonded device if re-running test suite*/
if (test_params->bonded_port_id == -1) {
@@ -387,7 +387,7 @@ test_add_slave_to_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params->bonded_port_id,
test_params->slave_port_ids[test_params->bonded_slave_count]),
@@ -434,7 +434,7 @@ test_remove_slave_from_bonded_device(void)
{
int current_slave_count;
struct ether_addr read_mac_addr, *mac_addr;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove(test_params->bonded_port_id,
test_params->slave_port_ids[test_params->bonded_slave_count-1]),
@@ -496,7 +496,7 @@ static int
test_add_already_bonded_slave_to_bonded_device(void)
{
int port_id, current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
char pmd_name[RTE_ETH_NAME_MAX_LEN];
test_add_slave_to_bonded_device();
@@ -528,7 +528,7 @@ static int
test_get_slaves_from_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(),
"Failed to add slave to bonded device");
@@ -609,7 +609,7 @@ test_start_bonded_device(void)
struct rte_eth_link link_status;
int current_slave_count, current_bonding_mode, primary_port;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* Add slave to bonded device*/
TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(),
@@ -658,7 +658,7 @@ static int
test_stop_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
struct rte_eth_link link_status;
@@ -932,7 +932,7 @@ test_set_bonded_port_initialization_mac_assignment(void)
{
int i, slave_count, bonded_port_id;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int slave_port_ids[BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT];
struct ether_addr slave_mac_addr, bonded_mac_addr, read_mac_addr;
@@ -1114,7 +1114,7 @@ test_set_bonded_port_initialization_mac_assignment(void)
static int
initialize_bonded_device_with_slaves(uint8_t bonding_mode, uint8_t bond_en_isr,
- uint8_t number_of_slaves, uint8_t enable_slave)
+ uint16_t number_of_slaves, uint8_t enable_slave)
{
/* Configure bonded device */
TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0,
@@ -1179,7 +1179,7 @@ int test_lsc_interrupt_count;
static int
-test_bonding_lsc_event_callback(uint8_t port_id __rte_unused,
+test_bonding_lsc_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type type __rte_unused,
void *param __rte_unused,
void *ret_param __rte_unused)
@@ -1224,7 +1224,7 @@ static int
test_status_interrupt(void)
{
int slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* initialized bonding device with T slaves */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
@@ -1313,7 +1313,7 @@ test_status_interrupt(void)
static int
generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size,
uint8_t vlan, uint8_t ipv4, uint8_t toggle_dst_mac,
- uint8_t toggle_ip_addr, uint8_t toggle_udp_port)
+ uint8_t toggle_ip_addr, uint16_t toggle_udp_port)
{
uint16_t pktlen, generated_burst_size, ether_type;
void *ip_hdr;
@@ -1854,7 +1854,7 @@ test_roundrobin_verify_slave_link_status_change_behaviour(void)
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
@@ -2408,7 +2408,7 @@ test_activebackup_verify_slave_link_status_change_failover(void)
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count, primary_port;
@@ -3302,7 +3302,7 @@ test_balance_verify_slave_link_status_change_behaviour(void)
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
@@ -3861,7 +3861,7 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
@@ -4373,7 +4373,7 @@ test_tlb_verify_slave_link_status_change_failover(void)
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count, primary_port;
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 8e9e23db..a7a3f8e2 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -102,7 +102,7 @@ static const struct ether_addr slow_protocol_mac_addr = {
struct slave_conf {
struct rte_ring *rx_queue;
struct rte_ring *tx_queue;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t bonded : 1;
uint8_t lacp_parnter_state;
@@ -235,7 +235,7 @@ free_pkts(struct rte_mbuf **pkts, uint16_t count)
}
static int
-configure_ethdev(uint8_t port_id, uint8_t start)
+configure_ethdev(uint16_t port_id, uint8_t start)
{
TEST_ASSERT(rte_eth_dev_configure(port_id, 1, 1, &default_pmd_conf) == 0,
"Failed to configure device %u", port_id);
@@ -325,7 +325,7 @@ remove_slave(struct slave_conf *slave)
}
static void
-lacp_recv_cb(uint8_t slave_id, struct rte_mbuf *lacp_pkt)
+lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt)
{
struct ether_hdr *hdr;
struct slow_protocol_frame *slow_hdr;
@@ -343,7 +343,7 @@ lacp_recv_cb(uint8_t slave_id, struct rte_mbuf *lacp_pkt)
}
static int
-initialize_bonded_device_with_slaves(uint8_t slave_count, uint8_t external_sm)
+initialize_bonded_device_with_slaves(uint16_t slave_count, uint8_t external_sm)
{
uint8_t i;
@@ -379,8 +379,8 @@ remove_slaves_and_stop_bonded_device(void)
{
struct slave_conf *slave;
int retval;
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t i;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t i;
rte_eth_dev_stop(test_params.bonded_port_id);
@@ -411,7 +411,7 @@ test_setup(void)
char name[RTE_ETH_NAME_MAX_LEN];
struct slave_conf *port;
const uint8_t socket_id = rte_socket_id();
- uint8_t i;
+ uint16_t i;
if (test_params.mbuf_pool == NULL) {
nb_mbuf_per_pool = TEST_RX_DESC_MAX + DEF_PKT_BURST +
@@ -661,7 +661,7 @@ bond_handshake(void)
TEST_ASSERT_EQUAL(all_slaves_done, 1, "Bond handshake failed\n");
/* If flags doesn't match - report failure */
- return all_slaves_done = 1 ? TEST_SUCCESS : TEST_FAILED;
+ return all_slaves_done == 1 ? TEST_SUCCESS : TEST_FAILED;
}
#define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports)
@@ -1521,7 +1521,7 @@ check_environment(void)
{
struct slave_conf *port;
uint8_t i, env_state;
- uint8_t slaves[RTE_DIM(test_params.slave_ports)];
+ uint16_t slaves[RTE_DIM(test_params.slave_ports)];
int slaves_count;
env_state = 0;
diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index f8cf1cab..54cbf128 100644
--- a/test/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -48,6 +48,7 @@
#include <rte_log.h>
#include <rte_lcore.h>
#include <rte_memory.h>
+#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
@@ -75,7 +76,7 @@
#define INVALID_BONDING_MODE (-1)
struct slave_conf {
- uint8_t port_id;
+ uint16_t port_id;
struct rte_eth_dev_info dev_info;
struct rte_eth_rss_conf rss_conf;
@@ -159,7 +160,8 @@ static struct rte_eth_conf rss_pmd_conf = {
RTE_DIM(test_params.slave_ports))
static int
-configure_ethdev(uint8_t port_id, struct rte_eth_conf *eth_conf, uint8_t start)
+configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf,
+ uint8_t start)
{
int rxq, txq;
@@ -243,7 +245,7 @@ bond_slaves(void)
* Set all RETA values in port_id to value
*/
static int
-reta_set(uint8_t port_id, uint8_t value, int reta_size)
+reta_set(uint16_t port_id, uint8_t value, int reta_size)
{
struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
int i, j;
diff --git a/test/test/test_logs.c b/test/test/test_logs.c
index 730a86bd..d6e4973a 100644
--- a/test/test/test_logs.c
+++ b/test/test/test_logs.c
@@ -38,7 +38,6 @@
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -61,9 +60,9 @@
static int
test_logs(void)
{
- /* enable these logs type */
- rte_log_set_level(RTE_LOGTYPE_TESTAPP1, RTE_LOG_EMERG);
- rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_EMERG);
+ /* set logtype levels low so we can test the global level */
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP1, RTE_LOG_DEBUG);
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_DEBUG);
/* log in error level */
rte_log_set_global_level(RTE_LOG_ERR);
@@ -75,8 +74,8 @@ test_logs(void)
RTE_LOG(ERR, TESTAPP2, "error message (not displayed)\n");
RTE_LOG(CRIT, TESTAPP2, "critical message\n");
- /* disable one log type */
- rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_DEBUG);
+ /* raise a single log type's level above the global level to test it */
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_EMERG);
/* log in error level */
rte_log_set_global_level(RTE_LOG_ERR);
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index 013fd440..4572cafb 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -41,7 +41,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
@@ -108,8 +107,7 @@ test_align_overlap_per_lcore(__attribute__((unused)) void *arg)
}
for(j = 0; j < 1000 ; j++) {
if( *(char *)p1 != 0) {
- printf("rte_zmalloc didn't zero"
- "the allocated memory\n");
+ printf("rte_zmalloc didn't zero the allocated memory\n");
ret = -1;
}
}
@@ -180,8 +178,7 @@ test_reordered_free_per_lcore(__attribute__((unused)) void *arg)
}
for(j = 0; j < 1000 ; j++) {
if( *(char *)p1 != 0) {
- printf("rte_zmalloc didn't zero"
- "the allocated memory\n");
+ printf("rte_zmalloc didn't zero the allocated memory\n");
ret = -1;
}
}
@@ -293,7 +290,7 @@ test_multi_alloc_statistics(void)
struct rte_malloc_socket_stats pre_stats, post_stats ,first_stats, second_stats;
size_t size = 2048;
int align = 1024;
-#ifndef RTE_LIBRTE_MALLOC_DEBUG
+#ifndef RTE_MALLOC_DEBUG
int trailer_size = 0;
#else
int trailer_size = RTE_CACHE_LINE_SIZE;
@@ -623,7 +620,7 @@ test_rte_malloc_validate(void)
const size_t request_size = 1024;
size_t allocated_size;
char *data_ptr = rte_malloc(NULL, request_size, RTE_CACHE_LINE_SIZE);
-#ifdef RTE_LIBRTE_MALLOC_DEBUG
+#ifdef RTE_MALLOC_DEBUG
int retval;
char *over_write_vals = NULL;
#endif
@@ -645,7 +642,7 @@ test_rte_malloc_validate(void)
if (allocated_size < request_size)
err_return();
-#ifdef RTE_LIBRTE_MALLOC_DEBUG
+#ifdef RTE_MALLOC_DEBUG
/****** change the header to be bad */
char save_buf[64];
diff --git a/test/test/test_mbuf.c b/test/test/test_mbuf.c
index 3396b4a9..414509b2 100644
--- a/test/test/test_mbuf.c
+++ b/test/test/test_mbuf.c
@@ -45,7 +45,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -907,7 +906,7 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
}
badbuf = *buf;
- badbuf.buf_physaddr = 0;
+ badbuf.buf_iova = 0;
if (verify_mbuf_check_panics(&badbuf)) {
printf("Error with bad-physaddr mbuf test\n");
return -1;
diff --git a/test/test/test_member.c b/test/test/test_member.c
new file mode 100644
index 00000000..02375fdc
--- /dev/null
+++ b/test/test/test_member.c
@@ -0,0 +1,744 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This test is for membership library's simple feature test */
+
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_member.h>
+#include <rte_byteorder.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ip.h>
+
+#include "test.h"
+
+struct rte_member_setsum *setsum_ht;
+struct rte_member_setsum *setsum_cache;
+struct rte_member_setsum *setsum_vbf;
+
+/* 5-tuple key type */
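+/* packed: the 13-byte key must contain no padding between fields */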
+struct flow_key {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint8_t proto;
+} __attribute__((packed));
+
+/* Set ID Macros for multimatch test usage */
+#define M_MATCH_S 1 /* Do not start at 0: by default set ID 0 means no match */
+#define M_MATCH_E 15
+#define M_MATCH_STEP 2
+#define M_MATCH_CNT \
+ (1 + (M_MATCH_E - M_MATCH_S) / M_MATCH_STEP)
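+/* e.g. S=1, E=15, STEP=2 uses set IDs {1, 3, ..., 15}, so M_MATCH_CNT == 8 */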
+
+
+#define NUM_SAMPLES 5
+#define MAX_MATCH 32
+
+/* Keys used by unit test functions */
+static struct flow_key keys[NUM_SAMPLES] = {
+ {
+ .ip_src = IPv4(0x03, 0x02, 0x01, 0x00),
+ .ip_dst = IPv4(0x07, 0x06, 0x05, 0x04),
+ .port_src = 0x0908,
+ .port_dst = 0x0b0a,
+ .proto = 0x0c,
+ },
+ {
+ .ip_src = IPv4(0x13, 0x12, 0x11, 0x10),
+ .ip_dst = IPv4(0x17, 0x16, 0x15, 0x14),
+ .port_src = 0x1918,
+ .port_dst = 0x1b1a,
+ .proto = 0x1c,
+ },
+ {
+ .ip_src = IPv4(0x23, 0x22, 0x21, 0x20),
+ .ip_dst = IPv4(0x27, 0x26, 0x25, 0x24),
+ .port_src = 0x2928,
+ .port_dst = 0x2b2a,
+ .proto = 0x2c,
+ },
+ {
+ .ip_src = IPv4(0x33, 0x32, 0x31, 0x30),
+ .ip_dst = IPv4(0x37, 0x36, 0x35, 0x34),
+ .port_src = 0x3938,
+ .port_dst = 0x3b3a,
+ .proto = 0x3c,
+ },
+ {
+ .ip_src = IPv4(0x43, 0x42, 0x41, 0x40),
+ .ip_dst = IPv4(0x47, 0x46, 0x45, 0x44),
+ .port_src = 0x4948,
+ .port_dst = 0x4b4a,
+ .proto = 0x4c,
+ }
+};
+
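+/* set IDs start at 1; 0 is reserved for RTE_MEMBER_NO_MATCH */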
+uint32_t test_set[NUM_SAMPLES] = {1, 2, 3, 4, 5};
+
+#define ITERATIONS 3
+#define KEY_SIZE 4
+
+#define MAX_ENTRIES (1 << 16)
+uint8_t generated_keys[MAX_ENTRIES][KEY_SIZE];
+
+static struct rte_member_parameters params = {
+ .num_keys = MAX_ENTRIES, /* Total hash table entries. */
+ .key_len = KEY_SIZE, /* Length of hash key. */
+
+ /* num_set and false_positive_rate only relevant to vBF */
+ .num_set = 16,
+ .false_positive_rate = 0.03,
+ .prim_hash_seed = 1,
+ .sec_hash_seed = 11,
+ .socket_id = 0 /* NUMA Socket ID for memory. */
+};
+
+/*
+ * Sequence of operations for find existing setsummary
+ *
+ * - create setsum
+ * - find existing setsum: hit
+ * - find non-existing setsum: miss
+ *
+ */
+static int
+test_member_find_existing(void)
+{
+ struct rte_member_setsum *tmp_setsum = NULL, *result = NULL;
+ struct rte_member_parameters tmp_params = {
+ .name = "member_find_existing",
+ .num_keys = MAX_ENTRIES, /* Total hash table entries. */
+ .key_len = KEY_SIZE, /* Length of hash key. */
+ .type = RTE_MEMBER_TYPE_HT,
+ .num_set = 32,
+ .false_positive_rate = 0.03,
+ .prim_hash_seed = 1,
+ .sec_hash_seed = 11,
+ .socket_id = 0 /* NUMA Socket ID for memory. */
+ };
+
+ /* Create */
+ tmp_setsum = rte_member_create(&tmp_params);
+ TEST_ASSERT(tmp_setsum != NULL, "setsum creation failed");
+
+ /* Try to find existing hash table */
+ result = rte_member_find_existing("member_find_existing");
+ TEST_ASSERT(result == tmp_setsum, "could not find existing setsum");
+
+ /* Try to find non-existing hash table */
+ result = rte_member_find_existing("member_find_non_existing");
+ TEST_ASSERT(result == NULL, "found setsum that shouldn't exist");
+
+ /* Cleanup. */
+ rte_member_free(tmp_setsum);
+
+ return 0;
+}
+
+/*
+ * Test for bad creating parameters
+ */
+static int
+test_member_create_bad_param(void)
+{
+ struct rte_member_setsum *bad_setsum = NULL;
+ struct rte_member_parameters bad_params = {
+ .num_keys = MAX_ENTRIES, /* Total hash table entries. */
+ .key_len = KEY_SIZE, /* Length of hash key. */
+ .type = RTE_MEMBER_TYPE_HT,
+ .num_set = 32,
+ .false_positive_rate = 0.03,
+ .prim_hash_seed = 1,
+ .sec_hash_seed = 11,
+ .socket_id = 0 /* NUMA Socket ID for memory. */
+ };
+
+ printf("Expected error section begin...\n");
+ bad_params.name = "bad_param1";
+ bad_params.num_set = 0;
+ bad_params.type = RTE_MEMBER_TYPE_VBF;
+ /* Test with 0 set for vBF should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with an invalid "
+ "number of sets for vBF\n");
+ return -1;
+ }
+
+ bad_params.name = "bad_param2";
+ bad_params.false_positive_rate = 0;
+ bad_params.num_set = 32;
+ /* Test with 0 false positive for vBF should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with an invalid "
+ "false positive rate for vBF\n");
+ return -1;
+ }
+
+ bad_params.name = "bad_param3";
+ bad_params.false_positive_rate = 0.03;
+ bad_params.num_keys = 0;
+ /* Test with 0 key per BF for vBF should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with invalid "
+ "num_keys for vBF\n");
+ return -1;
+ }
+
+ bad_params.name = "bad_param4";
+ bad_params.type = RTE_MEMBER_TYPE_HT;
+ bad_params.num_keys = RTE_MEMBER_BUCKET_ENTRIES / 2;
+ /* Test with less than 1 bucket for HTSS should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with too few "
+ "keys (entries) for HT\n");
+ return -1;
+ }
+
+ bad_params.name = "bad_param5";
+ bad_params.num_keys = RTE_MEMBER_ENTRIES_MAX + 1;
+ /* Test with more than maximum entries for HTSS should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with too many "
+ "keys (entries) for HT\n");
+ return -1;
+ }
+
+ bad_params.name = "bad_param5";
+ /* Test with same name should fail */
+ bad_setsum = rte_member_create(&bad_params);
+ if (bad_setsum != NULL) {
+ rte_member_free(bad_setsum);
+ printf("Unexpectedly created setsum with an already "
+ "existing name\n");
+ return -1;
+ }
+ printf("Expected error section end...\n");
+ rte_member_free(bad_setsum);
+ return 0;
+}
+
+/* Create test setsummaries. */
+static int test_member_create(void)
+{
+ params.key_len = sizeof(struct flow_key);
+
+ params.name = "test_member_ht";
+ params.is_cache = 0;
+ params.type = RTE_MEMBER_TYPE_HT;
+ setsum_ht = rte_member_create(&params);
+
+ params.name = "test_member_cache";
+ params.is_cache = 1;
+ setsum_cache = rte_member_create(&params);
+
+ params.name = "test_member_vbf";
+ params.type = RTE_MEMBER_TYPE_VBF;
+ setsum_vbf = rte_member_create(&params);
+
+ if (setsum_ht == NULL || setsum_cache == NULL || setsum_vbf == NULL) {
+ printf("Creation of setsums failed\n");
+ return -1;
+ }
+ printf("Creation of setsums succeeded\n");
+ return 0;
+}
+
+static int test_member_insert(void)
+{
+ int ret_ht, ret_cache, ret_vbf, i;
+
+ for (i = 0; i < NUM_SAMPLES; i++) {
+ ret_ht = rte_member_add(setsum_ht, &keys[i], test_set[i]);
+ ret_cache = rte_member_add(setsum_cache, &keys[i],
+ test_set[i]);
+ ret_vbf = rte_member_add(setsum_vbf, &keys[i], test_set[i]);
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0 && ret_vbf >= 0,
+ "insert error");
+ }
+ printf("insert key success\n");
+ return 0;
+}
+
+static int test_member_lookup(void)
+{
+ int ret_ht, ret_cache, ret_vbf, i;
+ uint16_t set_ht, set_cache, set_vbf;
+ member_set_t set_ids_ht[NUM_SAMPLES] = {0};
+ member_set_t set_ids_cache[NUM_SAMPLES] = {0};
+ member_set_t set_ids_vbf[NUM_SAMPLES] = {0};
+
+ uint32_t num_key_ht = NUM_SAMPLES;
+ uint32_t num_key_cache = NUM_SAMPLES;
+ uint32_t num_key_vbf = NUM_SAMPLES;
+
+ const void *key_array[NUM_SAMPLES];
+
+ /* Single lookup test */
+ for (i = 0; i < NUM_SAMPLES; i++) {
+ ret_ht = rte_member_lookup(setsum_ht, &keys[i], &set_ht);
+ ret_cache = rte_member_lookup(setsum_cache, &keys[i],
+ &set_cache);
+ ret_vbf = rte_member_lookup(setsum_vbf, &keys[i], &set_vbf);
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0 && ret_vbf >= 0,
+ "single lookup function error");
+
+ TEST_ASSERT(set_ht == test_set[i] &&
+ set_cache == test_set[i] &&
+ set_vbf == test_set[i],
+ "single lookup set value error");
+ }
+ printf("lookup single key success\n");
+
+ /* Bulk lookup test */
+ for (i = 0; i < NUM_SAMPLES; i++)
+ key_array[i] = &keys[i];
+
+ ret_ht = rte_member_lookup_bulk(setsum_ht, key_array,
+ num_key_ht, set_ids_ht);
+
+ ret_cache = rte_member_lookup_bulk(setsum_cache, key_array,
+ num_key_cache, set_ids_cache);
+
+ ret_vbf = rte_member_lookup_bulk(setsum_vbf, key_array,
+ num_key_vbf, set_ids_vbf);
+
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0 && ret_vbf >= 0,
+ "bulk lookup function error");
+
+ for (i = 0; i < NUM_SAMPLES; i++) {
+ TEST_ASSERT((set_ids_ht[i] == test_set[i]) &&
+ (set_ids_cache[i] == test_set[i]) &&
+ (set_ids_vbf[i] == test_set[i]),
+ "bulk lookup result error");
+ }
+
+ return 0;
+}
+
+static int test_member_delete(void)
+{
+ int ret_ht, ret_cache, ret_vbf, i;
+ uint16_t set_ht, set_cache, set_vbf;
+ const void *key_array[NUM_SAMPLES];
+ member_set_t set_ids_ht[NUM_SAMPLES] = {0};
+ member_set_t set_ids_cache[NUM_SAMPLES] = {0};
+ member_set_t set_ids_vbf[NUM_SAMPLES] = {0};
+ uint32_t num_key_ht = NUM_SAMPLES;
+ uint32_t num_key_cache = NUM_SAMPLES;
+ uint32_t num_key_vbf = NUM_SAMPLES;
+
+ /* Delete part of all inserted keys */
+ for (i = 0; i < NUM_SAMPLES / 2; i++) {
+ ret_ht = rte_member_delete(setsum_ht, &keys[i], test_set[i]);
+ ret_cache = rte_member_delete(setsum_cache, &keys[i],
+ test_set[i]);
+ ret_vbf = rte_member_delete(setsum_vbf, &keys[i], test_set[i]);
+ /* vBF does not support delete yet, so it should return an error */
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0,
+ "key deletion function error");
+ TEST_ASSERT(ret_vbf < 0,
+ "vbf does not support deletion, error");
+ }
+
+ for (i = 0; i < NUM_SAMPLES; i++)
+ key_array[i] = &keys[i];
+
+ ret_ht = rte_member_lookup_bulk(setsum_ht, key_array,
+ num_key_ht, set_ids_ht);
+
+ ret_cache = rte_member_lookup_bulk(setsum_cache, key_array,
+ num_key_cache, set_ids_cache);
+
+ ret_vbf = rte_member_lookup_bulk(setsum_vbf, key_array,
+ num_key_vbf, set_ids_vbf);
+
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0 && ret_vbf >= 0,
+ "bulk lookup function error");
+
+ for (i = 0; i < NUM_SAMPLES / 2; i++) {
+ TEST_ASSERT((set_ids_ht[i] == RTE_MEMBER_NO_MATCH) &&
+ (set_ids_cache[i] == RTE_MEMBER_NO_MATCH),
+ "bulk lookup result error");
+ }
+
+ for (i = NUM_SAMPLES / 2; i < NUM_SAMPLES; i++) {
+ TEST_ASSERT((set_ids_ht[i] == test_set[i]) &&
+ (set_ids_cache[i] == test_set[i]) &&
+ (set_ids_vbf[i] == test_set[i]),
+ "bulk lookup result error");
+ }
+
+ /* Delete the left of inserted keys */
+ for (i = NUM_SAMPLES / 2; i < NUM_SAMPLES; i++) {
+ ret_ht = rte_member_delete(setsum_ht, &keys[i], test_set[i]);
+ ret_cache = rte_member_delete(setsum_cache, &keys[i],
+ test_set[i]);
+ ret_vbf = rte_member_delete(setsum_vbf, &keys[i], test_set[i]);
+ /* vBF does not support delete yet, so it should return an error */
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0,
+ "key deletion function error");
+ TEST_ASSERT(ret_vbf < 0,
+ "vbf does not support deletion, error");
+ }
+
+ for (i = 0; i < NUM_SAMPLES; i++) {
+ ret_ht = rte_member_lookup(setsum_ht, &keys[i], &set_ht);
+ ret_cache = rte_member_lookup(setsum_cache, &keys[i],
+ &set_cache);
+ ret_vbf = rte_member_lookup(setsum_vbf, &keys[i], &set_vbf);
+ TEST_ASSERT(ret_ht >= 0 && ret_cache >= 0,
+ "key lookup function error");
+ TEST_ASSERT(set_ht == RTE_MEMBER_NO_MATCH &&
+ set_cache == RTE_MEMBER_NO_MATCH,
+ "key deletion failed");
+ }
+ /* Reset vbf for other following tests */
+ rte_member_reset(setsum_vbf);
+
+ printf("delete success\n");
+ return 0;
+}
+
+static int test_member_multimatch(void)
+{
+ int ret_ht, ret_vbf, ret_cache;
+ member_set_t set_ids_ht[MAX_MATCH] = {0};
+ member_set_t set_ids_vbf[MAX_MATCH] = {0};
+ member_set_t set_ids_cache[MAX_MATCH] = {0};
+
+ member_set_t set_ids_ht_m[NUM_SAMPLES][MAX_MATCH] = {{0} };
+ member_set_t set_ids_vbf_m[NUM_SAMPLES][MAX_MATCH] = {{0} };
+ member_set_t set_ids_cache_m[NUM_SAMPLES][MAX_MATCH] = {{0} };
+
+ uint32_t match_count_ht[NUM_SAMPLES];
+ uint32_t match_count_vbf[NUM_SAMPLES];
+ uint32_t match_count_cache[NUM_SAMPLES];
+
+ uint32_t num_key_ht = NUM_SAMPLES;
+ uint32_t num_key_vbf = NUM_SAMPLES;
+ uint32_t num_key_cache = NUM_SAMPLES;
+
+ const void *key_array[NUM_SAMPLES];
+
+ uint32_t i, j;
+
+ /* In HT mode the same key can be inserted at most 2 * entries-per-bucket times */
+ for (i = M_MATCH_S; i <= M_MATCH_E; i += M_MATCH_STEP) {
+ for (j = 0; j < NUM_SAMPLES; j++) {
+ ret_ht = rte_member_add(setsum_ht, &keys[j], i);
+ ret_vbf = rte_member_add(setsum_vbf, &keys[j], i);
+ ret_cache = rte_member_add(setsum_cache, &keys[j], i);
+
+ TEST_ASSERT(ret_ht >= 0 && ret_vbf >= 0 &&
+ ret_cache >= 0,
+ "insert function error");
+ }
+ }
+
+ /* Single multimatch test */
+ for (i = 0; i < NUM_SAMPLES; i++) {
+ ret_vbf = rte_member_lookup_multi(setsum_vbf, &keys[i],
+ MAX_MATCH, set_ids_vbf);
+ ret_ht = rte_member_lookup_multi(setsum_ht, &keys[i],
+ MAX_MATCH, set_ids_ht);
+ ret_cache = rte_member_lookup_multi(setsum_cache, &keys[i],
+ MAX_MATCH, set_ids_cache);
+ /*
+ * In cache mode a key overwrites the existing entry when the
+ * signatures match, so multimatch behaves like single match.
+ */
+ TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&
+ ret_cache == 1,
+ "single lookup_multi error");
+ TEST_ASSERT(set_ids_cache[0] == M_MATCH_E,
+ "single lookup_multi cache error");
+
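+ /* inserted IDs were 1, 3, ..., 15: slot j-1 holds j * M_MATCH_STEP - 1 */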
+ for (j = 1; j <= M_MATCH_CNT; j++) {
+ TEST_ASSERT(set_ids_ht[j-1] == j * M_MATCH_STEP - 1 &&
+ set_ids_vbf[j-1] ==
+ j * M_MATCH_STEP - 1,
+ "single multimatch lookup error");
+ }
+ }
+ printf("lookup single key for multimatch success\n");
+
+ /* Bulk multimatch test */
+ for (i = 0; i < NUM_SAMPLES; i++)
+ key_array[i] = &keys[i];
+ ret_vbf = rte_member_lookup_multi_bulk(setsum_vbf,
+ &key_array[0], num_key_vbf, MAX_MATCH, match_count_vbf,
+ (member_set_t *)set_ids_vbf_m);
+
+ ret_ht = rte_member_lookup_multi_bulk(setsum_ht,
+ &key_array[0], num_key_ht, MAX_MATCH, match_count_ht,
+ (member_set_t *)set_ids_ht_m);
+
+ ret_cache = rte_member_lookup_multi_bulk(setsum_cache,
+ &key_array[0], num_key_cache, MAX_MATCH,
+ match_count_cache, (member_set_t *)set_ids_cache_m);
+
+
+ for (j = 0; j < NUM_SAMPLES; j++) {
+ TEST_ASSERT(match_count_ht[j] == M_MATCH_CNT,
+ "bulk multimatch lookup HT match count error");
+ TEST_ASSERT(match_count_vbf[j] == M_MATCH_CNT,
+ "bulk multimatch lookup vBF match count error");
+ TEST_ASSERT(match_count_cache[j] == 1,
+ "bulk multimatch lookup CACHE match count error");
+ TEST_ASSERT(set_ids_cache_m[j][0] == M_MATCH_E,
+ "bulk multimatch lookup CACHE set value error");
+
+ for (i = 1; i <= M_MATCH_CNT; i++) {
+ TEST_ASSERT(set_ids_ht_m[j][i-1] ==
+ i * M_MATCH_STEP - 1,
+ "bulk multimatch lookup HT set value error");
+ TEST_ASSERT(set_ids_vbf_m[j][i-1] ==
+ i * M_MATCH_STEP - 1,
+ "bulk multimatch lookup vBF set value error");
+ }
+ }
+
+ printf("lookup for bulk multimatch success\n");
+
+ return 0;
+}
+
+static int key_compare(const void *key1, const void *key2)
+{
+ return memcmp(key1, key2, KEY_SIZE);
+}
+
+static void
+setup_keys_and_data(void)
+{
+ unsigned int i, j;
+ int num_duplicates;
+
+ /* Reset all arrays */
+ for (i = 0; i < KEY_SIZE; i++)
+ generated_keys[0][i] = 0;
+
+ /* Generate a list of keys, some of which may be duplicates */
+ for (i = 0; i < MAX_ENTRIES; i++) {
+ for (j = 0; j < KEY_SIZE; j++)
+ generated_keys[i][j] = rte_rand() & 0xFF;
+ }
+
+ /* Remove duplicates from the keys array */
+ do {
+ num_duplicates = 0;
+ /* Sort the list of keys to make it easier to find duplicates */
+ qsort(generated_keys, MAX_ENTRIES, KEY_SIZE, key_compare);
+
+ /* Sift through the list of keys and look for duplicates */
+ for (i = 0; i < MAX_ENTRIES - 1; i++) {
+ if (memcmp(generated_keys[i], generated_keys[i + 1],
+ KEY_SIZE) == 0) {
+ /* This key already exists, try again */
+ num_duplicates++;
+ for (j = 0; j < KEY_SIZE; j++)
+ generated_keys[i][j] =
+ rte_rand() & 0xFF;
+ }
+ }
+ } while (num_duplicates != 0);
+}
+
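+/* add random keys until the setsum rejects one (HT returns -ENOSPC when full) */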
+static inline int
+add_generated_keys(struct rte_member_setsum *setsum, unsigned int *added_keys)
+{
+ int ret = 0;
+
+ for (*added_keys = 0; ret >= 0 && *added_keys < MAX_ENTRIES;
+ (*added_keys)++) {
+ uint16_t set = (rte_rand() & 0xf) + 1;
+ ret = rte_member_add(setsum, &generated_keys[*added_keys], set);
+ }
+ return ret;
+}
+
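+/* cache-mode variant: stop once an add returns non-zero, i.e. an entry was evicted */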
+static inline int
+add_generated_keys_cache(struct rte_member_setsum *setsum,
+ unsigned int *added_keys)
+{
+ int ret = 0;
+
+ for (*added_keys = 0; ret == 0 && *added_keys < MAX_ENTRIES;
+ (*added_keys)++) {
+ uint16_t set = (rte_rand() & 0xf) + 1;
+ ret = rte_member_add(setsum, &generated_keys[*added_keys], set);
+ }
+ return ret;
+}
+
+static int
+test_member_loadfactor(void)
+{
+ unsigned int j;
+ unsigned int added_keys, average_keys_added = 0;
+ int ret;
+
+ setup_keys_and_data();
+
+ rte_member_free(setsum_ht);
+ rte_member_free(setsum_cache);
+ rte_member_free(setsum_vbf);
+
+ params.key_len = KEY_SIZE;
+ params.name = "test_member_ht";
+ params.is_cache = 0;
+ params.type = RTE_MEMBER_TYPE_HT;
+ setsum_ht = rte_member_create(&params);
+
+ params.name = "test_member_cache";
+ params.is_cache = 1;
+ setsum_cache = rte_member_create(&params);
+
+
+ if (setsum_ht == NULL || setsum_cache == NULL) {
+ printf("Creation of setsums failed\n");
+ return -1;
+ }
+ /* Test HT non-cache mode */
+ for (j = 0; j < ITERATIONS; j++) {
+ /* Add random entries until key cannot be added */
+ ret = add_generated_keys(setsum_ht, &added_keys);
+ if (ret != -ENOSPC) {
+ printf("Unexpected error when adding keys\n");
+ return -1;
+ }
+ average_keys_added += added_keys;
+
+ /* Reset the table */
+ rte_member_reset(setsum_ht);
+
+ /* Print a dot to show progress on operations */
+ printf(".");
+ fflush(stdout);
+ }
+
+ average_keys_added /= ITERATIONS;
+
+ printf("\nKeys inserted when no space left (non-cache) = %.2f%% (%u/%u)\n",
+ ((double) average_keys_added / params.num_keys * 100),
+ average_keys_added, params.num_keys);
+
+ /* Test cache mode */
+ added_keys = average_keys_added = 0;
+ for (j = 0; j < ITERATIONS; j++) {
+ /* Add random entries until key cannot be added */
+ ret = add_generated_keys_cache(setsum_cache, &added_keys);
+ if (ret != 1) {
+ printf("Unexpected error when adding keys\n");
+ return -1;
+ }
+ average_keys_added += added_keys;
+
+ /* Reset the table */
+ rte_member_reset(setsum_cache);
+
+ /* Print a dot to show progress on operations */
+ printf(".");
+ fflush(stdout);
+ }
+
+ average_keys_added /= ITERATIONS;
+
+ printf("\nKeys inserted when eviction happens (cache) = %.2f%% (%u/%u)\n",
+ ((double) average_keys_added / params.num_keys * 100),
+ average_keys_added, params.num_keys);
+ return 0;
+}
+
+static void
+perform_free(void)
+{
+ rte_member_free(setsum_ht);
+ rte_member_free(setsum_cache);
+ rte_member_free(setsum_vbf);
+}
+
+static int
+test_member(void)
+{
+ if (test_member_create_bad_param() < 0)
+ return -1;
+
+ if (test_member_find_existing() < 0)
+ return -1;
+
+ if (test_member_create() < 0) {
+ perform_free();
+ return -1;
+ }
+ if (test_member_insert() < 0) {
+ perform_free();
+ return -1;
+ }
+ if (test_member_lookup() < 0) {
+ perform_free();
+ return -1;
+ }
+ if (test_member_delete() < 0) {
+ perform_free();
+ return -1;
+ }
+ if (test_member_multimatch() < 0) {
+ perform_free();
+ return -1;
+ }
+ if (test_member_loadfactor() < 0) {
+ rte_member_free(setsum_ht);
+ rte_member_free(setsum_cache);
+ return -1;
+ }
+
+ perform_free();
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(member_autotest, test_member);
diff --git a/test/test/test_member_perf.c b/test/test/test_member_perf.c
new file mode 100644
index 00000000..e13066f1
--- /dev/null
+++ b/test/test/test_member_perf.c
@@ -0,0 +1,654 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_memcpy.h>
+#include <rte_thash.h>
+#include <rte_member.h>
+
+#include "test.h"
+
+#define NUM_KEYSIZES 10
+#define NUM_SHUFFLES 10
+#define MAX_KEYSIZE 64
+#define MAX_ENTRIES (1 << 19)
+#define KEYS_TO_ADD (MAX_ENTRIES * 75 / 100) /* 75% table utilization */
+#define NUM_LOOKUPS (KEYS_TO_ADD * 5) /* Loop among keys added, several times */
+#define VBF_SET_CNT 16
+#define BURST_SIZE 64
+#define VBF_FALSE_RATE 0.03
+
+static unsigned int test_socket_id;
+
+enum sstype {
+ HT = 0,
+ CACHE,
+ VBF,
+ NUM_TYPE
+};
+
+enum operations {
+ ADD = 0,
+ LOOKUP,
+ LOOKUP_BULK,
+ LOOKUP_MULTI,
+ LOOKUP_MULTI_BULK,
+ DELETE,
+ LOOKUP_MISS,
+ NUM_OPERATIONS
+};
+
+struct member_perf_params {
+ struct rte_member_setsum *setsum[NUM_TYPE];
+ uint32_t key_size;
+ unsigned int cycle;
+};
+
+static uint32_t hashtest_key_lens[] = {
+ /* standard key sizes */
+ 4, 8, 16, 32, 48, 64,
+ /* IPv4 SRC + DST + protocol, unpadded */
+ 9,
+ /* IPv4 5-tuple, unpadded */
+ 13,
+ /* IPv6 5-tuple, unpadded */
+ 37,
+ /* IPv6 5-tuple, padded to 8-byte boundary */
+ 40
+};
+
+/* Array to store number of cycles per operation */
+uint64_t cycles[NUM_TYPE][NUM_KEYSIZES][NUM_OPERATIONS];
+uint64_t false_data[NUM_TYPE][NUM_KEYSIZES];
+uint64_t false_data_bulk[NUM_TYPE][NUM_KEYSIZES];
+uint64_t false_data_multi[NUM_TYPE][NUM_KEYSIZES];
+uint64_t false_data_multi_bulk[NUM_TYPE][NUM_KEYSIZES];
+
+uint64_t false_hit[NUM_TYPE][NUM_KEYSIZES];
+
+/* Array to store the data */
+member_set_t data[NUM_TYPE][KEYS_TO_ADD];
+
+/* Array to store all input keys */
+uint8_t keys[KEYS_TO_ADD][MAX_KEYSIZE];
+
+/* Shuffle the keys that have been added, so lookups will be totally random */
+static void
+shuffle_input_keys(struct member_perf_params *params)
+{
+ member_set_t temp_data;
+ unsigned int i, j;
+ uint32_t swap_idx;
+ uint8_t temp_key[MAX_KEYSIZE];
+
+ for (i = KEYS_TO_ADD - 1; i > 0; i--) {
+ swap_idx = rte_rand() % i;
+ memcpy(temp_key, keys[i], hashtest_key_lens[params->cycle]);
+ memcpy(keys[i], keys[swap_idx],
+ hashtest_key_lens[params->cycle]);
+ memcpy(keys[swap_idx], temp_key,
+ hashtest_key_lens[params->cycle]);
+ for (j = 0; j < NUM_TYPE; j++) {
+ temp_data = data[j][i];
+ data[j][i] = data[j][swap_idx];
+ data[j][swap_idx] = temp_data;
+ }
+ }
+}
+
+static int key_compare(const void *key1, const void *key2)
+{
+ return memcmp(key1, key2, MAX_KEYSIZE);
+}
+
+struct rte_member_parameters member_params = {
+ .num_keys = MAX_ENTRIES, /* Total hash table entries. */
+ .key_len = 4, /* Length of hash key. */
+
+ /* num_set and false_positive_rate only relevant to vBF */
+ .num_set = VBF_SET_CNT,
+ .false_positive_rate = 0.03,
+ .prim_hash_seed = 0,
+ .sec_hash_seed = 1,
+ .socket_id = 0, /* NUMA Socket ID for memory. */
+ };
+
+static int
+setup_keys_and_data(struct member_perf_params *params, unsigned int cycle,
+ int miss)
+{
+ unsigned int i, j;
+ int num_duplicates;
+
+ params->key_size = hashtest_key_lens[cycle];
+ params->cycle = cycle;
+
+ /* Reset all arrays */
+ for (i = 0; i < params->key_size; i++)
+ keys[0][i] = 0;
+
+ /* Generate a list of keys, some of which may be duplicates */
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ for (j = 0; j < params->key_size; j++)
+ keys[i][j] = rte_rand() & 0xFF;
+
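+ /* set IDs must be non-zero; vBF IDs must fall within [1, VBF_SET_CNT] */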
+ data[HT][i] = data[CACHE][i] = (rte_rand() & 0x7FFE) + 1;
+ data[VBF][i] = rte_rand() % VBF_SET_CNT + 1;
+ }
+
+ /* Remove duplicates from the keys array */
+ do {
+ num_duplicates = 0;
+
+ /* Sort the list of keys to make it easier to find duplicates */
+ qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare);
+
+ /* Sift through the list of keys and look for duplicates */
+ for (i = 0; i < KEYS_TO_ADD - 1; i++) {
+ if (memcmp(keys[i], keys[i + 1],
+ params->key_size) == 0) {
+ /* This key already exists, try again */
+ num_duplicates++;
+ for (j = 0; j < params->key_size; j++)
+ keys[i][j] = rte_rand() & 0xFF;
+ }
+ }
+ } while (num_duplicates != 0);
+
+ /* Shuffle the random values again */
+ shuffle_input_keys(params);
+
+ /* For testing miss lookup, we insert half and lookup the other half */
+ unsigned int entry_cnt, bf_key_cnt;
+ if (!miss) {
+ entry_cnt = MAX_ENTRIES;
+ bf_key_cnt = KEYS_TO_ADD;
+ } else {
+ entry_cnt = MAX_ENTRIES / 2;
+ bf_key_cnt = KEYS_TO_ADD / 2;
+ }
+ member_params.false_positive_rate = VBF_FALSE_RATE;
+ member_params.key_len = params->key_size;
+ member_params.socket_id = test_socket_id;
+ member_params.num_keys = entry_cnt;
+ member_params.name = "test_member_ht";
+ member_params.is_cache = 0;
+ member_params.type = RTE_MEMBER_TYPE_HT;
+ params->setsum[HT] = rte_member_create(&member_params);
+ if (params->setsum[HT] == NULL)
+ fprintf(stderr, "ht create fail\n");
+
+ member_params.name = "test_member_cache";
+ member_params.is_cache = 1;
+ params->setsum[CACHE] = rte_member_create(&member_params);
+ if (params->setsum[CACHE] == NULL)
+ fprintf(stderr, "CACHE create fail\n");
+
+ member_params.name = "test_member_vbf";
+ member_params.type = RTE_MEMBER_TYPE_VBF;
+ member_params.num_keys = bf_key_cnt;
+ params->setsum[VBF] = rte_member_create(&member_params);
+ if (params->setsum[VBF] == NULL)
+ fprintf(stderr, "VBF create fail\n");
+ for (i = 0; i < NUM_TYPE; i++) {
+ if (params->setsum[i] == NULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+timed_adds(struct member_perf_params *params, int type)
+{
+ const uint64_t start_tsc = rte_rdtsc();
+ unsigned int i, a;
+ int32_t ret;
+
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ ret = rte_member_add(params->setsum[type], &keys[i],
+ data[type][i]);
+ if (ret < 0) {
+ printf("Error %d in rte_member_add - key=0x", ret);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x", keys[i][a]);
+ printf(" value=%d, type: %d\n", data[type][i], type);
+
+ return -1;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][ADD] = time_taken / KEYS_TO_ADD;
+ return 0;
+}
+
+static int
+timed_lookups(struct member_perf_params *params, int type)
+{
+ unsigned int i, j;
+
+ false_data[type][params->cycle] = 0;
+
+ const uint64_t start_tsc = rte_rdtsc();
+ member_set_t result;
+ int ret;
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD; j++) {
+ ret = rte_member_lookup(params->setsum[type], &keys[j],
+ &result);
+ if (ret < 0) {
+ printf("lookup failed internally\n");
+ return -1;
+ }
+ if (type == HT && result == RTE_MEMBER_NO_MATCH) {
+ printf("HT mode shouldn't have false negatives\n");
+ return -1;
+ }
+ if (result != data[type][j])
+ false_data[type][params->cycle]++;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][LOOKUP] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_lookups_bulk(struct member_perf_params *params, int type)
+{
+ unsigned int i, j, k;
+ member_set_t result[BURST_SIZE] = {0};
+ const void *keys_burst[BURST_SIZE];
+ int ret;
+
+ false_data_bulk[type][params->cycle] = 0;
+
+ const uint64_t start_tsc = rte_rdtsc();
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD / BURST_SIZE; j++) {
+ for (k = 0; k < BURST_SIZE; k++)
+ keys_burst[k] = keys[j * BURST_SIZE + k];
+
+ ret = rte_member_lookup_bulk(params->setsum[type],
+ keys_burst,
+ BURST_SIZE,
+ result);
+ if (ret <= 0) {
+ printf("lookup bulk has wrong return value\n");
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ uint32_t data_idx = j * BURST_SIZE + k;
+ if (type == HT && result[k] ==
+ RTE_MEMBER_NO_MATCH) {
+ printf("HT mode shouldn't have "
+ "false negatives\n");
+ return -1;
+ }
+ if (result[k] != data[type][data_idx])
+ false_data_bulk[type][params->cycle]++;
+ }
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][LOOKUP_BULK] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_lookups_multimatch(struct member_perf_params *params, int type)
+{
+ unsigned int i, j;
+ member_set_t result[RTE_MEMBER_BUCKET_ENTRIES] = {0};
+ int ret;
+ false_data_multi[type][params->cycle] = 0;
+
+ const uint64_t start_tsc = rte_rdtsc();
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD; j++) {
+ ret = rte_member_lookup_multi(params->setsum[type],
+ &keys[j], RTE_MEMBER_BUCKET_ENTRIES, result);
+ if (type != CACHE && ret <= 0) {
+ printf("lookup multi has wrong return value %d, "
+ "type %d\n", ret, type);
+ }
+ if (type == HT && ret == 0) {
+ printf("HT mode shouldn't have false negatives\n");
+ return -1;
+ }
+ /*
+ * For performance test purpose, we do not iterate all
+ * results here. We assume most likely each key can only
+ * find one match which is result[0].
+ */
+ if (result[0] != data[type][j])
+ false_data_multi[type][params->cycle]++;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][LOOKUP_MULTI] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_lookups_multimatch_bulk(struct member_perf_params *params, int type)
+{
+ unsigned int i, j, k;
+ member_set_t result[BURST_SIZE][RTE_MEMBER_BUCKET_ENTRIES] = {{0} };
+ const void *keys_burst[BURST_SIZE];
+ uint32_t match_count[BURST_SIZE];
+ int ret;
+
+ false_data_multi_bulk[type][params->cycle] = 0;
+
+ const uint64_t start_tsc = rte_rdtsc();
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD / BURST_SIZE; j++) {
+ for (k = 0; k < BURST_SIZE; k++)
+ keys_burst[k] = keys[j * BURST_SIZE + k];
+
+ ret = rte_member_lookup_multi_bulk(
+ params->setsum[type],
+ keys_burst, BURST_SIZE,
+ RTE_MEMBER_BUCKET_ENTRIES, match_count,
+ (member_set_t *)result);
+ if (ret < 0) {
+ printf("lookup multimatch bulk has wrong return"
+ " value\n");
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if (type != CACHE && match_count[k] == 0) {
+ printf("lookup multimatch bulk get "
+ "wrong match count\n");
+ return -1;
+ }
+ if (type == HT && match_count[k] == 0) {
+ printf("HT mode shouldn't have "
+ "false negatives\n");
+ return -1;
+ }
+ uint32_t data_idx = j * BURST_SIZE + k;
+ if (result[k][0] != data[type][data_idx])
+ false_data_multi_bulk[type][params->cycle]++;
+ }
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][LOOKUP_MULTI_BULK] = time_taken /
+ NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_deletes(struct member_perf_params *params, int type)
+{
+ unsigned int i;
+ int32_t ret;
+
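+ /* vBF does not support deletion, so skip it here */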
+ if (type == VBF)
+ return 0;
+ const uint64_t start_tsc = rte_rdtsc();
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ ret = rte_member_delete(params->setsum[type], &keys[i],
+ data[type][i]);
+ if (type != CACHE && ret < 0) {
+ printf("delete error\n");
+ return -1;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][DELETE] = time_taken / KEYS_TO_ADD;
+
+ return 0;
+}
+
+static int
+timed_miss_lookup(struct member_perf_params *params, int type)
+{
+ unsigned int i, j;
+ int ret;
+
+ false_hit[type][params->cycle] = 0;
+
+ for (i = 0; i < KEYS_TO_ADD / 2; i++) {
+ ret = rte_member_add(params->setsum[type], &keys[i],
+ data[type][i]);
+ if (ret < 0) {
+ unsigned int a;
+ printf("Error %d in rte_member_add - key=0x", ret);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x", keys[i][a]);
+ printf(" value=%d, type: %d\n", data[type][i], type);
+
+ return -1;
+ }
+ }
+
+ const uint64_t start_tsc = rte_rdtsc();
+ member_set_t result;
+
+ for (i = 0; i < 2 * NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = KEYS_TO_ADD / 2; j < KEYS_TO_ADD; j++) {
+ ret = rte_member_lookup(params->setsum[type], &keys[j],
+ &result);
+ if (ret < 0) {
+ printf("lookup failed internally\n");
+ return -1;
+ }
+ if (result != RTE_MEMBER_NO_MATCH)
+ false_hit[type][params->cycle]++;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[type][params->cycle][LOOKUP_MISS] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static void
+perform_frees(struct member_perf_params *params)
+{
+ int i;
+ for (i = 0; i < NUM_TYPE; i++) {
+ if (params->setsum[i] != NULL) {
+ rte_member_free(params->setsum[i]);
+ params->setsum[i] = NULL;
+ }
+ }
+}
+
+static int
+exit_with_fail(const char *testname, struct member_perf_params *params,
+ unsigned int i, unsigned int j)
+{
+ printf("<<<<<Test %s failed at keysize %d iteration %d type %d>>>>>\n",
+ testname, hashtest_key_lens[params->cycle], i, j);
+ perform_frees(params);
+ return -1;
+}
+
+static int
+run_all_tbl_perf_tests(void)
+{
+ unsigned int i, j, k;
+ struct member_perf_params params;
+
+ printf("Measuring performance, please wait\n");
+ fflush(stdout);
+
+ test_socket_id = rte_socket_id();
+
+ for (i = 0; i < NUM_KEYSIZES; i++) {
+ if (setup_keys_and_data(&params, i, 0) < 0) {
+ printf("Could not create keys/data/table\n");
+ return -1;
+ }
+ for (j = 0; j < NUM_TYPE; j++) {
+
+ if (timed_adds(&params, j) < 0)
+ return exit_with_fail("timed_adds", &params,
+ i, j);
+
+ for (k = 0; k < NUM_SHUFFLES; k++)
+ shuffle_input_keys(&params);
+
+ if (timed_lookups(&params, j) < 0)
+ return exit_with_fail("timed_lookups", &params,
+ i, j);
+
+ if (timed_lookups_bulk(&params, j) < 0)
+ return exit_with_fail("timed_lookups_bulk",
+ &params, i, j);
+
+ if (timed_lookups_multimatch(&params, j) < 0)
+ return exit_with_fail("timed_lookups_multi",
+ &params, i, j);
+
+ if (timed_lookups_multimatch_bulk(&params, j) < 0)
+ return exit_with_fail("timed_lookups_multi_bulk",
+ &params, i, j);
+
+ if (timed_deletes(&params, j) < 0)
+ return exit_with_fail("timed_deletes", &params,
+ i, j);
+
+ }
+ /* Print a dot to show progress on operations */
+ printf(".");
+ fflush(stdout);
+
+ perform_frees(&params);
+ }
+
+ /* Test false positive rate using un-inserted keys */
+ for (i = 0; i < NUM_KEYSIZES; i++) {
+ if (setup_keys_and_data(&params, i, 1) < 0) {
+ printf("Could not create keys/data/table\n");
+ return -1;
+ }
+ for (j = 0; j < NUM_TYPE; j++) {
+ if (timed_miss_lookup(&params, j) < 0)
+ return exit_with_fail("timed_miss_lookup",
+ &params, i, j);
+ }
+ perform_frees(&params);
+ }
+
+ printf("\nResults (in CPU cycles/operation)\n");
+ printf("-----------------------------------\n");
+ printf("\n%-18s%-18s%-18s%-18s%-18s%-18s%-18s%-18s%-18s\n",
+ "Keysize", "type", "Add", "Lookup", "Lookup_bulk",
+ "lookup_multi", "lookup_multi_bulk", "Delete",
+ "miss_lookup");
+ for (i = 0; i < NUM_KEYSIZES; i++) {
+ for (j = 0; j < NUM_TYPE; j++) {
+ printf("%-18d", hashtest_key_lens[i]);
+ printf("%-18d", j);
+ for (k = 0; k < NUM_OPERATIONS; k++)
+ printf("%-18"PRIu64, cycles[j][i][k]);
+ printf("\n");
+ }
+ }
+
+ printf("\nFalse results rate (and false positive rate)\n");
+ printf("-----------------------------------\n");
+ printf("\n%-18s%-18s%-18s%-18s%-18s%-18s%-18s\n",
+ "Keysize", "type", "fr_single", "fr_bulk", "fr_multi",
+ "fr_multi_bulk", "false_positive_rate");
+ /* Key size not influence False rate so just print out one key size */
+ for (i = 0; i < 1; i++) {
+ for (j = 0; j < NUM_TYPE; j++) {
+ printf("%-18d", hashtest_key_lens[i]);
+ printf("%-18d", j);
+ printf("%-18f", (float)false_data[j][i] / NUM_LOOKUPS);
+ printf("%-18f", (float)false_data_bulk[j][i] /
+ NUM_LOOKUPS);
+ printf("%-18f", (float)false_data_multi[j][i] /
+ NUM_LOOKUPS);
+ printf("%-18f", (float)false_data_multi_bulk[j][i] /
+ NUM_LOOKUPS);
+ printf("%-18f", (float)false_hit[j][i] /
+ NUM_LOOKUPS);
+ printf("\n");
+ }
+ }
+ return 0;
+}
+
+static int
+test_member_perf(void)
+{
+
+ if (run_all_tbl_perf_tests() < 0)
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(member_perf_autotest, test_member_perf);
diff --git a/test/test/test_memcpy_perf.c b/test/test/test_memcpy_perf.c
index ff3aaaac..f1084ca1 100644
--- a/test/test/test_memcpy_perf.c
+++ b/test/test/test_memcpy_perf.c
@@ -35,6 +35,7 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
+#include <sys/time.h>
#include <rte_common.h>
#include <rte_cycles.h>
@@ -217,8 +218,9 @@ do { \
memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
total_time2 += rte_rdtsc() - start_time; \
} \
- printf("%8.0f -", (double)total_time /TEST_ITERATIONS); \
- printf("%5.0f", (double)total_time2 / TEST_ITERATIONS); \
+ printf("%3.0f -", (double)total_time / TEST_ITERATIONS); \
+ printf("%3.0f", (double)total_time2 / TEST_ITERATIONS); \
+ printf("(%6.2f%%) ", ((double)total_time - total_time2)*100/total_time2); \
} while (0)
/* Run aligned memcpy tests for each cached/uncached permutation */
@@ -300,6 +302,9 @@ static int
perf_test(void)
{
int ret;
+ struct timeval tv_begin, tv_end;
+ double time_aligned, time_unaligned;
+ double time_aligned_const, time_unaligned_const;
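+ /* wall-clock duration of each test phase, reported after the tables */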
ret = init_buffers();
if (ret != 0)
@@ -316,25 +321,47 @@ perf_test(void)
do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE);
printf("\n** rte_memcpy() - memcpy perf. tests (C = compile-time constant) **\n"
- "======= ============== ============== ============== ==============\n"
- " Size Cache to cache Cache to mem Mem to cache Mem to mem\n"
- "(bytes) (ticks) (ticks) (ticks) (ticks)\n"
- "------- -------------- -------------- -------------- --------------");
+ "======= ================= ================= ================= =================\n"
+ " Size Cache to cache Cache to mem Mem to cache Mem to mem\n"
+ "(bytes) (ticks) (ticks) (ticks) (ticks)\n"
+ "------- ----------------- ----------------- ----------------- -----------------");
- printf("\n========================== %2dB aligned ============================", ALIGNMENT_UNIT);
+ printf("\n================================= %2dB aligned =================================",
+ ALIGNMENT_UNIT);
/* Do aligned tests where size is a variable */
+ gettimeofday(&tv_begin, NULL);
perf_test_variable_aligned();
- printf("\n------- -------------- -------------- -------------- --------------");
+ gettimeofday(&tv_end, NULL);
+ time_aligned = (double)(tv_end.tv_sec - tv_begin.tv_sec)
+ + ((double)tv_end.tv_usec - tv_begin.tv_usec)/1000000;
+ printf("\n------- ----------------- ----------------- ----------------- -----------------");
/* Do aligned tests where size is a compile-time constant */
+ gettimeofday(&tv_begin, NULL);
perf_test_constant_aligned();
- printf("\n=========================== Unaligned =============================");
+ gettimeofday(&tv_end, NULL);
+ time_aligned_const = (double)(tv_end.tv_sec - tv_begin.tv_sec)
+ + ((double)tv_end.tv_usec - tv_begin.tv_usec)/1000000;
+ printf("\n================================== Unaligned ==================================");
/* Do unaligned tests where size is a variable */
+ gettimeofday(&tv_begin, NULL);
perf_test_variable_unaligned();
- printf("\n------- -------------- -------------- -------------- --------------");
+ gettimeofday(&tv_end, NULL);
+ time_unaligned = (double)(tv_end.tv_sec - tv_begin.tv_sec)
+ + ((double)tv_end.tv_usec - tv_begin.tv_usec)/1000000;
+ printf("\n------- ----------------- ----------------- ----------------- -----------------");
/* Do unaligned tests where size is a compile-time constant */
+ gettimeofday(&tv_begin, NULL);
perf_test_constant_unaligned();
- printf("\n======= ============== ============== ============== ==============\n\n");
-
+ gettimeofday(&tv_end, NULL);
+ time_unaligned_const = (double)(tv_end.tv_sec - tv_begin.tv_sec)
+ + ((double)tv_end.tv_usec - tv_begin.tv_usec)/1000000;
+ printf("\n======= ================= ================= ================= =================\n\n");
+
+ printf("Test Execution Time (seconds):\n");
+ printf("Aligned variable copy size = %8.3f\n", time_aligned);
+ printf("Aligned constant copy size = %8.3f\n", time_aligned_const);
+ printf("Unaligned variable copy size = %8.3f\n", time_unaligned);
+ printf("Unaligned constant copy size = %8.3f\n", time_unaligned_const);
free_buffers();
return 0;
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index 0a442395..37ead503 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -44,7 +44,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
@@ -129,7 +128,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
rte_mempool_dump(stdout, mp);
printf("get an object\n");
- if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
GOTO_ERR(ret, out);
rte_mempool_dump(stdout, mp);
@@ -145,28 +144,28 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
GOTO_ERR(ret, out);
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
printf("get physical address of an object\n");
- if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj))
GOTO_ERR(ret, out);
#endif
printf("put the object back\n");
- rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ rte_mempool_generic_put(mp, &obj, 1, cache);
rte_mempool_dump(stdout, mp);
printf("get 2 objects\n");
- if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
GOTO_ERR(ret, out);
- if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
- rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) {
+ rte_mempool_generic_put(mp, &obj, 1, cache);
GOTO_ERR(ret, out);
}
rte_mempool_dump(stdout, mp);
printf("put the objects back\n");
- rte_mempool_generic_put(mp, &obj, 1, cache, 0);
- rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
+ rte_mempool_generic_put(mp, &obj, 1, cache);
+ rte_mempool_generic_put(mp, &obj2, 1, cache);
rte_mempool_dump(stdout, mp);
/*
@@ -178,7 +177,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
GOTO_ERR(ret, out);
for (i = 0; i < MEMPOOL_SIZE; i++) {
- if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
+ if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0)
break;
}
@@ -200,7 +199,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
ret = -1;
}
- rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
+ rte_mempool_generic_put(mp, &objtable[i], 1, cache);
}
free(objtable);
@@ -474,7 +473,7 @@ test_mempool_same_name_twice_creation(void)
}
/*
- * BAsic test for mempool_xmem functions.
+ * Basic test for mempool_xmem functions.
*/
static int
test_mempool_xmem_misc(void)
@@ -485,10 +484,11 @@ test_mempool_xmem_misc(void)
elt_num = MAX_KEEP;
total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
- sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX);
+ sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX,
+ 0);
usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
- MEMPOOL_PG_SHIFT_MAX);
+ MEMPOOL_PG_SHIFT_MAX, 0);
if (sz != (size_t)usz) {
printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
diff --git a/test/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index 07b28c06..749b364e 100644
--- a/test/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -44,7 +44,6 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
@@ -186,7 +185,7 @@ per_lcore_mempool_test(void *arg)
ret = rte_mempool_generic_get(mp,
&obj_table[idx],
n_get_bulk,
- cache, 0);
+ cache);
if (unlikely(ret < 0)) {
rte_mempool_dump(stdout, mp);
/* in this case, objects are lost... */
@@ -200,7 +199,7 @@ per_lcore_mempool_test(void *arg)
while (idx < n_keep) {
rte_mempool_generic_put(mp, &obj_table[idx],
n_put_bulk,
- cache, 0);
+ cache);
idx += n_put_bulk;
}
}
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index 7ae31cf7..c9394c4a 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -78,7 +78,7 @@
/* Test if memory overlaps: return 1 if true, or 0 if false. */
static int
-is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
+is_memory_overlap(rte_iova_t ptr1, size_t len1, rte_iova_t ptr2, size_t len2)
{
if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
return 1;
@@ -510,7 +510,7 @@ test_memzone_aligned(void)
printf("Unable to reserve 64-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_32->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone_aligned_32->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
return -1;
@@ -521,7 +521,7 @@ test_memzone_aligned(void)
printf("Unable to reserve 128-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_128->phys_addr & 127) != 0)
+ if ((memzone_aligned_128->iova & 127) != 0)
return -1;
if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
return -1;
@@ -532,7 +532,7 @@ test_memzone_aligned(void)
printf("Unable to reserve 256-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_256->phys_addr & 255) != 0)
+ if ((memzone_aligned_256->iova & 255) != 0)
return -1;
if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
return -1;
@@ -543,7 +543,7 @@ test_memzone_aligned(void)
printf("Unable to reserve 512-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_512->phys_addr & 511) != 0)
+ if ((memzone_aligned_512->iova & 511) != 0)
return -1;
if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
return -1;
@@ -554,7 +554,7 @@ test_memzone_aligned(void)
printf("Unable to reserve 1024-byte aligned memzone!\n");
return -1;
}
- if ((memzone_aligned_1024->phys_addr & 1023) != 0)
+ if ((memzone_aligned_1024->iova & 1023) != 0)
return -1;
if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
return -1;
@@ -563,35 +563,35 @@ test_memzone_aligned(void)
/* check that zones don't overlap */
printf("check overlapping\n");
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_128->phys_addr, memzone_aligned_128->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_128->iova, memzone_aligned_128->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_256->iova, memzone_aligned_256->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_256->iova, memzone_aligned_256->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
- memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+ if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
+ memzone_aligned_512->iova, memzone_aligned_512->len))
return -1;
- if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
- if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
- memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+ if (is_memory_overlap(memzone_aligned_512->iova, memzone_aligned_512->len,
+ memzone_aligned_1024->iova, memzone_aligned_1024->len))
return -1;
return 0;
}
@@ -601,9 +601,9 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
uint32_t bound)
{
const struct rte_memzone *mz;
- phys_addr_t bmask;
+ rte_iova_t bmask;
- bmask = ~((phys_addr_t)bound - 1);
+ bmask = ~((rte_iova_t)bound - 1);
if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
align, bound)) == NULL) {
@@ -612,7 +612,7 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
return -1;
}
- if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
+ if ((mz->iova & ((rte_iova_t)align - 1)) != 0) {
printf("%s(%s): invalid phys addr alignment\n",
__func__, mz->name);
return -1;
@@ -631,8 +631,8 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
return -1;
}
- if ((mz->phys_addr & bmask) !=
- ((mz->phys_addr + mz->len - 1) & bmask)) {
+ if ((mz->iova & bmask) !=
+ ((mz->iova + mz->len - 1) & bmask)) {
printf("%s(%s): invalid memzone boundary %u crossed\n",
__func__, mz->name, bound);
return -1;
@@ -787,11 +787,11 @@ test_memzone(void)
/* check cache-line alignments */
printf("check alignments and lengths\n");
- if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone1->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
- if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if ((memzone2->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
- if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0)
+ if (memzone3 != NULL && (memzone3->iova & RTE_CACHE_LINE_MASK) != 0)
return -1;
if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
return -1;
@@ -806,16 +806,16 @@ test_memzone(void)
/* check that zones don't overlap */
printf("check overlapping\n");
- if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
- memzone2->phys_addr, memzone2->len))
+ if (is_memory_overlap(memzone1->iova, memzone1->len,
+ memzone2->iova, memzone2->len))
return -1;
if (memzone3 != NULL &&
- is_memory_overlap(memzone1->phys_addr, memzone1->len,
- memzone3->phys_addr, memzone3->len))
+ is_memory_overlap(memzone1->iova, memzone1->len,
+ memzone3->iova, memzone3->len))
return -1;
if (memzone3 != NULL &&
- is_memory_overlap(memzone2->phys_addr, memzone2->len,
- memzone3->phys_addr, memzone3->len))
+ is_memory_overlap(memzone2->iova, memzone2->len,
+ memzone3->iova, memzone3->len))
return -1;
printf("check socket ID\n");
diff --git a/test/test/test_per_lcore.c b/test/test/test_per_lcore.c
index 247e1714..4eb7fe2c 100644
--- a/test/test/test_per_lcore.c
+++ b/test/test/test_per_lcore.c
@@ -37,7 +37,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
diff --git a/test/test/test_pmd_perf.c b/test/test/test_pmd_perf.c
index 1ffd65a5..255f2607 100644
--- a/test/test/test_pmd_perf.c
+++ b/test/test/test_pmd_perf.c
@@ -50,7 +50,7 @@
#define MAX_PKT_BURST (32)
#define RTE_TEST_RX_DESC_DEFAULT (128)
#define RTE_TEST_TX_DESC_DEFAULT (512)
-#define RTE_PORT_ALL (~(uint8_t)0x0)
+#define RTE_PORT_ALL (~(uint16_t)0x0)
/* how long test would take at full line rate */
#define RTE_TEST_DURATION (2)
@@ -143,7 +143,7 @@ struct lcore_conf {
uint8_t status;
uint8_t socketid;
uint16_t nb_ports;
- uint8_t portlist[RTE_MAX_ETHPORTS];
+ uint16_t portlist[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -160,11 +160,12 @@ static uint32_t sc_flag;
/* Check the link status of all ports in up to 3s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("Checking link statuses...\n");
@@ -179,16 +180,15 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status) {
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
if (link_mbps == 0)
link_mbps = link.link_speed;
} else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -335,7 +335,7 @@ reset_count(void)
}
static void
-stats_display(uint8_t port_id)
+stats_display(uint16_t port_id)
{
struct rte_eth_stats stats;
rte_eth_stats_get(port_id, &stats);
@@ -383,7 +383,7 @@ measure_rxtx(struct lcore_conf *conf,
while (likely(!stop)) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
@@ -422,7 +422,7 @@ measure_rxonly(struct lcore_conf *conf,
portid = conf->portlist[i];
cur_tsc = rte_rdtsc();
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
@@ -459,7 +459,7 @@ measure_txonly(struct lcore_conf *conf,
while (likely(!stop)) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
@@ -537,7 +537,7 @@ main_loop(__rte_unused void *args)
portid = conf->portlist[i];
int nb_free = pkt_per_port;
do { /* dry out */
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
nb_tx = 0;
while (nb_tx < nb_rx)
@@ -572,6 +572,7 @@ poll_burst(void *args)
unsigned i, portid, nb_rx = 0;
uint64_t total;
uint64_t timeout = MAX_IDLE;
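+ /* per-port count of packets still expected; caps the rx burst size below */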
+ int num[RTE_MAX_ETHPORTS];
lcore_id = rte_lcore_id();
conf = &lcore_conf[lcore_id];
@@ -591,6 +592,7 @@ poll_burst(void *args)
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
next[portid] = i * pkt_per_port;
+ num[portid] = pkt_per_port;
}
while (!rte_atomic64_read(&start))
@@ -600,9 +602,9 @@ poll_burst(void *args)
while (total) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
- &pkts_burst[next[portid]],
- MAX_PKT_BURST);
+ nb_rx = rte_eth_rx_burst(portid, 0,
+ &pkts_burst[next[portid]],
+ RTE_MIN(MAX_PKT_BURST, num[portid]));
if (unlikely(nb_rx == 0)) {
timeout--;
if (unlikely(timeout == 0))
@@ -610,6 +612,7 @@ poll_burst(void *args)
continue;
}
next[portid] += nb_rx;
+ num[portid] -= nb_rx;
total -= nb_rx;
}
}
@@ -618,7 +621,6 @@ timeout:
printf("%"PRIu64" packets lost, IDLE %"PRIu64" times\n",
total, MAX_IDLE - timeout);
-
/* clean up */
total = pkt_per_port * conf->nb_ports - total;
for (i = 0; i < total; i++)
@@ -644,7 +646,7 @@ exec_burst(uint32_t flags, int lcore)
conf = &lcore_conf[lcore];
pkt_per_port = MAX_TRAFFIC_BURST;
- num = pkt_per_port;
+ num = pkt_per_port * conf->nb_ports;
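+ /* num is now the packet budget across all ports; each tx burst below
+ * drains it by the number actually sent
+ */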
rte_atomic64_init(&start);
@@ -661,11 +663,12 @@ exec_burst(uint32_t flags, int lcore)
nb_tx = RTE_MIN(MAX_PKT_BURST, num);
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- rte_eth_tx_burst(portid, 0,
+ nb_tx = rte_eth_tx_burst(portid, 0,
&tx_burst[idx], nb_tx);
idx += nb_tx;
+ num -= nb_tx;
}
- num -= nb_tx;
+
}
sleep(5);
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index 004882af..8e9cd331 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -54,7 +54,7 @@ static const volatile unsigned bulk_sizes[] = { 1, 8, 32 };
/* The ring structure used for tests */
static struct rte_ring *r;
-static uint8_t ring_ethdev_port;
+static uint16_t ring_ethdev_port;
/* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
static void
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
index d664b04b..5eb40a0f 100644
--- a/test/test/test_ring.c
+++ b/test/test/test_ring.c
@@ -43,7 +43,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
@@ -375,37 +374,37 @@ test_ring_burst_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
+ if (ret != 1)
goto fail;
printf("enqueue 2 objs\n");
ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
printf("enqueue MAX_BULK objs\n");
ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
printf("dequeue 1 obj\n");
ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
+ if (ret != 1)
goto fail;
printf("dequeue 2 objs\n");
ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
printf("dequeue MAX_BULK objs\n");
ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
/* check data */
@@ -423,22 +422,21 @@ test_ring_burst_basic(void)
for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
+ if (ret != MAX_BULK)
goto fail;
- }
}
printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
/* Always one free entry left */
ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ if (ret != MAX_BULK - 3)
goto fail;
printf("Test if ring is full \n");
@@ -447,26 +445,26 @@ test_ring_burst_basic(void)
printf("Test enqueue for a full entry \n");
ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
- if ((ret & RTE_RING_SZ_MASK) != 0)
+ if (ret != 0)
goto fail;
printf("Test dequeue without enough objects \n");
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
}
/* Available memory space for the exact MAX_BULK entries */
ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ if (ret != MAX_BULK - 3)
goto fail;
printf("Test if ring is empty \n");
@@ -490,37 +488,37 @@ test_ring_burst_basic(void)
printf("enqueue 1 obj\n");
ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
cur_src += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
+ if (ret != 1)
goto fail;
printf("enqueue 2 objs\n");
ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
printf("enqueue MAX_BULK objs\n");
ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
printf("dequeue 1 obj\n");
ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
cur_dst += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
+ if (ret != 1)
goto fail;
printf("dequeue 2 objs\n");
ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
printf("dequeue MAX_BULK objs\n");
ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
/* check data */
@@ -538,11 +536,11 @@ test_ring_burst_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
}
@@ -561,19 +559,19 @@ test_ring_burst_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
}
/* Available memory space for the exact MAX_BULK objects */
ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
cur_src += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ if (ret != MAX_BULK - 3)
goto fail;
@@ -581,19 +579,19 @@ test_ring_burst_basic(void)
for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ if (ret != MAX_BULK)
goto fail;
}
/* Available objects - the exact MAX_BULK */
ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
cur_dst += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ if (ret != MAX_BULK - 3)
goto fail;
/* check data */
@@ -611,7 +609,7 @@ test_ring_burst_basic(void)
ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
+ if (ret != 2)
goto fail;
ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
@@ -749,7 +747,7 @@ test_ring_basic_ex(void)
/* Covering the ring burst operation */
ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
- if ((ret & RTE_RING_SZ_MASK) != 2) {
+ if (ret != 2) {
printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
goto fail_test;
}
diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
index 3dd2ed4c..08685d32 100644
--- a/test/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
@@ -38,7 +38,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
diff --git a/test/test/test_service_cores.c b/test/test/test_service_cores.c
index 88fac8f4..311c704e 100644
--- a/test/test/test_service_cores.c
+++ b/test/test/test_service_cores.c
@@ -131,17 +131,13 @@ static int
unregister_all(void)
{
uint32_t i;
- struct rte_service_spec *dead = (struct rte_service_spec *)0xdead;
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_unregister(0),
- "Unregistered NULL pointer");
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_unregister(dead),
- "Unregistered invalid pointer");
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_component_unregister(1000),
+ "Unregistered invalid service id");
uint32_t c = rte_service_get_count();
for (i = 0; i < c; i++) {
- struct rte_service_spec *s = rte_service_get_by_id(i);
- TEST_ASSERT_EQUAL(0, rte_service_unregister(s),
+ TEST_ASSERT_EQUAL(0, rte_service_component_unregister(i),
"Error unregistering a valid service");
}
@@ -160,17 +156,22 @@ dummy_register(void)
struct rte_service_spec service;
memset(&service, 0, sizeof(struct rte_service_spec));
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(-EINVAL,
+ rte_service_component_register(&service, NULL),
"Invalid callback");
service.callback = dummy_cb;
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(-EINVAL,
+ rte_service_component_register(&service, NULL),
"Invalid name");
snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
- TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ uint32_t id;
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, &id),
"Failed to register valid service");
+ rte_service_component_runstate_set(id, 1);
+
return TEST_SUCCESS;
}
@@ -180,32 +181,41 @@ service_get_by_name(void)
{
unregister_all();
- /* ensure with no services registered returns NULL */
- TEST_ASSERT_EQUAL(0, rte_service_get_by_name(DUMMY_SERVICE_NAME),
- "Service get by name should return NULL");
+ uint32_t sid;
+ TEST_ASSERT_EQUAL(-ENODEV,
+ rte_service_get_by_name(DUMMY_SERVICE_NAME, &sid),
+ "get by name with invalid name should return -ENODEV");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ rte_service_get_by_name(DUMMY_SERVICE_NAME, 0x0),
+ "get by name with NULL ptr should return -ENODEV");
/* register service */
struct rte_service_spec service;
memset(&service, 0, sizeof(struct rte_service_spec));
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(-EINVAL,
+ rte_service_component_register(&service, NULL),
"Invalid callback");
service.callback = dummy_cb;
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(-EINVAL,
+ rte_service_component_register(&service, NULL),
"Invalid name");
snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
- TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, NULL),
"Failed to register valid service");
- /* ensure with dummy services registered returns same ptr as ID */
- struct rte_service_spec *s_by_id = rte_service_get_by_id(0);
- TEST_ASSERT_EQUAL(s_by_id, rte_service_get_by_name(DUMMY_SERVICE_NAME),
- "Service get_by_name should equal get_by_id()");
+ /* all services were unregistered; the one registered now should get id 0 */
+ uint32_t service_id_as_expected = 0;
+ TEST_ASSERT_EQUAL(0, rte_service_get_by_name(DUMMY_SERVICE_NAME, &sid),
+ "Service get_by_name should return 0 on valid inputs");
+ TEST_ASSERT_EQUAL(service_id_as_expected, sid,
+ "Service get_by_name should equal expected id");
unregister_all();
- /* ensure after unregister, get_by_name returns NULL */
+ /* ensure after unregister, get_by_name returns -ENODEV */
- TEST_ASSERT_EQUAL(0, rte_service_get_by_name(DUMMY_SERVICE_NAME),
- "get by name should return NULL after unregister");
+ TEST_ASSERT_EQUAL(-ENODEV,
+ rte_service_get_by_name(DUMMY_SERVICE_NAME, &sid),
+ "get by name should return -ENODEV after unregister");
return TEST_SUCCESS;
}
@@ -221,12 +231,12 @@ service_probe_capability(void)
service.callback = dummy_cb;
snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
- TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, NULL),
"Register of MT SAFE service failed");
/* verify flag is enabled */
- struct rte_service_spec *s = rte_service_get_by_id(0);
- int32_t mt = rte_service_probe_capability(s, RTE_SERVICE_CAP_MT_SAFE);
+ const uint32_t sid = 0;
+ int32_t mt = rte_service_probe_capability(sid, RTE_SERVICE_CAP_MT_SAFE);
TEST_ASSERT_EQUAL(1, mt, "MT SAFE capability flag not set.");
@@ -235,12 +245,11 @@ service_probe_capability(void)
memset(&service, 0, sizeof(struct rte_service_spec));
service.callback = dummy_cb;
snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
- TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, NULL),
"Register of non-MT safe service failed");
/* verify flag is enabled */
- s = rte_service_get_by_id(0);
- mt = rte_service_probe_capability(s, RTE_SERVICE_CAP_MT_SAFE);
+ mt = rte_service_probe_capability(sid, RTE_SERVICE_CAP_MT_SAFE);
TEST_ASSERT_EQUAL(0, mt, "MT SAFE cap flag set on non MT SAFE service");
return unregister_all();
@@ -250,9 +259,8 @@ service_probe_capability(void)
static int
service_name(void)
{
- struct rte_service_spec *service = rte_service_get_by_id(0);
-
- int equal = strcmp(service->name, DUMMY_SERVICE_NAME);
+ const char *name = rte_service_get_name(0);
+ int equal = strcmp(name, DUMMY_SERVICE_NAME);
TEST_ASSERT_EQUAL(0, equal, "Error: Service name not correct");
return unregister_all();
@@ -262,11 +270,11 @@ service_name(void)
static int
service_dump(void)
{
- struct rte_service_spec *service = rte_service_get_by_id(0);
- rte_service_set_stats_enable(service, 1);
- rte_service_dump(stdout, service);
- rte_service_set_stats_enable(service, 0);
- rte_service_dump(stdout, service);
+ const uint32_t sid = 0;
+ rte_service_set_stats_enable(sid, 1);
+ rte_service_dump(stdout, 0);
+ rte_service_set_stats_enable(sid, 0);
+ rte_service_dump(stdout, 0);
return unregister_all();
}
@@ -274,28 +282,28 @@ service_dump(void)
static int
service_start_stop(void)
{
- struct rte_service_spec *service = rte_service_get_by_id(0);
+ const uint32_t sid = 0;
- /* is_running() returns if service is running and slcore is mapped */
+ /* runstate_get() returns whether the service runs and a service lcore is mapped */
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
"Service core add did not return zero");
- int ret = rte_service_enable_on_lcore(service, slcore_id);
+ int ret = rte_service_map_lcore_set(sid, slcore_id, 1);
TEST_ASSERT_EQUAL(0, ret,
"Enabling service core, expected 0 got %d", ret);
- TEST_ASSERT_EQUAL(0, rte_service_is_running(service),
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_get(sid),
"Error: Service should be stopped");
- TEST_ASSERT_EQUAL(0, rte_service_stop(service),
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0),
"Error: Service stopped returned non-zero");
- TEST_ASSERT_EQUAL(0, rte_service_is_running(service),
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_get(sid),
"Error: Service is running - should be stopped");
- TEST_ASSERT_EQUAL(0, rte_service_start(service),
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 1),
"Error: Service start returned non-zero");
- TEST_ASSERT_EQUAL(1, rte_service_is_running(service),
+ TEST_ASSERT_EQUAL(1, rte_service_runstate_get(sid),
"Error: Service is not running");
return unregister_all();
@@ -314,12 +322,12 @@ service_remote_launch_func(void *arg)
static int
service_lcore_en_dis_able(void)
{
- struct rte_service_spec *s = rte_service_get_by_id(0);
+ const uint32_t sid = 0;
/* expected failure cases */
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_enable_on_lcore(s, 100000),
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_map_lcore_set(sid, 100000, 1),
"Enable on invalid core did not fail");
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_disable_on_lcore(s, 100000),
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_map_lcore_set(sid, 100000, 0),
"Disable on invalid core did not fail");
/* add service core to allow enabling */
@@ -327,15 +335,15 @@ service_lcore_en_dis_able(void)
"Add service core failed when not in use before");
/* valid enable */
- TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_id, 1),
"Enabling valid service and core failed");
- TEST_ASSERT_EQUAL(1, rte_service_get_enabled_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(1, rte_service_map_lcore_get(sid, slcore_id),
"Enabled core returned not-enabled");
/* valid disable */
- TEST_ASSERT_EQUAL(0, rte_service_disable_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_id, 0),
"Disabling valid service and lcore failed");
- TEST_ASSERT_EQUAL(0, rte_service_get_enabled_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_get(sid, slcore_id),
"Disabled core returned enabled");
/* call remote_launch to verify that app can launch ex-service lcore */
@@ -465,21 +473,19 @@ service_threaded_test(int mt_safe)
if (mt_safe) {
service.callback = dummy_mt_safe_cb;
service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
- } else {
- /* initialize to pass, see callback comment for details */
- test_params[1] = 1;
+ } else
service.callback = dummy_mt_unsafe_cb;
- }
- TEST_ASSERT_EQUAL(0, rte_service_register(&service),
+ uint32_t id;
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, &id),
"Register of MT SAFE service failed");
- struct rte_service_spec *s = rte_service_get_by_id(0);
- TEST_ASSERT_EQUAL(0, rte_service_start(s),
+ const uint32_t sid = 0;
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 1),
"Starting valid service failed");
- TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_1),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_1, 1),
"Failed to enable lcore 1 on mt safe service");
- TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_2),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_2, 1),
"Failed to enable lcore 2 on mt safe service");
rte_service_lcore_start(slcore_1);
rte_service_lcore_start(slcore_2);
@@ -489,10 +495,27 @@ service_threaded_test(int mt_safe)
rte_service_lcore_stop(slcore_1);
rte_service_lcore_stop(slcore_2);
+ TEST_ASSERT_EQUAL(0, test_params[1],
+ "Service run with component runstate = 0");
+
+ /* enable backend runstate: the service should run after this */
+ rte_service_component_runstate_set(id, 1);
+
+ /* initialize to pass, see callback comment for details */
+ if (!mt_safe)
+ test_params[1] = 1;
+
+ rte_service_lcore_start(slcore_1);
+ rte_service_lcore_start(slcore_2);
+
+ /* wait for the worker threads to run */
+ rte_delay_ms(500);
+ rte_service_lcore_stop(slcore_1);
+ rte_service_lcore_stop(slcore_2);
+
TEST_ASSERT_EQUAL(1, test_params[1],
"MT Safe service not run by two cores concurrently");
-
- TEST_ASSERT_EQUAL(0, rte_service_stop(s),
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0),
"Failed to stop MT Safe service");
unregister_all();
@@ -525,15 +548,122 @@ service_mt_unsafe_poll(void)
return TEST_SUCCESS;
}
+static int32_t
+delay_as_a_mt_safe_service(void *args)
+{
+ uint32_t *params = args;
+
+ /* retrieve done flag and atomic lock to inc/dec */
+ uint32_t *done = &params[0];
+ rte_atomic32_t *lock = (rte_atomic32_t *)&params[1];
+
+ while (!*done) {
+ rte_atomic32_inc(lock);
+ rte_delay_us(500);
+ if (rte_atomic32_read(lock) > 1)
+ /* pass: second core has simultaneously incremented */
+ *done = 1;
+ rte_atomic32_dec(lock);
+ }
+
+ return 0;
+}
+
+static int32_t
+delay_as_a_service(void *args)
+{
+ uint32_t *done = (uint32_t *)args;
+ while (!*done)
+ rte_delay_ms(5);
+ return 0;
+}
+
+static int
+service_run_on_app_core_func(void *arg)
+{
+ uint32_t *delay_service_id = (uint32_t *)arg;
+ return rte_service_run_iter_on_app_lcore(*delay_service_id, 1);
+}
+
+static int
+service_app_lcore_poll_impl(const int mt_safe)
+{
+ uint32_t params[2] = {0};
+
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), MT_SAFE_SERVICE_NAME);
+ if (mt_safe) {
+ service.callback = delay_as_a_mt_safe_service;
+ service.callback_userdata = params;
+ service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
+ } else {
+ service.callback = delay_as_a_service;
+ service.callback_userdata = &params;
+ }
+
+ uint32_t id;
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, &id),
+ "Register of app lcore delay service failed");
+
+ rte_service_component_runstate_set(id, 1);
+ rte_service_runstate_set(id, 1);
+
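+ /* launch the service on a second application lcore so that two app
+ * cores poll it concurrently
+ */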
+ uint32_t app_core2 = rte_get_next_lcore(slcore_id, 1, 1);
+ int app_core2_ret = rte_eal_remote_launch(service_run_on_app_core_func,
+ &id, app_core2);
+
+ rte_delay_ms(100);
+
+ int app_core1_ret = service_run_on_app_core_func(&id);
+
+ /* flag done, then wait for the spawned 2nd core to return */
+ params[0] = 1;
+ rte_eal_mp_wait_lcore();
+
+ /* core two gets launched first - and should hold the service lock */
+ TEST_ASSERT_EQUAL(0, app_core2_ret,
+ "App core2 : run service didn't return zero");
+
+ if (mt_safe) {
+ /* mt safe should have both cores return 0 for success */
+ TEST_ASSERT_EQUAL(0, app_core1_ret,
+ "MT Safe: App core1 didn't return 0");
+ } else {
+ /* core one attempts to run later - should be blocked */
+ TEST_ASSERT_EQUAL(-EBUSY, app_core1_ret,
+ "MT Unsafe: App core1 didn't return -EBUSY");
+ }
+
+ unregister_all();
+
+ return TEST_SUCCESS;
+}
+
+static int
+service_app_lcore_mt_safe(void)
+{
+ const int mt_safe = 1;
+ return service_app_lcore_poll_impl(mt_safe);
+}
+
+static int
+service_app_lcore_mt_unsafe(void)
+{
+ const int mt_safe = 0;
+ return service_app_lcore_poll_impl(mt_safe);
+}
+
/* start and stop a service core - ensuring it goes back to sleep */
static int
service_lcore_start_stop(void)
{
/* start service core and service, create mapping so tick() runs */
- struct rte_service_spec *s = rte_service_get_by_id(0);
- TEST_ASSERT_EQUAL(0, rte_service_start(s),
+ const uint32_t sid = 0;
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 1),
"Starting valid service failed");
- TEST_ASSERT_EQUAL(-EINVAL, rte_service_enable_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_map_lcore_set(sid, slcore_id, 1),
"Enabling valid service on non-service core must fail");
/* core start */
@@ -541,7 +671,7 @@ service_lcore_start_stop(void)
"Service core start without add should return EINVAL");
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
"Service core add did not return zero");
- TEST_ASSERT_EQUAL(0, rte_service_enable_on_lcore(s, slcore_id),
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_id, 1),
"Enabling valid service on valid core failed");
TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
"Service core start after add failed");
@@ -553,6 +683,10 @@ service_lcore_start_stop(void)
"Service core expected to poll service but it didn't");
/* core stop */
+ TEST_ASSERT_EQUAL(-EBUSY, rte_service_lcore_stop(slcore_id),
+ "Service core running a service should return -EBUSY");
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0),
+ "Stopping valid service failed");
TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_stop(100000),
"Invalid Service core stop should return -EINVAL");
TEST_ASSERT_EQUAL(0, rte_service_lcore_stop(slcore_id),
@@ -586,6 +720,8 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(dummy_register, NULL, service_lcore_en_dis_able),
TEST_CASE_ST(dummy_register, NULL, service_mt_unsafe_poll),
TEST_CASE_ST(dummy_register, NULL, service_mt_safe_poll),
+ TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_safe),
+ TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_unsafe),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
diff --git a/test/test/test_spinlock.c b/test/test/test_spinlock.c
index a93f1baa..bb34e0c6 100644
--- a/test/test/test_spinlock.c
+++ b/test/test/test_spinlock.c
@@ -40,7 +40,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
diff --git a/test/test/test_table.c b/test/test/test_table.c
index 9e9eed88..db7d4e66 100644
--- a/test/test/test_table.c
+++ b/test/test/test_table.c
@@ -72,6 +72,7 @@ static void app_init_rings(void);
static void app_init_mbuf_pools(void);
uint64_t pipeline_test_hash(void *key,
+ __attribute__((unused)) void *key_mask,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
diff --git a/test/test/test_table.h b/test/test/test_table.h
index 84d1845a..8c1df333 100644
--- a/test/test/test_table.h
+++ b/test/test/test_table.h
@@ -94,7 +94,7 @@
APP_METADATA_OFFSET(32)); \
k32 = (uint32_t *) key; \
k32[0] = (value); \
- *signature = pipeline_test_hash(key, 0, 0); \
+ *signature = pipeline_test_hash(key, NULL, 0, 0); \
rte_ring_enqueue((ring), m); \
} while (0)
@@ -131,6 +131,7 @@
/* Function definitions */
uint64_t pipeline_test_hash(
void *key,
+ __attribute__((unused)) void *key_mask,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed);
diff --git a/test/test/test_table_combined.c b/test/test/test_table_combined.c
index a2d19a1a..e8637884 100644
--- a/test/test/test_table_combined.c
+++ b/test/test/test_table_combined.c
@@ -441,12 +441,15 @@ test_table_hash8lru(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key8_lru_params key8lru_params = {
- .n_entries = 1<<24,
- .f_hash = pipeline_test_hash,
- .signature_offset = APP_METADATA_OFFSET(0),
+ struct rte_table_hash_params key8lru_params = {
+ .name = "TABLE",
+ .key_size = 8,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
uint8_t key8lru[8];
@@ -475,14 +478,14 @@ test_table_hash8lru(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key8lru_params.n_entries = 0;
+ key8lru_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key8_lru_ops,
(void *)&key8lru_params, (void *)key8lru, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key8lru_params.n_entries = 1<<16;
+ key8lru_params.n_keys = 1<<16;
key8lru_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key8_lru_ops,
@@ -499,13 +502,15 @@ test_table_hash16lru(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key16_lru_params key16lru_params = {
- .n_entries = 1<<16,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
+ struct rte_table_hash_params key16lru_params = {
+ .name = "TABLE",
+ .key_size = 16,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
uint8_t key16lru[16];
@@ -534,14 +539,14 @@ test_table_hash16lru(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key16lru_params.n_entries = 0;
+ key16lru_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key16_lru_ops,
(void *)&key16lru_params, (void *)key16lru, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key16lru_params.n_entries = 1<<16;
+ key16lru_params.n_keys = 1<<16;
key16lru_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key16_lru_ops,
@@ -558,12 +563,15 @@ test_table_hash32lru(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key32_lru_params key32lru_params = {
- .n_entries = 1<<16,
+ struct rte_table_hash_params key32lru_params = {
+ .name = "TABLE",
+ .key_size = 32,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
.f_hash = pipeline_test_hash,
.seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
};
uint8_t key32lru[32];
@@ -592,14 +600,14 @@ test_table_hash32lru(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key32lru_params.n_entries = 0;
+ key32lru_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key32_lru_ops,
(void *)&key32lru_params, (void *)key32lru, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key32lru_params.n_entries = 1<<16;
+ key32lru_params.n_keys = 1<<16;
key32lru_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key32_lru_ops,
@@ -616,14 +624,15 @@ test_table_hash8ext(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key8_ext_params key8ext_params = {
- .n_entries = 1<<16,
- .n_entries_ext = 1<<15,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
+ struct rte_table_hash_params key8ext_params = {
+ .name = "TABLE",
+ .key_size = 8,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
uint8_t key8ext[8];
@@ -652,14 +661,14 @@ test_table_hash8ext(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key8ext_params.n_entries = 0;
+ key8ext_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key8_ext_ops,
(void *)&key8ext_params, (void *)key8ext, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key8ext_params.n_entries = 1<<16;
+ key8ext_params.n_keys = 1<<16;
key8ext_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key8_ext_ops,
@@ -667,13 +676,6 @@ test_table_hash8ext(void)
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key8ext_params.f_hash = pipeline_test_hash;
- key8ext_params.n_entries_ext = 0;
-
- status = test_table_type(&rte_table_hash_key8_ext_ops,
- (void *)&key8ext_params, (void *)key8ext, &table_packets, NULL, 0);
- VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
-
return 0;
}
@@ -683,14 +685,15 @@ test_table_hash16ext(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key16_ext_params key16ext_params = {
- .n_entries = 1<<16,
- .n_entries_ext = 1<<15,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
+ struct rte_table_hash_params key16ext_params = {
+ .name = "TABLE",
+ .key_size = 16,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
uint8_t key16ext[16];
@@ -719,14 +722,14 @@ test_table_hash16ext(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key16ext_params.n_entries = 0;
+ key16ext_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key16_ext_ops,
(void *)&key16ext_params, (void *)key16ext, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key16ext_params.n_entries = 1<<16;
+ key16ext_params.n_keys = 1<<16;
key16ext_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key16_ext_ops,
@@ -734,13 +737,6 @@ test_table_hash16ext(void)
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key16ext_params.f_hash = pipeline_test_hash;
- key16ext_params.n_entries_ext = 0;
-
- status = test_table_type(&rte_table_hash_key16_ext_ops,
- (void *)&key16ext_params, (void *)key16ext, &table_packets, NULL, 0);
- VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
-
return 0;
}
@@ -750,13 +746,15 @@ test_table_hash32ext(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_key32_ext_params key32ext_params = {
- .n_entries = 1<<16,
- .n_entries_ext = 1<<15,
+ struct rte_table_hash_params key32ext_params = {
+ .name = "TABLE",
+ .key_size = 32,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
.f_hash = pipeline_test_hash,
.seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
};
uint8_t key32ext[32];
@@ -785,14 +783,14 @@ test_table_hash32ext(void)
VERIFY(status, CHECK_TABLE_OK);
/* Invalid parameters */
- key32ext_params.n_entries = 0;
+ key32ext_params.n_keys = 0;
status = test_table_type(&rte_table_hash_key32_ext_ops,
(void *)&key32ext_params, (void *)key32ext, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key32ext_params.n_entries = 1<<16;
+ key32ext_params.n_keys = 1<<16;
key32ext_params.f_hash = NULL;
status = test_table_type(&rte_table_hash_key32_ext_ops,
@@ -800,14 +798,6 @@ test_table_hash32ext(void)
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
- key32ext_params.f_hash = pipeline_test_hash;
- key32ext_params.n_entries_ext = 0;
-
- status = test_table_type(&rte_table_hash_key32_ext_ops,
- (void *)&key32ext_params, (void *)key32ext, &table_packets,
- NULL, 0);
- VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
-
return 0;
}
@@ -817,14 +807,15 @@ test_table_hash_cuckoo_combined(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_cuckoo_params cuckoo_params = {
+ struct rte_table_hash_params cuckoo_params = {
+ .name = "TABLE",
.key_size = 32,
- .n_keys = 1<<16,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
.f_hash = pipeline_test_hash,
.seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
- .key_offset = APP_METADATA_OFFSET(32),
- .name = "CUCKOO_HASH",
};
uint8_t key_cuckoo[32];
@@ -847,7 +838,7 @@ test_table_hash_cuckoo_combined(void)
table_packets.n_hit_packets = 50;
table_packets.n_miss_packets = 50;
- status = test_table_type(&rte_table_hash_cuckoo_dosig_ops,
+ status = test_table_type(&rte_table_hash_cuckoo_ops,
(void *)&cuckoo_params, (void *)key_cuckoo, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_OK);
@@ -855,7 +846,7 @@ test_table_hash_cuckoo_combined(void)
/* Invalid parameters */
cuckoo_params.key_size = 0;
- status = test_table_type(&rte_table_hash_cuckoo_dosig_ops,
+ status = test_table_type(&rte_table_hash_cuckoo_ops,
(void *)&cuckoo_params, (void *)key_cuckoo, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
@@ -863,7 +854,7 @@ test_table_hash_cuckoo_combined(void)
cuckoo_params.key_size = 32;
cuckoo_params.n_keys = 0;
- status = test_table_type(&rte_table_hash_cuckoo_dosig_ops,
+ status = test_table_type(&rte_table_hash_cuckoo_ops,
(void *)&cuckoo_params, (void *)key_cuckoo, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
@@ -871,7 +862,7 @@ test_table_hash_cuckoo_combined(void)
cuckoo_params.n_keys = 1<<16;
cuckoo_params.f_hash = NULL;
- status = test_table_type(&rte_table_hash_cuckoo_dosig_ops,
+ status = test_table_type(&rte_table_hash_cuckoo_ops,
(void *)&cuckoo_params, (void *)key_cuckoo, &table_packets,
NULL, 0);
VERIFY(status, CHECK_TABLE_TABLE_CONFIG);
diff --git a/test/test/test_table_tables.c b/test/test/test_table_tables.c
index d835eb9f..a1b0c582 100644
--- a/test/test/test_table_tables.c
+++ b/test/test/test_table_tables.c
@@ -60,16 +60,16 @@ table_test table_tests[] = {
memset(key, 0, 32); \
k32 = (uint32_t *) key; \
k32[0] = (value); \
- *signature = pipeline_test_hash(key, 0, 0); \
+ *signature = pipeline_test_hash(key, NULL, 0, 0); \
} while (0)
unsigned n_table_tests = RTE_DIM(table_tests);
/* Function prototypes */
static int
-test_table_hash_lru_generic(struct rte_table_ops *ops);
+test_table_hash_lru_generic(struct rte_table_ops *ops, uint32_t key_size);
static int
-test_table_hash_ext_generic(struct rte_table_ops *ops);
+test_table_hash_ext_generic(struct rte_table_ops *ops, uint32_t key_size);
struct rte_bucket_4_8 {
/* Cache line 0 */
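
[Editor's note] The extra NULL in the PREPARE_PACKET hunk reflects the reworked hash callback: rte_table_hash_op_hash now receives the key mask and key size alongside the seed. A sketch of a conforming callback, with the typedef as declared in 17.11's rte_table_hash.h and a deliberately trivial mixing step standing in for a real hash:

	#include <stdint.h>

	typedef uint64_t (*rte_table_hash_op_hash)(
		void *key,
		void *key_mask,    /* NULL when the table has no key mask */
		uint32_t key_size,
		uint64_t seed);

	static uint64_t
	example_hash(void *key, void *key_mask, uint32_t key_size, uint64_t seed)
	{
		(void)key_mask;    /* the tests pass NULL here, so ignore it */
		(void)key_size;
		return *(uint32_t *)key ^ seed;  /* placeholder, not DPDK's hash */
	}
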
@@ -655,7 +655,7 @@ test_table_lpm_ipv6(void)
}
static int
-test_table_hash_lru_generic(struct rte_table_ops *ops)
+test_table_hash_lru_generic(struct rte_table_ops *ops, uint32_t key_size)
{
int status, i;
uint64_t expected_mask = 0, result_mask;
@@ -667,36 +667,24 @@ test_table_hash_lru_generic(struct rte_table_ops *ops)
int key_found;
/* Initialize params and create tables */
- struct rte_table_hash_key8_lru_params hash_params = {
- .n_entries = 1 << 10,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(1),
+ struct rte_table_hash_params hash_params = {
+ .name = "TABLE",
+ .key_size = key_size,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 10,
+ .n_buckets = 1 << 10,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
- hash_params.n_entries = 0;
+ hash_params.n_keys = 0;
table = ops->f_create(&hash_params, 0, 1);
if (table != NULL)
return -1;
- hash_params.n_entries = 1 << 10;
- hash_params.signature_offset = APP_METADATA_OFFSET(1);
-
- table = ops->f_create(&hash_params, 0, 1);
- if (table == NULL)
- return -2;
-
- hash_params.signature_offset = APP_METADATA_OFFSET(0);
- hash_params.key_offset = APP_METADATA_OFFSET(1);
-
- table = ops->f_create(&hash_params, 0, 1);
- if (table == NULL)
- return -3;
-
- hash_params.key_offset = APP_METADATA_OFFSET(32);
+ hash_params.n_keys = 1 << 10;
hash_params.f_hash = NULL;
table = ops->f_create(&hash_params, 0, 1);
@@ -770,7 +758,7 @@ test_table_hash_lru_generic(struct rte_table_ops *ops)
}
static int
-test_table_hash_ext_generic(struct rte_table_ops *ops)
+test_table_hash_ext_generic(struct rte_table_ops *ops, uint32_t key_size)
{
int status, i;
uint64_t expected_mask = 0, result_mask;
@@ -782,35 +770,24 @@ test_table_hash_ext_generic(struct rte_table_ops *ops)
void *entry_ptr;
/* Initialize params and create tables */
- struct rte_table_hash_key8_ext_params hash_params = {
- .n_entries = 1 << 10,
- .n_entries_ext = 1 << 4,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(1),
+ struct rte_table_hash_params hash_params = {
+ .name = "TABLE",
+ .key_size = key_size,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
+ .n_keys = 1 << 10,
+ .n_buckets = 1 << 10,
+ .f_hash = pipeline_test_hash,
+ .seed = 0,
};
- hash_params.n_entries = 0;
+ hash_params.n_keys = 0;
table = ops->f_create(&hash_params, 0, 1);
if (table != NULL)
return -1;
- hash_params.n_entries = 1 << 10;
- hash_params.n_entries_ext = 0;
- table = ops->f_create(&hash_params, 0, 1);
- if (table != NULL)
- return -2;
-
- hash_params.n_entries_ext = 1 << 4;
- hash_params.signature_offset = APP_METADATA_OFFSET(1);
- table = ops->f_create(&hash_params, 0, 1);
- if (table == NULL)
- return -2;
-
- hash_params.signature_offset = APP_METADATA_OFFSET(0);
+ hash_params.n_keys = 1 << 10;
hash_params.key_offset = APP_METADATA_OFFSET(1);
table = ops->f_create(&hash_params, 0, 1);
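
[Editor's note] Folding the key8/key16/key32 params into one struct is what lets the two generic checkers above take key_size as a parameter instead of existing once per variant. A condensed sketch of the invalid-parameter pattern they now share, reusing the test's helpers:

	static int
	check_invalid_create(struct rte_table_ops *ops, uint32_t key_size)
	{
		struct rte_table_hash_params p = {
			.name = "TABLE",
			.key_size = key_size,
			.key_offset = APP_METADATA_OFFSET(32),
			.key_mask = NULL,
			.n_keys = 0,               /* invalid on purpose */
			.n_buckets = 1 << 10,
			.f_hash = pipeline_test_hash,
			.seed = 0,
		};

		if (ops->f_create(&p, 0, 1) != NULL)
			return -1;                 /* must be rejected */

		p.n_keys = 1 << 10;
		p.f_hash = NULL;               /* also invalid */
		if (ops->f_create(&p, 0, 1) != NULL)
			return -1;

		return 0;
	}
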
@@ -895,20 +872,21 @@ test_table_hash_lru(void)
{
int status;
- status = test_table_hash_lru_generic(&rte_table_hash_key8_lru_ops);
- if (status < 0)
- return status;
-
status = test_table_hash_lru_generic(
- &rte_table_hash_key8_lru_dosig_ops);
+ &rte_table_hash_key8_lru_ops,
+ 8);
if (status < 0)
return status;
- status = test_table_hash_lru_generic(&rte_table_hash_key16_lru_ops);
+ status = test_table_hash_lru_generic(
+ &rte_table_hash_key16_lru_ops,
+ 16);
if (status < 0)
return status;
- status = test_table_hash_lru_generic(&rte_table_hash_key32_lru_ops);
+ status = test_table_hash_lru_generic(
+ &rte_table_hash_key32_lru_ops,
+ 32);
if (status < 0)
return status;
@@ -924,20 +902,15 @@ test_table_hash_ext(void)
{
int status;
- status = test_table_hash_ext_generic(&rte_table_hash_key8_ext_ops);
+ status = test_table_hash_ext_generic(&rte_table_hash_key8_ext_ops, 8);
if (status < 0)
return status;
- status = test_table_hash_ext_generic(
- &rte_table_hash_key8_ext_dosig_ops);
+ status = test_table_hash_ext_generic(&rte_table_hash_key16_ext_ops, 16);
if (status < 0)
return status;
- status = test_table_hash_ext_generic(&rte_table_hash_key16_ext_ops);
- if (status < 0)
- return status;
-
- status = test_table_hash_ext_generic(&rte_table_hash_key32_ext_ops);
+ status = test_table_hash_ext_generic(&rte_table_hash_key32_ext_ops, 32);
if (status < 0)
return status;
@@ -959,23 +932,24 @@ test_table_hash_cuckoo(void)
uint32_t entry_size = 1;
/* Initialize params and create tables */
- struct rte_table_hash_cuckoo_params cuckoo_params = {
+ struct rte_table_hash_params cuckoo_params = {
+ .name = "TABLE",
.key_size = 32,
- .n_keys = 1 << 24,
- .f_hash = pipeline_test_hash,
- .seed = 0,
- .signature_offset = APP_METADATA_OFFSET(0),
.key_offset = APP_METADATA_OFFSET(32),
- .name = "CUCKOO",
+ .key_mask = NULL,
+ .n_keys = 1 << 16,
+ .n_buckets = 1 << 16,
+ .f_hash = (rte_table_hash_op_hash)pipeline_test_hash,
+ .seed = 0,
};
- table = rte_table_hash_cuckoo_dosig_ops.f_create(NULL, 0, entry_size);
+ table = rte_table_hash_cuckoo_ops.f_create(NULL, 0, entry_size);
if (table != NULL)
return -1;
cuckoo_params.key_size = 0;
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params,
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
0, entry_size);
if (table != NULL)
return -2;
@@ -983,7 +957,7 @@ test_table_hash_cuckoo(void)
cuckoo_params.key_size = 32;
cuckoo_params.n_keys = 0;
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params,
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
0, entry_size);
if (table != NULL)
return -3;
@@ -991,7 +965,7 @@ test_table_hash_cuckoo(void)
cuckoo_params.n_keys = 1 << 24;
cuckoo_params.f_hash = NULL;
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params,
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
0, entry_size);
if (table != NULL)
return -4;
@@ -999,24 +973,24 @@ test_table_hash_cuckoo(void)
cuckoo_params.f_hash = pipeline_test_hash;
cuckoo_params.name = NULL;
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params,
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
0, entry_size);
if (table != NULL)
return -5;
cuckoo_params.name = "CUCKOO";
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params,
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
0, entry_size);
if (table == NULL)
return -6;
/* Free */
- status = rte_table_hash_cuckoo_dosig_ops.f_free(table);
+ status = rte_table_hash_cuckoo_ops.f_free(table);
if (status < 0)
return -7;
- status = rte_table_hash_cuckoo_dosig_ops.f_free(NULL);
+ status = rte_table_hash_cuckoo_ops.f_free(NULL);
if (status == 0)
return -8;
@@ -1027,60 +1001,60 @@ test_table_hash_cuckoo(void)
memset(key_cuckoo, 0, 32);
kcuckoo[0] = rte_be_to_cpu_32(0xadadadad);
- table = rte_table_hash_cuckoo_dosig_ops.f_create(&cuckoo_params, 0, 1);
+ table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params, 0, 1);
if (table == NULL)
return -9;
entry = 'A';
- status = rte_table_hash_cuckoo_dosig_ops.f_add(NULL, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_add(NULL, &key_cuckoo,
&entry, &key_found, &entry_ptr);
if (status == 0)
return -10;
- status = rte_table_hash_cuckoo_dosig_ops.f_add(table, NULL, &entry,
+ status = rte_table_hash_cuckoo_ops.f_add(table, NULL, &entry,
&key_found, &entry_ptr);
if (status == 0)
return -11;
- status = rte_table_hash_cuckoo_dosig_ops.f_add(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_add(table, &key_cuckoo,
NULL, &key_found, &entry_ptr);
if (status == 0)
return -12;
- status = rte_table_hash_cuckoo_dosig_ops.f_add(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_add(table, &key_cuckoo,
&entry, &key_found, &entry_ptr);
if (status != 0)
return -13;
- status = rte_table_hash_cuckoo_dosig_ops.f_add(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_add(table, &key_cuckoo,
&entry, &key_found, &entry_ptr);
if (status != 0)
return -14;
/* Delete */
- status = rte_table_hash_cuckoo_dosig_ops.f_delete(NULL, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_delete(NULL, &key_cuckoo,
&key_found, NULL);
if (status == 0)
return -15;
- status = rte_table_hash_cuckoo_dosig_ops.f_delete(table, NULL,
+ status = rte_table_hash_cuckoo_ops.f_delete(table, NULL,
&key_found, NULL);
if (status == 0)
return -16;
- status = rte_table_hash_cuckoo_dosig_ops.f_delete(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_delete(table, &key_cuckoo,
&key_found, NULL);
if (status != 0)
return -17;
- status = rte_table_hash_cuckoo_dosig_ops.f_delete(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_delete(table, &key_cuckoo,
&key_found, NULL);
if (status != -ENOENT)
return -18;
/* Traffic flow */
entry = 'A';
- status = rte_table_hash_cuckoo_dosig_ops.f_add(table, &key_cuckoo,
+ status = rte_table_hash_cuckoo_ops.f_add(table, &key_cuckoo,
&entry, &key_found,
&entry_ptr);
if (status < 0)
@@ -1093,7 +1067,7 @@ test_table_hash_cuckoo(void)
} else
PREPARE_PACKET(mbufs[i], 0xadadadab);
- rte_table_hash_cuckoo_dosig_ops.f_lookup(table, mbufs, -1,
+ rte_table_hash_cuckoo_ops.f_lookup(table, mbufs, -1,
&result_mask, (void **)entries);
if (result_mask != expected_mask)
return -20;
@@ -1102,7 +1076,7 @@ test_table_hash_cuckoo(void)
for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
rte_pktmbuf_free(mbufs[i]);
- status = rte_table_hash_cuckoo_dosig_ops.f_free(table);
+ status = rte_table_hash_cuckoo_ops.f_free(table);
return 0;
}
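
[Editor's note] With the dosig variants gone, rte_table_hash_cuckoo_ops is the single cuckoo entry point, and it takes the same rte_table_hash_params as every other hash table. A condensed sketch of the add/lookup/delete/free sequence the test drives, with error checks thinned out and the params, mbufs and entries arrays supplied by the caller as they are above:

	#include <stdint.h>
	#include <rte_mbuf.h>
	#include <rte_table_hash.h>

	static int
	cuckoo_lifecycle(struct rte_table_hash_params *params,
		struct rte_mbuf **mbufs, void **entries)
	{
		uint8_t key[32] = {0xad, 0xad, 0xad, 0xad};
		char entry = 'A';
		void *t, *entry_ptr;
		int key_found;
		uint64_t hit_mask;

		t = rte_table_hash_cuckoo_ops.f_create(params, 0, 1);
		if (t == NULL)
			return -1;

		if (rte_table_hash_cuckoo_ops.f_add(t, key, &entry, &key_found,
				&entry_ptr) != 0)
			return -2;

		/* UINT64_MAX == the -1 burst mask used above: all slots valid */
		rte_table_hash_cuckoo_ops.f_lookup(t, mbufs, UINT64_MAX,
			&hit_mask, entries);

		rte_table_hash_cuckoo_ops.f_delete(t, key, &key_found, NULL);
		return rte_table_hash_cuckoo_ops.f_free(t);
	}
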
diff --git a/test/test/test_timer.c b/test/test/test_timer.c
index de0c312f..04c23989 100644
--- a/test/test/test_timer.c
+++ b/test/test/test_timer.c
@@ -126,7 +126,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 9d46ad56..b57a9493 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -34,6 +34,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
@@ -211,13 +212,15 @@ virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
return -1;
}
-static void
+static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
if (stats)
rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
+
+ return 0;
}
static void
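
[Editor's note] The void-to-int change mirrors the 17.11 ethdev update that lets stats_get callbacks report failure instead of silently doing nothing. The stub above always returns 0; a slightly more defensive variant might look like this (the -EINVAL path is an illustration, not what the test does, and virtual_ethdev_private is this file's own private struct):

	#include <errno.h>
	#include <rte_ethdev.h>
	#include <rte_memcpy.h>

	static int
	sketch_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	{
		struct virtual_ethdev_private *dev_private =
			dev->data->dev_private;

		if (stats == NULL)
			return -EINVAL;  /* now expressible; void could not say this */

		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
		return 0;
	}
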
@@ -261,7 +264,7 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
void
-virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
@@ -275,7 +278,7 @@ virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
}
void
-virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
@@ -288,7 +291,7 @@ virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
}
void
-virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
@@ -301,7 +304,7 @@ virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
}
void
-virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
@@ -314,7 +317,7 @@ virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
}
void
-virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
@@ -430,7 +433,7 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
void
-virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
@@ -442,7 +445,7 @@ virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
void
-virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
struct virtual_ethdev_private *dev_private = NULL;
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
@@ -458,7 +461,7 @@ virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
}
void
-virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
uint8_t packet_fail_count)
{
struct virtual_ethdev_private *dev_private = NULL;
@@ -470,7 +473,7 @@ virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
}
void
-virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
+virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
@@ -478,7 +481,7 @@ virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
}
void
-virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
+virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
uint8_t link_status)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
@@ -490,7 +493,7 @@ virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
}
int
-virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
+virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
@@ -502,7 +505,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
}
int
-virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
+virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length)
{
struct virtual_ethdev_private *dev_private;
diff --git a/test/test/virtual_pmd.h b/test/test/virtual_pmd.h
index de001884..7c53dd34 100644
--- a/test/test/virtual_pmd.h
+++ b/test/test/virtual_pmd.h
@@ -48,53 +48,55 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
uint8_t socket_id, uint8_t isr_support);
void
-virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status);
+virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status);
void
-virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
+virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
uint8_t link_status);
int
-virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
+virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
struct rte_mbuf **pkts_burst, int burst_length);
int
-virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
+virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length);
/** Control methods for the dev_ops function pointers, used to control the
 * behavior of the Virtual PMD */
void
-virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_stop_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_stop_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id,
+ uint8_t success);
void
-virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id,
+ uint8_t success);
void
-virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success);
/* If a value greater than zero is set for packet_fail_count, the virtual
 * device tx burst function will fail that many packets from the burst, or
 * all packets if packet_fail_count is greater than the number of packets
 * in the burst */
void
-virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
uint8_t packet_fail_count);
#ifdef __cplusplus
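
[Editor's note] All of these signature changes are one mechanical migration: 17.11 widened ethdev port ids from uint8_t to uint16_t, so every helper indexing rte_eth_devices by port id follows suit. A hypothetical accessor showing the pattern (the bounds check is added here for illustration; the real helpers index directly):

	#include <stdint.h>
	#include <rte_ethdev.h>

	static struct rte_eth_dev *
	vrtl_eth_dev_get(uint16_t port_id)    /* was uint8_t before 17.11 */
	{
		if (port_id >= RTE_MAX_ETHPORTS)  /* ids can exceed 255 now */
			return NULL;

		return &rte_eth_devices[port_id];
	}
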
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index da6e40cb..a539995c 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -204,8 +204,7 @@ def check_modules():
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
- sysfs_mods = map(lambda a:
- a if a != 'vfio_pci' else 'vfio-pci', sysfs_mods)
+ sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci' for a in sysfs_mods]
for mod in mods:
if mod["Name"] in sysfs_mods: